diff options
Diffstat (limited to 'node-repository/src')
68 files changed, 414 insertions, 212 deletions
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java index 3d76c8e3f94..1406aaecb71 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java @@ -167,13 +167,18 @@ public class AllocatableClusterResources { advertisedResources = systemLimits.enlargeToLegal(advertisedResources, clusterSpec, exclusive); // Ask for something legal advertisedResources = applicationLimits.cap(advertisedResources); // Overrides other conditions, even if it will then fail var realResources = nodeRepository.resourcesCalculator().requestToReal(advertisedResources, exclusive); // What we'll really get + if ( ! systemLimits.isWithinRealLimits(realResources, clusterSpec) && advertisedResources.storageType() == NodeResources.StorageType.any) { + // Since local disk reserves some of the storage, try to constrain to remote disk + advertisedResources = advertisedResources.with(NodeResources.StorageType.remote); + realResources = nodeRepository.resourcesCalculator().requestToReal(advertisedResources, exclusive); + } if ( ! 
systemLimits.isWithinRealLimits(realResources, clusterSpec)) return Optional.empty(); if (anySatisfies(realResources, availableRealHostResources)) - return Optional.of(new AllocatableClusterResources(wantedResources.with(realResources), - advertisedResources, - wantedResources, - clusterSpec)); + return Optional.of(new AllocatableClusterResources(wantedResources.with(realResources), + advertisedResources, + wantedResources, + clusterSpec)); else return Optional.empty(); } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java index 7d6a4730ed8..db5771cd623 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java @@ -32,6 +32,7 @@ public class ClusterModel { static final double idealQueryCpuLoad = 0.8; static final double idealWriteCpuLoad = 0.95; static final double idealMemoryLoad = 0.65; + static final double idealAdminMemoryLoad = 0.75; static final double idealContainerDiskLoad = 0.95; static final double idealContentDiskLoad = 0.6; @@ -207,7 +208,7 @@ public class ClusterModel { * if one of the nodes go down. 
*/ public Load idealLoad() { - return new Load(idealCpuLoad(), idealMemoryLoad, idealDiskLoad()).divide(redundancyAdjustment()); + return new Load(idealCpuLoad(), idealMemoryLoad(), idealDiskLoad()).divide(redundancyAdjustment()); } public int nodesAdjustedForRedundancy(int nodes, int groups) { @@ -307,6 +308,12 @@ public class ClusterModel { return duration; } + private double idealMemoryLoad() { + if (clusterSpec.type() == ClusterSpec.Type.admin) + return idealAdminMemoryLoad; // Not autoscaled, but ideal shown in console + return idealMemoryLoad; + } + private double idealDiskLoad() { // Stateless clusters are not expected to consume more disk over time - // if they do it is due to logs which will be rotated away right before the disk is full diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterNodesTimeseries.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterNodesTimeseries.java index 2eb57dcdd87..b86a24af5c9 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterNodesTimeseries.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterNodesTimeseries.java @@ -5,7 +5,6 @@ import com.yahoo.vespa.hosted.provision.NodeList; import com.yahoo.vespa.hosted.provision.applications.Cluster; import java.time.Duration; -import java.time.Instant; import java.util.List; import java.util.Optional; import java.util.OptionalDouble; @@ -36,7 +35,7 @@ public class ClusterNodesTimeseries { var timeseries = db.getNodeTimeseries(period.plus(warmupDuration.multipliedBy(4)), clusterNodes); if (cluster.lastScalingEvent().isPresent()) { long currentGeneration = cluster.lastScalingEvent().get().generation(); - timeseries = keepCurrentGenerationAfterWarmup(timeseries, currentGeneration); + timeseries = keepGenerationAfterWarmup(timeseries, currentGeneration); } timeseries = keep(timeseries, snapshot -> snapshot.inService() && 
snapshot.stable()); timeseries = keep(timeseries, snapshot -> ! snapshot.at().isBefore(db.clock().instant().minus(period))); @@ -111,14 +110,14 @@ public class ClusterNodesTimeseries { } private static List<NodeTimeseries> keep(List<NodeTimeseries> timeseries, Predicate<NodeMetricSnapshot> filter) { - return timeseries.stream().map(nodeTimeseries -> nodeTimeseries.keep(filter)).collect(Collectors.toList()); + return timeseries.stream().map(nodeTimeseries -> nodeTimeseries.keep(filter)).toList(); } - private static List<NodeTimeseries> keepCurrentGenerationAfterWarmup(List<NodeTimeseries> timeseries, - long currentGeneration) { + private List<NodeTimeseries> keepGenerationAfterWarmup(List<NodeTimeseries> timeseries, long currentGeneration) { return timeseries.stream() - .map(nodeTimeseries -> nodeTimeseries.keepCurrentGenerationAfterWarmup(currentGeneration)) - .collect(Collectors.toList()); + .map(nodeTimeseries -> nodeTimeseries.keepGenerationAfterWarmup(currentGeneration, + clusterNodes.node(nodeTimeseries.hostname()))) + .toList(); } public static ClusterNodesTimeseries empty() { diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MemoryMetricsDb.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MemoryMetricsDb.java index 9eefd4e60b7..d132d574658 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MemoryMetricsDb.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MemoryMetricsDb.java @@ -70,11 +70,11 @@ public class MemoryMetricsDb implements MetricsDb { Instant startTime = clock().instant().minus(period); synchronized (lock) { if (hostnames.isEmpty()) - return nodeTimeseries.values().stream().map(ns -> ns.keepAfter(startTime)).collect(Collectors.toList()); + return nodeTimeseries.values().stream().map(ns -> ns.keepAfter(startTime)).toList(); else return hostnames.stream() .map(hostname -> nodeTimeseries.getOrDefault(hostname, new 
NodeTimeseries(hostname, List.of())).keepAfter(startTime)) - .collect(Collectors.toList()); + .toList(); } } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/NodeTimeseries.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/NodeTimeseries.java index 500dbf0f66f..c25b0684f5a 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/NodeTimeseries.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/NodeTimeseries.java @@ -1,6 +1,9 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.provision.autoscale; +import com.yahoo.vespa.hosted.provision.Node; +import com.yahoo.vespa.hosted.provision.node.History; + import java.time.Instant; import java.util.ArrayList; import java.util.Collections; @@ -69,28 +72,44 @@ public class NodeTimeseries { public NodeTimeseries keep(Predicate<NodeMetricSnapshot> filter) { return new NodeTimeseries(hostname, snapshots.stream() .filter(snapshot -> filter.test(snapshot)) - .collect(Collectors.toList())); + .toList()); } public NodeTimeseries keepAfter(Instant oldestTime) { return new NodeTimeseries(hostname, snapshots.stream() .filter(snapshot -> snapshot.at().equals(oldestTime) || snapshot.at().isAfter(oldestTime)) - .collect(Collectors.toList())); + .toList()); } - public NodeTimeseries keepCurrentGenerationAfterWarmup(long currentGeneration) { - Optional<Instant> generationChange = generationChange(currentGeneration); - return keep(snapshot -> isOnCurrentGenerationAfterWarmup(snapshot, currentGeneration, generationChange)); + public NodeTimeseries keepGenerationAfterWarmup(long generation, Optional<Node> node) { + Optional<Instant> generationChange = generationChange(generation); + return keep(snapshot -> isOnGenerationAfterWarmup(snapshot, node, generation, generationChange)); + } + private boolean 
isOnGenerationAfterWarmup(NodeMetricSnapshot snapshot, + Optional<Node> node, + long generation, + Optional<Instant> generationChange) { + if ( ! node.isPresent()) return false; // Node has been removed + if ( ! onAtLeastGeneration(generation, snapshot)) return false; + if (recentlyChangedGeneration(snapshot, generationChange)) return false; + if (recentlyCameUp(snapshot, node.get())) return false; + return true; } - private boolean isOnCurrentGenerationAfterWarmup(NodeMetricSnapshot snapshot, - long currentGeneration, - Optional<Instant> generationChange) { + private boolean onAtLeastGeneration(long generation, NodeMetricSnapshot snapshot) { if (snapshot.generation() < 0) return true; // Content nodes do not yet send generation - if (snapshot.generation() < currentGeneration) return false; - if (generationChange.isEmpty()) return true; - return ! snapshot.at().isBefore(generationChange.get().plus(warmupDuration)); + return snapshot.generation() >= generation; + } + + private boolean recentlyChangedGeneration(NodeMetricSnapshot snapshot, Optional<Instant> generationChange) { + if (generationChange.isEmpty()) return false; + return snapshot.at().isBefore(generationChange.get().plus(warmupDuration)); + } + + private boolean recentlyCameUp(NodeMetricSnapshot snapshot, Node node) { + Optional<History.Event> up = node.history().event(History.Event.Type.up); + return up.isPresent() && snapshot.at().isBefore(up.get().at().plus(warmupDuration)); } } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/QuestMetricsDb.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/QuestMetricsDb.java index bf9c7bddda1..e0199b5ddaf 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/QuestMetricsDb.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/QuestMetricsDb.java @@ -176,7 +176,7 @@ public class QuestMetricsDb extends AbstractComponent implements MetricsDb { 
var snapshots = getNodeSnapshots(clock.instant().minus(period), hostnames, newContext()); return snapshots.entrySet().stream() .map(entry -> new NodeTimeseries(entry.getKey(), entry.getValue())) - .collect(Collectors.toList()); + .toList(); } catch (SqlException e) { throw new IllegalStateException("Could not read node timeseries data in Quest stored in " + dataDir, e); diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerInstance.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerInstance.java index 36e42fcf541..33c9edf694d 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerInstance.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerInstance.java @@ -24,11 +24,13 @@ public class LoadBalancerInstance { private final Set<Integer> ports; private final Set<String> networks; private final Set<Real> reals; - private final Optional<LoadBalancerSettings> settings; + private final LoadBalancerSettings settings; + private final Optional<PrivateServiceId> serviceId; private final CloudAccount cloudAccount; - public LoadBalancerInstance(Optional<DomainName> hostname, Optional<String> ipAddress, Optional<DnsZone> dnsZone, Set<Integer> ports, - Set<String> networks, Set<Real> reals, Optional<LoadBalancerSettings> settings, CloudAccount cloudAccount) { + public LoadBalancerInstance(Optional<DomainName> hostname, Optional<String> ipAddress, Optional<DnsZone> dnsZone, + Set<Integer> ports, Set<String> networks, Set<Real> reals, LoadBalancerSettings settings, + Optional<PrivateServiceId> serviceId, CloudAccount cloudAccount) { this.hostname = Objects.requireNonNull(hostname, "hostname must be non-null"); this.ipAddress = Objects.requireNonNull(ipAddress, "ip must be non-null"); this.dnsZone = Objects.requireNonNull(dnsZone, "dnsZone must be non-null"); @@ -36,6 +38,7 @@ public class LoadBalancerInstance { this.networks = 
ImmutableSortedSet.copyOf(Objects.requireNonNull(networks, "networks must be non-null")); this.reals = ImmutableSortedSet.copyOf(Objects.requireNonNull(reals, "targets must be non-null")); this.settings = Objects.requireNonNull(settings, "settings must be non-null"); + this.serviceId = Objects.requireNonNull(serviceId, "private service id must be non-null"); this.cloudAccount = Objects.requireNonNull(cloudAccount, "cloudAccount must be non-null"); if (hostname.isEmpty() == ipAddress.isEmpty()) { @@ -74,11 +77,16 @@ public class LoadBalancerInstance { return reals; } - /** Static user-configured settings of this load balancer, if set */ - public Optional<LoadBalancerSettings> settings() { + /** Static user-configured settings of this load balancer */ + public LoadBalancerSettings settings() { return settings; } + /** ID of any private endpoint service configured for this load balancer. */ + public Optional<PrivateServiceId> serviceId() { + return serviceId; + } + /** Cloud account of this load balancer */ public CloudAccount cloudAccount() { return cloudAccount; @@ -86,12 +94,11 @@ public class LoadBalancerInstance { /** Returns a copy of this with reals set to given reals */ public LoadBalancerInstance withReals(Set<Real> reals) { - return new LoadBalancerInstance(hostname, ipAddress, dnsZone, ports, networks, reals, settings, cloudAccount); + return new LoadBalancerInstance(hostname, ipAddress, dnsZone, ports, networks, reals, settings, serviceId, cloudAccount); } - /** Returns a copy of this with the given settings */ - public LoadBalancerInstance withSettings(LoadBalancerSettings settings) { - return new LoadBalancerInstance(hostname, ipAddress, dnsZone, ports, networks, reals, Optional.of(settings), cloudAccount); + public LoadBalancerInstance withServiceId(PrivateServiceId serviceId) { + return new LoadBalancerInstance(hostname, ipAddress, dnsZone, ports, networks, reals, settings, Optional.of(serviceId), cloudAccount); } private static Set<Integer> 
requirePorts(Set<Integer> ports) { diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerServiceMock.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerServiceMock.java index 2b6f64012b1..e0dd41f9008 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerServiceMock.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerServiceMock.java @@ -4,6 +4,7 @@ package com.yahoo.vespa.hosted.provision.lb; import ai.vespa.http.DomainName; import com.google.common.collect.ImmutableSet; import com.yahoo.config.provision.ClusterSpec; +import com.yahoo.config.provision.LoadBalancerSettings; import com.yahoo.config.provision.NodeType; import java.util.Collections; @@ -61,7 +62,8 @@ public class LoadBalancerServiceMock implements LoadBalancerService { Collections.singleton(4443), ImmutableSet.of("10.2.3.0/24", "10.4.5.0/24"), spec.reals(), - spec.settings(), + spec.settings().orElse(LoadBalancerSettings.empty), + spec.settings().map(__ -> PrivateServiceId.of("service")), spec.cloudAccount()); instances.put(id, instance); return instance; diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/PrivateServiceId.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/PrivateServiceId.java new file mode 100644 index 00000000000..53c53d5a254 --- /dev/null +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/PrivateServiceId.java @@ -0,0 +1,24 @@ +package com.yahoo.vespa.hosted.provision.lb; + +import ai.vespa.validation.PatternedStringWrapper; + +import java.util.regex.Pattern; + +/** + * ID of a private endpoint service, such as AWS's PrivateLink, or GCP's Private Service Connect. 
+ * + * @author jonmv + */ +public class PrivateServiceId extends PatternedStringWrapper<PrivateServiceId> { + + static final Pattern pattern = Pattern.compile("[a-z0-9._-]{1,63}"); + + private PrivateServiceId(String value) { + super(value, pattern, "Private service ID"); + } + + public static PrivateServiceId of(String value) { + return new PrivateServiceId(value); + } + +} diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java index 13407a83f53..c8fb1226b81 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java @@ -37,7 +37,8 @@ public class SharedLoadBalancerService implements LoadBalancerService { Set.of(4443), Set.of(), spec.reals(), - spec.settings(), + spec.settings().orElse(LoadBalancerSettings.empty), + Optional.empty(), spec.cloudAccount()); } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityChecker.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityChecker.java index e7e1e371047..603056856e2 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityChecker.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityChecker.java @@ -66,7 +66,7 @@ public class CapacityChecker { public List<Node> nodesFromHostnames(List<String> hostnames) { return hostnames.stream().filter(nodeMap::containsKey) .map(nodeMap::get) - .collect(Collectors.toList()); + .toList(); } @@ -79,7 +79,7 @@ public class CapacityChecker { var parentNames = hosts.stream().map(Node::hostname).collect(Collectors.toSet()); return allNodes.nodeType(NodeType.tenant).state(relevantNodeStates).stream() .filter(t -> 
parentNames.contains(t.parentHostname().orElse(""))) - .collect(Collectors.toList()); + .toList(); } private Optional<HostFailurePath> greedyHeuristicFindFailurePath(Map<Node, Integer> heuristic) { @@ -88,7 +88,7 @@ public class CapacityChecker { List<Node> parentRemovalPriorityList = heuristic.entrySet().stream() .sorted(this::hostMitigationOrder) .map(Map.Entry::getKey) - .collect(Collectors.toList()); + .toList(); for (int i = 1; i <= parentRemovalPriorityList.size(); i++) { List<Node> hostsToRemove = parentRemovalPriorityList.subList(0, i); @@ -170,7 +170,7 @@ public class CapacityChecker { Map.Entry::getKey, e -> e.getValue().stream() .map(Node::allocation).flatMap(Optional::stream) - .collect(Collectors.toList()) + .collect(Collectors.toCollection(ArrayList::new)) )); } @@ -189,7 +189,7 @@ public class CapacityChecker { .filter(h -> !hostsToRemove.contains(h)) .filter(host -> !host.status().wantToRetire() && !host.status().wantToFail()) - .collect(Collectors.toList()); + .toList(); if (validAllocationTargets.size() == 0) return Optional.of(HostRemovalFailure.none()); @@ -482,11 +482,11 @@ public class CapacityChecker { public AllocationFailureReasonList singularReasonFailures() { return new AllocationFailureReasonList(allocationFailureReasons.stream() - .filter(reason -> reason.numberOfReasons() == 1).collect(Collectors.toList())); + .filter(reason -> reason.numberOfReasons() == 1).toList()); } public AllocationFailureReasonList multipleReasonFailures() { return new AllocationFailureReasonList(allocationFailureReasons.stream() - .filter(reason -> reason.numberOfReasons() > 1).collect(Collectors.toList())); + .filter(reason -> reason.numberOfReasons() > 1).toList()); } public long size() { return allocationFailureReasons.size(); diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ExpeditedChangeApplicationMaintainer.java 
b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ExpeditedChangeApplicationMaintainer.java index c99c18262ec..d048f43973a 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ExpeditedChangeApplicationMaintainer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ExpeditedChangeApplicationMaintainer.java @@ -85,7 +85,7 @@ public class ExpeditedChangeApplicationMaintainer extends ApplicationMaintainer .map(event -> event.type() + (event.agent() == Agent.system ? "" : " by " + event.agent()))) .sorted() .distinct() - .collect(Collectors.toList()); + .toList(); return reasons.isEmpty() ? Optional.empty() : diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java index 2fb2f016c95..9e674c573da 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java @@ -86,7 +86,7 @@ public class FailedExpirer extends NodeRepositoryMaintainer { /** Recycle the nodes matching condition, and remove those nodes from the nodes list. 
*/ private void recycleIf(Predicate<Node> condition, List<Node> failedNodes, NodeList allNodes) { - List<Node> nodesToRecycle = failedNodes.stream().filter(condition).collect(Collectors.toList()); + List<Node> nodesToRecycle = failedNodes.stream().filter(condition).toList(); failedNodes.removeAll(nodesToRecycle); recycle(nodesToRecycle, allNodes); } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java index 7310fe63736..c9f53b69dc6 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java @@ -154,7 +154,7 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer { return false; } }) - .collect(Collectors.toList()); + .toList(); } private static List<Node> candidatesForRemoval(List<Node> nodes) { @@ -295,7 +295,7 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer { ClusterMembership.from(clusterSpec, index.next()), nodeResources, nodeRepository().clock().instant())) - .collect(Collectors.toList()); + .toList(); } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostRetirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostRetirer.java index bdc9f402d37..7e238470a0c 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostRetirer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostRetirer.java @@ -47,7 +47,7 @@ public class HostRetirer extends NodeRepositoryMaintainer { .map(Node::cloudAccount) .filter(cloudAccount -> !cloudAccount.isUnspecified()) .distinct() - .collect(Collectors.toList()); + .toList(); Map<String, List<HostEvent>> eventsByHostId = 
hostProvisioner.hostEventsIn(cloudAccounts).stream() .collect(Collectors.groupingBy(HostEvent::hostId)); Instant now = nodeRepository().clock().instant(); diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java index 0344c05c1c1..0264d0df837 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java @@ -112,9 +112,9 @@ public class LoadBalancerExpirer extends NodeRepositoryMaintainer { attempts.add(1); LOG.log(Level.INFO, () -> "Removing reals from inactive load balancer " + lb.id() + ": " + Sets.difference(lb.instance().get().reals(), reals)); service.create(new LoadBalancerSpec(lb.id().application(), lb.id().cluster(), reals, - null, lb.instance().get().cloudAccount()), + lb.instance().get().settings(), lb.instance().get().cloudAccount()), true); - db.writeLoadBalancer(lb.with(lb.instance().map(instance -> instance.withReals(reals).withSettings(lb.instance().get().settings().get()))), lb.state()); + db.writeLoadBalancer(lb.with(lb.instance().map(instance -> instance.withReals(reals))), lb.state()); } catch (Exception e) { failed.add(lb.id()); lastException.set(e); diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java index 32eac49a288..203bb664c1c 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java @@ -137,7 +137,7 @@ public class NodeFailer extends NodeRepositoryMaintainer { .filter(report -> report.getType().hostShouldBeFailed()) // The generated string is built 
from the report's ID, created time, and description only. .map(report -> report.getReportId() + " reported " + report.getCreatedTime() + ": " + report.getDescription()) - .collect(Collectors.toList()); + .toList(); } /** Returns whether node has any kind of hardware issue */ @@ -170,16 +170,11 @@ public class NodeFailer extends NodeRepositoryMaintainer { * But we refuse to fail out config(host)/controller(host) */ private boolean failAllowedFor(NodeType nodeType) { - switch (nodeType) { - case tenant: - case host: - return true; - case proxy: - case proxyhost: - return nodeRepository().nodes().list(Node.State.failed).nodeType(nodeType).isEmpty(); - default: - return false; - } + return switch (nodeType) { + case tenant, host -> true; + case proxy, proxyhost -> nodeRepository().nodes().list(Node.State.failed).nodeType(nodeType).isEmpty(); + default -> false; + }; } /** @@ -286,7 +281,7 @@ public class NodeFailer extends NodeRepositoryMaintainer { public enum ThrottlePolicy { - hosted(Duration.ofDays(1), 0.03, 2), + hosted(Duration.ofDays(1), 0.04, 2), disabled(Duration.ZERO, 0, 0); private final Duration throttleWindow; diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooter.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooter.java index 3cd97c64e4d..27945fad7f5 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooter.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooter.java @@ -42,7 +42,7 @@ public class NodeRebooter extends NodeRepositoryMaintainer { List<Node> nodesToReboot = nodeRepository().nodes().list(Node.State.active, Node.State.ready).stream() .filter(node -> node.type().isHost()) .filter(this::shouldReboot) - .collect(Collectors.toList()); + .toList(); if (!nodesToReboot.isEmpty()) nodeRepository().nodes().reboot(NodeListFilter.from(nodesToReboot)); diff --git 
a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/History.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/History.java index 7da10ce085a..93b28d07471 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/History.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/History.java @@ -136,7 +136,7 @@ public class History { * This returns a copy of this history with all application level events removed and the log unchanged. */ private History withoutApplicationEvents() { - return new History(events().stream().filter(e -> ! e.type().isApplicationLevel()).collect(Collectors.toList()), log); + return new History(events().stream().filter(e -> ! e.type().isApplicationLevel()).toList(), log); } /** Returns the empty history */ diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/IP.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/IP.java index 54e48f881cd..2693fff3b39 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/IP.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/IP.java @@ -380,7 +380,7 @@ public class IP { String hostname6 = resolver.resolveHostname(ipv6Address).orElseThrow(() -> new IllegalArgumentException("Could not resolve IP address: " + ipv6Address)); List<String> ipv4Addresses = resolver.resolveAll(hostname6).stream() .filter(IP::isV4) - .collect(Collectors.toList()); + .toList(); if (ipv4Addresses.size() > 1) { throw new IllegalArgumentException("Hostname " + hostname6 + " resolved to more than 1 IPv4 address: " + ipv4Addresses); } @@ -407,7 +407,7 @@ public class IP { String hostname4 = resolver.resolveHostname(ipAddress).orElseThrow(() -> new IllegalArgumentException("Could not resolve IP address: " + ipAddress)); List<String> addresses = resolver.resolveAll(hostname4).stream() .filter(IP::isV4) - .collect(Collectors.toList()); + .toList(); if 
(addresses.size() != 1) { throw new IllegalArgumentException("Hostname " + hostname4 + " did not resolve to exactly 1 address. " + "Resolved: " + addresses); diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java index b98c9c97437..345dccf5faf 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java @@ -264,7 +264,7 @@ public class Nodes { private List<Node> fail(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) { nodes = nodes.stream() .map(n -> n.withWantToFail(false, agent, clock.instant())) - .collect(Collectors.toList()); + .toList(); return db.writeTo(Node.State.failed, nodes, agent, Optional.of(reason), transaction); } @@ -290,7 +290,7 @@ public class Nodes { illegal("Could not deallocate " + nodeToDirty + ": " + hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]"); - return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList()); + return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).toList(); } /** @@ -305,7 +305,7 @@ public class Nodes { } public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) { - return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList()); + return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).toList(); } public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) { @@ -419,7 +419,7 @@ public class Nodes { NestedTransaction transaction = new NestedTransaction(); List<Node> moved = list().childrenOf(hostname).asList().stream() .map(child -> move(child.hostname(), toState, agent, false, reason, transaction)) - 
.collect(Collectors.toList()); + .collect(Collectors.toCollection(ArrayList::new)); moved.add(move(hostname, toState, agent, false, reason, transaction)); transaction.commit(); return moved; diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializer.java index 2289ba4a0ea..74088e8d269 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializer.java @@ -175,7 +175,7 @@ public class ApplicationSerializer { } private static List<ScalingEvent> scalingEventsFromSlime(Inspector eventArray) { - return SlimeUtils.entriesStream(eventArray).map(item -> scalingEventFromSlime(item)).collect(Collectors.toList()); + return SlimeUtils.entriesStream(eventArray).map(item -> scalingEventFromSlime(item)).toList(); } private static void toSlime(ScalingEvent event, Cursor object) { diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabase.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabase.java index 5fe9eef7e73..aa935eaf272 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabase.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabase.java @@ -63,7 +63,7 @@ public class CuratorDatabase { .filter(hostAndPort -> !hostAndPort.isEmpty()) .map(hostAndPort -> hostAndPort.split(":")[0]) .map(HostName::of) - .collect(Collectors.toList()); + .toList(); } /** Create a reentrant lock */ diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java index 
7f24f5e862b..47975c8354a 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java @@ -349,7 +349,7 @@ public class CuratorDatabaseClient { public List<ApplicationId> readApplicationIds() { return db.getChildren(applicationsPath).stream() .map(ApplicationId::fromSerializedForm) - .collect(Collectors.toList()); + .toList(); } public Optional<Application> readApplication(ApplicationId id) { diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializer.java index 22f6ec97107..3d352f5596b 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializer.java @@ -13,6 +13,7 @@ import com.yahoo.vespa.hosted.provision.lb.DnsZone; import com.yahoo.vespa.hosted.provision.lb.LoadBalancer; import com.yahoo.vespa.hosted.provision.lb.LoadBalancerId; import com.yahoo.vespa.hosted.provision.lb.LoadBalancerInstance; +import com.yahoo.vespa.hosted.provision.lb.PrivateServiceId; import com.yahoo.vespa.hosted.provision.lb.Real; import java.io.IOException; @@ -48,6 +49,7 @@ public class LoadBalancerSerializer { private static final String realsField = "reals"; private static final String ipAddressField = "ipAddress"; private static final String portField = "port"; + private static final String serviceIdField = "serviceId"; private static final String cloudAccountField = "cloudAccount"; private static final String settingsField = "settings"; private static final String allowedUrnsField = "allowedUrns"; @@ -75,10 +77,13 @@ public class LoadBalancerSerializer { })); loadBalancer.instance() 
.map(LoadBalancerInstance::settings) - .filter(settings -> ! settings.get().isEmpty()) - .ifPresent(settings -> settings.get().allowedUrns().forEach(root.setObject(settingsField) + .filter(settings -> ! settings.isEmpty()) + .ifPresent(settings -> settings.allowedUrns().forEach(root.setObject(settingsField) .setArray(allowedUrnsField)::addString)); loadBalancer.instance() + .flatMap(LoadBalancerInstance::serviceId) + .ifPresent(serviceId -> root.setString(serviceIdField, serviceId.value())); + loadBalancer.instance() .map(LoadBalancerInstance::cloudAccount) .filter(cloudAccount -> !cloudAccount.isUnspecified()) .ifPresent(cloudAccount -> root.setString(cloudAccountField, cloudAccount.value())); @@ -110,9 +115,10 @@ public class LoadBalancerSerializer { Optional<String> ipAddress = optionalString(object.field(lbIpAddressField), Function.identity()).filter(s -> !s.isEmpty()); Optional<DnsZone> dnsZone = optionalString(object.field(dnsZoneField), DnsZone::new); LoadBalancerSettings settings = loadBalancerSettings(object.field(settingsField)); + Optional<PrivateServiceId> serviceId = optionalString(object.field(serviceIdField), PrivateServiceId::of); CloudAccount cloudAccount = optionalString(object.field(cloudAccountField), CloudAccount::from).orElse(CloudAccount.empty); Optional<LoadBalancerInstance> instance = hostname.isEmpty() && ipAddress.isEmpty() ? 
Optional.empty() : - Optional.of(new LoadBalancerInstance(hostname, ipAddress, dnsZone, ports, networks, reals, Optional.of(settings), cloudAccount)); + Optional.of(new LoadBalancerInstance(hostname, ipAddress, dnsZone, ports, networks, reals, settings, serviceId, cloudAccount)); return new LoadBalancer(LoadBalancerId.fromSerializedForm(object.field(idField).asString()), instance, diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java index 3ee25c23f13..94707f7f429 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java @@ -400,14 +400,14 @@ public class NodeSerializer { private List<Address> addressesFromSlime(Inspector object) { return SlimeUtils.entriesStream(object.field(containersKey)) .map(elem -> new Address(elem.field(containerHostnameKey).asString())) - .collect(Collectors.toList()); + .toList(); } private List<TrustStoreItem> trustedCertificatesFromSlime(Inspector object) { return SlimeUtils.entriesStream(object.field(trustedCertificatesKey)) .map(elem -> new TrustStoreItem(elem.field(fingerprintKey).asString(), Instant.ofEpochMilli(elem.field(expiresKey).asLong()))) - .collect(Collectors.toList()); + .toList(); } // ----------------- Enum <-> string mappings ---------------------------------------- diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java index 5e01ba5b0a6..40bad7022d6 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java @@ -118,7 +118,7 @@ 
public class GroupPreparer { List<NodeCandidate> candidates = provisionedHosts.stream() .map(host -> NodeCandidate.createNewExclusiveChild(host.generateNode(), host.generateHost())) - .collect(Collectors.toList()); + .toList(); allocation.offer(candidates); }; diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/InfraDeployerImpl.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/InfraDeployerImpl.java index 605ef280c2e..178ea6ed514 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/InfraDeployerImpl.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/InfraDeployerImpl.java @@ -116,7 +116,7 @@ public class InfraDeployerImpl implements InfraDeployer { duperModel.infraApplicationActivated( application.getApplicationId(), - hostSpecs.stream().map(HostSpec::hostname).map(HostName::of).collect(Collectors.toList())); + hostSpecs.stream().map(HostSpec::hostname).map(HostName::of).toList()); logger.log(Level.FINE, () -> generateActivationLogMessage(hostSpecs, application.getApplicationId())); } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java index 17fc83bce80..8794d0b373e 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java @@ -34,6 +34,7 @@ import java.util.Collections; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.function.Function; @@ -41,6 +42,7 @@ import java.util.logging.Level; import java.util.logging.Logger; import java.util.stream.Collectors; +import static 
java.util.Objects.requireNonNullElse; import static java.util.stream.Collectors.groupingBy; import static java.util.stream.Collectors.reducing; @@ -94,7 +96,7 @@ public class LoadBalancerProvisioner { ClusterSpec.Id clusterId = effectiveId(cluster); LoadBalancerId loadBalancerId = requireNonClashing(new LoadBalancerId(application, clusterId)); NodeList nodes = nodesOf(clusterId, application); - prepare(loadBalancerId, nodes, cluster.loadBalancerSettings(), requestedNodes.cloudAccount()); + prepare(loadBalancerId, nodes, requestedNodes.cloudAccount()); } } @@ -109,18 +111,21 @@ public class LoadBalancerProvisioner { * Calling this when no load balancer has been prepared for given cluster is a no-op. */ public void activate(Set<ClusterSpec> clusters, NodeList newActive, ApplicationTransaction transaction) { - Set<ClusterSpec.Id> activatingClusters = clusters.stream() - .map(LoadBalancerProvisioner::effectiveId) - .collect(Collectors.toSet()); + Map<ClusterSpec.Id, LoadBalancerSettings> activatingClusters = clusters.stream() + .collect(groupingBy(LoadBalancerProvisioner::effectiveId, + reducing(LoadBalancerSettings.empty, + ClusterSpec::loadBalancerSettings, + (o, n) -> o.isEmpty() ? n : o))); for (var cluster : loadBalancedClustersOf(newActive).entrySet()) { - if ( ! activatingClusters.contains(cluster.getKey())) continue; + if ( ! activatingClusters.containsKey(cluster.getKey())) + continue; Node clusterNode = cluster.getValue().first().get(); if ( ! shouldProvision(transaction.application(), clusterNode.type(), clusterNode.allocation().get().membership().cluster().type())) continue; - activate(transaction, cluster.getKey(), cluster.getValue()); + activate(transaction, cluster.getKey(), activatingClusters.get(cluster.getKey()), cluster.getValue()); } // Deactivate any surplus load balancers, i.e. 
load balancers for clusters that have been removed - var surplusLoadBalancers = surplusLoadBalancersOf(transaction.application(), activatingClusters); + var surplusLoadBalancers = surplusLoadBalancersOf(transaction.application(), activatingClusters.keySet()); deactivate(surplusLoadBalancers, transaction.nested()); } @@ -159,7 +164,7 @@ public class LoadBalancerProvisioner { var now = nodeRepository.clock().instant(); var deactivatedLoadBalancers = loadBalancers.stream() .map(lb -> lb.with(LoadBalancer.State.inactive, now)) - .collect(Collectors.toList()); + .toList(); db.writeLoadBalancers(deactivatedLoadBalancers, LoadBalancer.State.active, transaction); } @@ -185,10 +190,10 @@ public class LoadBalancerProvisioner { return loadBalancerId; } - private void prepare(LoadBalancerId id, NodeList nodes, LoadBalancerSettings loadBalancerSettings, CloudAccount cloudAccount) { + private void prepare(LoadBalancerId id, NodeList nodes, CloudAccount cloudAccount) { Instant now = nodeRepository.clock().instant(); Optional<LoadBalancer> loadBalancer = db.readLoadBalancer(id); - Optional<LoadBalancerInstance> instance = provisionInstance(id, nodes, loadBalancer, loadBalancerSettings, cloudAccount, false); + Optional<LoadBalancerInstance> instance = provisionInstance(id, nodes, loadBalancer, null, cloudAccount); LoadBalancer newLoadBalancer; LoadBalancer.State fromState = null; if (loadBalancer.isEmpty()) { @@ -207,17 +212,14 @@ public class LoadBalancerProvisioner { requireInstance(id, instance, cloudAccount); } - private void activate(ApplicationTransaction transaction, ClusterSpec.Id cluster, NodeList nodes) { + private void activate(ApplicationTransaction transaction, ClusterSpec.Id cluster, LoadBalancerSettings settings, NodeList nodes) { Instant now = nodeRepository.clock().instant(); LoadBalancerId id = new LoadBalancerId(transaction.application(), cluster); Optional<LoadBalancer> loadBalancer = db.readLoadBalancer(id); if (loadBalancer.isEmpty()) throw new 
IllegalArgumentException("Could not activate load balancer that was never prepared: " + id); if (loadBalancer.get().instance().isEmpty()) throw new IllegalArgumentException("Activating " + id + ", but prepare never provisioned a load balancer instance"); - Optional<LoadBalancerInstance> instance = provisionInstance(id, nodes, loadBalancer, - loadBalancer.get().instance().get().settings().orElseThrow(), - loadBalancer.get().instance().get().cloudAccount(), - true); + Optional<LoadBalancerInstance> instance = provisionInstance(id, nodes, loadBalancer, settings, loadBalancer.get().instance().get().cloudAccount()); LoadBalancer.State state = instance.isPresent() ? LoadBalancer.State.active : loadBalancer.get().state(); LoadBalancer newLoadBalancer = loadBalancer.get().with(instance).with(state, now); db.writeLoadBalancers(List.of(newLoadBalancer), loadBalancer.get().state(), transaction.nested()); @@ -228,8 +230,7 @@ public class LoadBalancerProvisioner { private Optional<LoadBalancerInstance> provisionInstance(LoadBalancerId id, NodeList nodes, Optional<LoadBalancer> currentLoadBalancer, LoadBalancerSettings loadBalancerSettings, - CloudAccount cloudAccount, - boolean activate) { + CloudAccount cloudAccount) { boolean shouldDeactivateRouting = deactivateRouting.with(FetchVector.Dimension.APPLICATION_ID, id.application().serializedForm()) .value(); @@ -239,14 +240,20 @@ public class LoadBalancerProvisioner { } else { reals = realsOf(nodes); } - LoadBalancerSettings settingsToUse = activate ? 
loadBalancerSettings : null; - if (isUpToDate(currentLoadBalancer, reals, settingsToUse)) - return currentLoadBalancer.get().instance().map(instance -> instance.withSettings(loadBalancerSettings)); + if (isUpToDate(currentLoadBalancer, reals, loadBalancerSettings)) + return currentLoadBalancer.get().instance(); log.log(Level.INFO, () -> "Provisioning instance for " + id + ", targeting: " + reals); try { - return Optional.of(service.create(new LoadBalancerSpec(id.application(), id.cluster(), reals, settingsToUse, cloudAccount), - shouldDeactivateRouting || allowEmptyReals(currentLoadBalancer)) - .withSettings(loadBalancerSettings)); + // Override settings at activation, otherwise keep existing ones. + LoadBalancerSettings settings = loadBalancerSettings != null ? loadBalancerSettings + : currentLoadBalancer.flatMap(LoadBalancer::instance) + .map(LoadBalancerInstance::settings) + .orElse(null); + LoadBalancerInstance created = service.create(new LoadBalancerSpec(id.application(), id.cluster(), reals, settings, cloudAccount), + shouldDeactivateRouting || allowEmptyReals(currentLoadBalancer)); + if (created.serviceId().isEmpty() && currentLoadBalancer.flatMap(LoadBalancer::instance).flatMap(LoadBalancerInstance::serviceId).isPresent()) + created = created.withServiceId(currentLoadBalancer.flatMap(LoadBalancer::instance).flatMap(LoadBalancerInstance::serviceId).get()); + return Optional.of(created); } catch (Exception e) { log.log(Level.WARNING, e, () -> "Could not (re)configure " + id + ", targeting: " + reals + ". 
The operation will be retried on next deployment"); @@ -306,7 +313,7 @@ public class LoadBalancerProvisioner { if (loadBalancer.isEmpty()) return false; if (loadBalancer.get().instance().isEmpty()) return false; return loadBalancer.get().instance().get().reals().equals(reals) - && (loadBalancerSettings == null || loadBalancer.get().instance().get().settings().get().equals(loadBalancerSettings)); + && (loadBalancerSettings == null || loadBalancer.get().instance().get().settings().equals(loadBalancerSettings)); } /** Returns whether to allow given load balancer to have no reals */ diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java index 8d350e304a2..61ca7c914c0 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java @@ -410,7 +410,7 @@ class NodeAllocation { nodes.put(candidate.toNode().hostname(), candidate); } - return nodes.values().stream().map(n -> n.toNode()).collect(Collectors.toList()); + return nodes.values().stream().map(n -> n.toNode()).toList(); } List<Node> reservableNodes() { @@ -427,7 +427,7 @@ class NodeAllocation { return nodes.values().stream() .filter(predicate) .map(n -> n.toNode()) - .collect(Collectors.toList()); + .toList(); } /** Returns the number of nodes accepted this far */ @@ -440,7 +440,7 @@ class NodeAllocation { /** Prefer to retire nodes we want the least */ private List<NodeCandidate> byRetiringPriority(Collection<NodeCandidate> candidates) { - return candidates.stream().sorted(Comparator.reverseOrder()).collect(Collectors.toList()); + return candidates.stream().sorted(Comparator.reverseOrder()).toList(); } /** Prefer to unretire nodes we don't want to retire, and otherwise those with lower index */ @@ -448,7 +448,7 @@ class 
NodeAllocation { return candidates.stream() .sorted(Comparator.comparing(NodeCandidate::wantToRetire) .thenComparing(n -> n.allocation().get().membership().index())) - .collect(Collectors.toList()); + .toList(); } public String allocationFailureDetails() { diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java index 820a654c620..139e8848ab1 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java @@ -78,7 +78,7 @@ class Preparer { NodeList activeHosts = allNodesAndHosts.nodes().state(Node.State.active).parents().nodeType(requestedNodes.type().hostType()); accepted = accepted.stream() .filter(node -> node.parentHostname().isEmpty() || activeHosts.parentOf(node).isPresent()) - .collect(Collectors.toList()); + .toList(); } replace(acceptedNodes, accepted); diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/LoadBalancersResponse.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/LoadBalancersResponse.java index 879c8e6a9ee..fdf69b60690 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/LoadBalancersResponse.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/LoadBalancersResponse.java @@ -75,6 +75,11 @@ public class LoadBalancersResponse extends SlimeJsonResponse { realObject.setLong("port", real.port()); }); }); + lb.instance().ifPresent(instance -> { + if ( ! 
instance.settings().isEmpty()) + instance.settings().allowedUrns().forEach(lbObject.setObject("settings").setArray("allowedUrns")::addString); + instance.serviceId().ifPresent(serviceId -> lbObject.setString("serviceId", serviceId.value())); + }); lb.instance() .map(LoadBalancerInstance::cloudAccount) .filter(cloudAccount -> !cloudAccount.isUnspecified()) diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/LocksResponse.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/LocksResponse.java index 8a66a9394b2..42904bb6d68 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/LocksResponse.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/LocksResponse.java @@ -77,7 +77,7 @@ public class LocksResponse extends HttpResponse { List<RecordedLockAttempts> historicRecordings = LockStats.getGlobal().getHistoricRecordings().stream() .sorted(Comparator.comparing(RecordedLockAttempts::duration).reversed()) - .collect(Collectors.toList()); + .toList(); if (!historicRecordings.isEmpty()) { Cursor recordingsCursor = root.setArray("recordings"); historicRecordings.forEach(recording -> setRecording(recordingsCursor.addObject(), recording)); diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodePatcher.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodePatcher.java index e57868dac54..f77b98cc02c 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodePatcher.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodePatcher.java @@ -294,7 +294,7 @@ public class NodePatcher { List<TrustStoreItem> trustStoreItems = SlimeUtils.entriesStream(inspector) .map(TrustStoreItem::fromSlime) - .collect(Collectors.toList()); + .toList(); return node.with(trustStoreItems); } diff --git 
a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockDeployer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockDeployer.java index 20a1621b1d2..63386449f0c 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockDeployer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockDeployer.java @@ -232,7 +232,7 @@ public class MockDeployer implements Deployer { return clusterContexts.stream() .map(clusterContext -> clusterContext.prepare(provisioner)) .flatMap(List::stream) - .collect(Collectors.toList()); + .toList(); } } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java index 15ee064b59f..20612e2ad66 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java @@ -184,7 +184,7 @@ public class MockHostProvisioner implements HostProvisioner { : hostType.childNodeType().name() + i; return new Address(hostname); }) - .collect(Collectors.toList()); + .toList(); } public Node withIpAssigned(Node node) { diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java index a19e48cfa6b..4794c0b48b5 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java @@ -14,6 +14,7 @@ import com.yahoo.config.provision.DockerImage; import com.yahoo.config.provision.Flavor; import com.yahoo.config.provision.HostSpec; import 
com.yahoo.config.provision.InstanceName; +import com.yahoo.config.provision.LoadBalancerSettings; import com.yahoo.config.provision.NodeFlavors; import com.yahoo.config.provision.NodeResources; import com.yahoo.config.provision.NodeType; @@ -184,7 +185,7 @@ public class MockNodeRepository extends NodeRepository { activate(provisioner.prepare(zoneApp, zoneCluster, Capacity.fromRequiredNodeType(NodeType.host), null), zoneApp, provisioner); ApplicationId app1Id = ApplicationId.from(TenantName.from("tenant1"), ApplicationName.from("application1"), InstanceName.from("instance1")); - ClusterSpec cluster1Id = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("id1")).vespaVersion("6.42").build(); + ClusterSpec cluster1Id = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("id1")).vespaVersion("6.42").loadBalancerSettings(new LoadBalancerSettings(List.of("arne"))).build(); activate(provisioner.prepare(app1Id, cluster1Id, Capacity.from(new ClusterResources(2, 1, new NodeResources(2, 8, 50, 1)), diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeListMicroBenchmarkTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeListMicroBenchmarkTest.java index b7f9dcb8e8a..85338bdb2b4 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeListMicroBenchmarkTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeListMicroBenchmarkTest.java @@ -38,7 +38,7 @@ public class NodeListMicroBenchmarkTest { public void testChildrenOf() { List<Node> nodes = createHosts(); - List<Node> childNodes = nodes.stream().map(host -> createNodes(host.hostname())).flatMap(Collection::stream).collect(Collectors.toList()); + List<Node> childNodes = nodes.stream().map(host -> createNodes(host.hostname())).flatMap(Collection::stream).toList(); nodes.addAll(childNodes); NodeList nodeList = new NodeList(nodes, false); diff --git 
a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java index 3dd76c76cac..e569e9b0382 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java @@ -120,7 +120,7 @@ public class RealDataScenarioTest { List<HostSpec> hostSpecs = IntStream.range(0, capacities.length) .mapToObj(i -> tester.provisioner().prepare(app, specs[i], capacities[i], log::log)) .flatMap(Collection::stream) - .collect(Collectors.toList()); + .toList(); NestedTransaction transaction = new NestedTransaction(); tester.provisioner().activate(hostSpecs, new ActivationContext(0), new ApplicationTransaction(new ProvisionLock(app, () -> {}), transaction)); transaction.commit(); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java index fc837ee54b4..ff72d22bb39 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java @@ -32,7 +32,7 @@ public class AutoscalingTest { fixture.loader().applyCpuLoad(0.7f, 10); var scaledResources = fixture.tester().assertResources("Scaling up since resource usage is too high", - 9, 1, 3.6, 8.3, 37.7, + 7, 1, 4.6, 11.1, 55.1, fixture.autoscale()); fixture.deploy(Capacity.from(scaledResources)); @@ -49,7 +49,7 @@ public class AutoscalingTest { fixture.tester().clock().advance(Duration.ofDays(2)); fixture.loader().applyCpuLoad(0.1f, 10); fixture.tester().assertResources("Scaling cpu down since usage has gone down significantly", - 8, 1, 1.0, 8.5, 38.5, + 6, 1, 1.3, 11.8, 78.6, fixture.autoscale()); } @@ -73,7 +73,7 @@ public 
class AutoscalingTest { fixture.loader().applyLoad(new Load(0.1, 0.1, 0.1), 3); fixture.loader().applyLoad(new Load(1.0, 1.0, 1.0), 1); fixture.tester().assertResources("Scaling up since resource usage is too high", - 8, 1, 5.3, 17.7, 89.4, + 8, 1, 5.3, 17.7, 93.6, fixture.autoscale()); } @@ -92,13 +92,29 @@ public class AutoscalingTest { fixture.currentResources().advertisedResources()); } + @Test + public void initial_deployment_with_host_sharing_flag_and_too_small_min() { + var min = new ClusterResources(1, 1, new NodeResources(0.5, 4.0, 10, 0.1)); + var max = new ClusterResources(1, 1, new NodeResources(2.0, 8.0, 50, 0.1)); + var fixture = AutoscalingTester.fixture() + .awsSetup(false, Environment.test) + .clusterType(ClusterSpec.Type.container) + .capacity(Capacity.from(min, max)) + .initialResources(Optional.empty()) + .hostSharingFlag() + .build(); + fixture.tester().assertResources("Initial resources at min, since flag turns on host sharing", + 1, 1, 0.5, 4.0, 10.0, + fixture.currentResources().advertisedResources()); + } + /** When scaling up, disregard underutilized dimensions (memory here) */ @Test public void test_only_autoscaling_up_quickly() { var fixture = AutoscalingTester.fixture().awsProdSetup(true).build(); fixture.loader().applyLoad(new Load(1.0, 0.1, 1.0), 10); fixture.tester().assertResources("Scaling up (only) since resource usage is too high", - 8, 1, 7.1, 9.5, 89.4, + 7, 1, 8.2, 10.7, 99.5, fixture.autoscale()); } @@ -109,7 +125,7 @@ public class AutoscalingTest { fixture.tester.clock().advance(Duration.ofDays(2)); fixture.loader().applyLoad(new Load(1.0, 0.1, 1.0), 10); fixture.tester().assertResources("Scaling cpu and disk up and memory down", - 7, 1, 8.2, 4.0, 104.1, + 7, 1, 8.2, 4.0, 99.5, fixture.autoscale()); } @@ -119,7 +135,7 @@ public class AutoscalingTest { fixture.tester.clock().advance(Duration.ofDays(2)); fixture.loader().applyLoad(new Load(1.0, 0.1, 1.0), 10); fixture.tester().assertResources("Scaling cpu and disk up, memory 
follows", - 16, 1, 4, 8.0, 41.1, + 16, 1, 4, 8.0, 28.3, fixture.autoscale()); } @@ -130,7 +146,7 @@ public class AutoscalingTest { fixture.loader().applyCpuLoad(0.70, 1); fixture.loader().applyCpuLoad(0.01, 100); fixture.tester().assertResources("Scaling up since peak resource usage is too high", - 9, 1, 3.8, 8.3, 37.7, + 8, 1, 4.3, 9.5, 47.2, fixture.autoscale()); } @@ -141,7 +157,7 @@ public class AutoscalingTest { fixture.loader().applyCpuLoad(0.70, 1); fixture.loader().applyCpuLoad(0.01, 100); fixture.tester().assertResources("Scaling up since peak resource usage is too high", - 10, 1, 4, 8.0, 32.9, + 10, 1, 4, 8.0, 22.7, fixture.autoscale()); } @@ -180,13 +196,13 @@ public class AutoscalingTest { fixture.loader().applyCpuLoad(0.25f, 120); ClusterResources scaledResources = fixture.tester().assertResources("Scaling cpu up", - 4, 1, 3.3, 13.3, 60.3, + 3, 1, 5, 13.3, 66.1, fixture.autoscale()); fixture.deploy(Capacity.from(scaledResources)); fixture.deactivateRetired(Capacity.from(scaledResources)); fixture.loader().applyCpuLoad(0.1f, 120); fixture.tester().assertResources("Scaling down since cpu usage has gone down", - 3, 1, 2.5, 10.0, 45.3, + 3, 1, 2.5, 9.2, 61.1, fixture.autoscale()); } @@ -224,7 +240,7 @@ public class AutoscalingTest { @Test public void autoscaling_target_preserves_any() { - NodeResources resources = new NodeResources(1, 10, 10, 1); + NodeResources resources = new NodeResources(1, 100, 100, 1); var capacity = Capacity.from(new ClusterResources( 2, 1, resources.with(DiskSpeed.any)), new ClusterResources( 10, 1, resources.with(DiskSpeed.any))); var fixture = AutoscalingTester.fixture() @@ -272,7 +288,7 @@ public class AutoscalingTest { fixture.tester().clock().advance(Duration.ofDays(2)); fixture.loader().applyLoad(new Load(0.05f, 0.05f, 0.05f), 120); fixture.tester().assertResources("Scaling down to limit since resource usage is low", - 4, 1, 1.8, 7.4, 10.6, + 4, 1, 1.8, 7.4, 23.5, fixture.autoscale()); } @@ -359,7 +375,7 @@ public class 
AutoscalingTest { fixture.tester().clock().advance(Duration.ofDays(2)); fixture.loader().applyCpuLoad(1.0, 120); fixture.tester().assertResources("Suggesting above capacity limit", - 8, 1, 6.2, 7.6, 34.3, + 8, 1, 6.2, 7.6, 37.8, fixture.tester().suggest(fixture.applicationId, fixture.clusterSpec.id(), min, min)); } @@ -370,7 +386,7 @@ public class AutoscalingTest { fixture.tester().clock().advance(Duration.ofDays(2)); fixture.loader().applyCpuLoad(1.0, 120); fixture.tester().assertResources("Suggesting above capacity limit", - 13, 1, 4, 8, 19.7, + 13, 1, 4, 8, 13.6, fixture.tester().suggest(fixture.applicationId, fixture.clusterSpec.id(), min, min)); } @@ -405,7 +421,7 @@ public class AutoscalingTest { fixture.tester().clock().advance(Duration.ofDays(2)); fixture.loader().applyCpuLoad(0.9, 120); fixture.tester().assertResources("Scaling up to 2 nodes, scaling memory and disk down at the same time", - 10, 5, 7.7, 40.6, 40.1, + 10, 5, 7.7, 40.6, 47.8, fixture.autoscale()); } @@ -424,7 +440,7 @@ public class AutoscalingTest { fixture.tester().clock().advance(timePassed.negated()); fixture.loader().addLoadMeasurements(10, t -> t == 0 ? 20.0 : 10.0, t -> 1.0); fixture.tester().assertResources("Scaling up cpu, others down, changing to 1 group is cheaper", - 8, 1, 2.8, 36.2, 36, + 8, 1, 2.8, 36.2, 56.4, fixture.autoscale()); } @@ -444,7 +460,7 @@ public class AutoscalingTest { fixture.tester().clock().advance(timePassed.negated()); fixture.loader().addLoadMeasurements(10, t -> t == 0 ? 
20.0 : 10.0, t -> 100.0); fixture.tester().assertResources("Scaling down since resource usage is too high, changing to 1 group is cheaper", - 6, 1, 1.0, 50.7, 50.4, + 6, 1, 1.0, 50.7, 79.0, fixture.autoscale()); } @@ -461,7 +477,7 @@ public class AutoscalingTest { fixture.tester().clock().advance(Duration.ofDays(1)); fixture.loader().applyMemLoad(1.0, 1000); fixture.tester().assertResources("Increase group size to reduce memory load", - 8, 2, 4.5, 97.1, 62.7, + 8, 2, 4.5, 97.1, 74.7, fixture.autoscale()); } @@ -478,7 +494,7 @@ public class AutoscalingTest { fixture.tester().clock().advance(Duration.ofDays(2)); fixture.loader().applyLoad(new Load(0.16, 0.02, 0.5), 120); fixture.tester().assertResources("Scaling down memory", - 6, 1, 3.0, 4.2, 100.8, + 6, 1, 3.0, 4.2, 139.9, fixture.autoscale()); } @@ -490,7 +506,7 @@ public class AutoscalingTest { fixture.tester().clock().advance(Duration.ofDays(2)); fixture.loader().applyCpuLoad(0.02, 120); fixture.tester().assertResources("Scaling down since enough time has passed", - 4, 1, 1.0, 17.2, 80.4, + 3, 1, 1.0, 25.8, 147.4, fixture.autoscale()); } @@ -507,20 +523,20 @@ public class AutoscalingTest { fixture.loader().applyCpuLoad(0.25, 120); // (no read share stored) fixture.tester().assertResources("Advice to scale up since we set aside for bcp by default", - 5, 1, 3, 100, 100, + 6, 1, 3, 100, 100, fixture.autoscale()); fixture.loader().applyCpuLoad(0.25, 120); fixture.storeReadShare(0.25, 0.5); fixture.tester().assertResources("Half of global share is the same as the default assumption used above", - 5, 1, 3, 100, 100, + 6, 1, 3, 100, 100, fixture.autoscale()); fixture.tester.clock().advance(Duration.ofDays(1)); fixture.loader().applyCpuLoad(0.25, 120); fixture.storeReadShare(0.5, 0.5); fixture.tester().assertResources("Advice to scale down since we don't need room for bcp", - 4, 1, 3, 100, 100, + 5, 1, 3, 100, 100, fixture.autoscale()); } @@ -534,7 +550,7 @@ public class AutoscalingTest { 
fixture.loader().addCpuMeasurements(0.25, 200); fixture.tester().assertResources("Scale up since we assume we need 2x cpu for growth when no data scaling time data", - 7, 1, 1.8, 8.9, 40.4, + 6, 1, 2.1, 10.6, 66.5, fixture.autoscale()); fixture.setScalingDuration(Duration.ofMinutes(5)); @@ -543,7 +559,7 @@ public class AutoscalingTest { fixture.tester.clock().advance(timeAdded.negated()); fixture.loader().addCpuMeasurements(0.25, 200); fixture.tester().assertResources("Scale down since observed growth is slower than scaling time", - 7, 1, 1.5, 8.9, 40.4, + 5, 1, 2.2, 13.3, 83.2, fixture.autoscale()); fixture.setScalingDuration(Duration.ofMinutes(60)); @@ -554,7 +570,7 @@ public class AutoscalingTest { fixture.tester.clock().advance(timeAdded.negated()); fixture.loader().addCpuMeasurements(0.25, 200); fixture.tester().assertResources("Scale up since observed growth is faster than scaling time", - 7, 1, 1.8, 8.9, 40.4, + 6, 1, 2.1, 10.6, 66.5, fixture.autoscale()); } @@ -572,7 +588,7 @@ public class AutoscalingTest { fixture.tester.clock().advance(timeAdded.negated()); fixture.loader().addCpuMeasurements(0.4, 200); fixture.tester.assertResources("Query and write load is equal -> scale up somewhat", - 7, 1, 2, 8.9, 40.2, + 7, 1, 2, 8.9, 55.5, fixture.autoscale()); fixture.tester().clock().advance(Duration.ofDays(2)); @@ -581,7 +597,7 @@ public class AutoscalingTest { fixture.loader().addCpuMeasurements(0.4, 200); // TODO: Ackhually, we scale down here - why? 
fixture.tester().assertResources("Query load is 4x write load -> scale up more", - 7, 1, 1.8, 8.9, 40.4, + 6, 1, 2.1, 10.6, 66.5, fixture.autoscale()); fixture.tester().clock().advance(Duration.ofDays(2)); @@ -589,7 +605,7 @@ public class AutoscalingTest { fixture.tester.clock().advance(timeAdded.negated()); fixture.loader().addCpuMeasurements(0.4, 200); fixture.tester().assertResources("Write load is 10x query load -> scale down", - 6, 1, 1.1, 10.6, 48.5, + 5, 1, 1.4, 13.3, 83.2, fixture.autoscale()); fixture.tester().clock().advance(Duration.ofDays(2)); @@ -597,7 +613,7 @@ public class AutoscalingTest { fixture.tester.clock().advance(timeAdded.negated()); fixture.loader().addCpuMeasurements(0.4, 200); fixture.tester().assertResources("Query only -> largest possible", - 7, 1, 3.5, 8.9, 40.2, + 7, 1, 3.5, 8.9, 55.5, fixture.autoscale()); fixture.tester().clock().advance(Duration.ofDays(2)); @@ -605,7 +621,7 @@ public class AutoscalingTest { fixture.tester.clock().advance(timeAdded.negated()); fixture.loader().addCpuMeasurements(0.4, 200); fixture.tester().assertResources("Write only -> smallest possible", - 4, 1, 1.1, 17.2, 80.4, + 4, 1, 1.1, 17.2, 110.9, fixture.autoscale()); } @@ -666,24 +682,27 @@ public class AutoscalingTest { @Test public void test_changing_exclusivity() { + var min = new ClusterResources( 2, 1, new NodeResources( 1, 4, 100, 1)); + var max = new ClusterResources(20, 1, new NodeResources(100, 1000, 1000, 1)); var fixture = AutoscalingTester.fixture() .awsProdSetup(true) .cluster(clusterSpec(true)) + .capacity(Capacity.from(min, max)) .initialResources(Optional.empty()) .build(); fixture.tester().assertResources("Initial deployment at minimum", - 2, 1, 2, 4, 10, + 2, 1, 2, 4, 100, fixture.currentResources().advertisedResources()); fixture.tester().deploy(fixture.applicationId(), clusterSpec(false), fixture.capacity()); fixture.tester().assertResources("With non-exclusive nodes, a better solution is " + "50% more nodes with half the cpu", - 3, 1, 
1, 4, 10.2, + 3, 1, 1, 4, 145.6, fixture.autoscale()); fixture.tester().deploy(fixture.applicationId(), clusterSpec(true), fixture.capacity()); fixture.tester().assertResources("Reverts to the initial resources", - 2, 1, 2, 4, 10, + 2, 1, 2, 4, 100, fixture.currentResources().advertisedResources()); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java index ff04083ebde..311428de8ff 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java @@ -189,14 +189,18 @@ public class Fixture { } public Fixture.Builder awsProdSetup(boolean allowHostSharing) { - return this.awsHostFlavors() - .awsResourceCalculator() - .zone(new Zone(Cloud.builder().dynamicProvisioning(true) - .allowHostSharing(allowHostSharing) - .build(), - SystemName.Public, - Environment.prod, - RegionName.from("aws-eu-west-1a"))); + return awsSetup(allowHostSharing, Environment.prod); + } + + public Fixture.Builder awsSetup(boolean allowHostSharing, Environment environment) { + return this.awsHostFlavors() + .awsResourceCalculator() + .zone(new Zone(Cloud.builder().dynamicProvisioning(true) + .allowHostSharing(allowHostSharing) + .build(), + SystemName.Public, + environment, + RegionName.from("aws-eu-west-1a"))); } public Fixture.Builder vespaVersion(Version version) { @@ -205,7 +209,7 @@ public class Fixture { } public Fixture.Builder hostFlavors(NodeResources ... 
hostResources) { - this.hostFlavors = Arrays.stream(hostResources).map(r -> new Flavor(r)).collect(Collectors.toList()); + this.hostFlavors = Arrays.stream(hostResources).map(r -> new Flavor(r)).toList(); return this; } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/awsnodes/AwsHostResourcesCalculatorImpl.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/awsnodes/AwsHostResourcesCalculatorImpl.java index d148f6d3cc7..2ae1fe18714 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/awsnodes/AwsHostResourcesCalculatorImpl.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/awsnodes/AwsHostResourcesCalculatorImpl.java @@ -4,6 +4,7 @@ package com.yahoo.vespa.hosted.provision.autoscale.awsnodes; import com.yahoo.config.provision.Flavor; import com.yahoo.config.provision.NodeResources; import com.yahoo.config.provision.NodeType; +import com.yahoo.config.provision.Zone; import com.yahoo.vespa.hosted.provision.Node; import com.yahoo.vespa.hosted.provision.NodeRepository; import com.yahoo.vespa.hosted.provision.Nodelike; diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/awsnodes/AwsResourcesCalculator.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/awsnodes/AwsResourcesCalculator.java index 63f6d50ab2e..96fa143dc57 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/awsnodes/AwsResourcesCalculator.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/awsnodes/AwsResourcesCalculator.java @@ -2,6 +2,8 @@ package com.yahoo.vespa.hosted.provision.autoscale.awsnodes; import com.yahoo.config.provision.NodeResources; +import com.yahoo.config.provision.NodeType; +import com.yahoo.config.provision.Zone; /** * Calculations and logic on node resources common to provision-service and host-admin (at least). 
@@ -10,8 +12,12 @@ import com.yahoo.config.provision.NodeResources; */ public class AwsResourcesCalculator { + private final ReservedSpacePolicyImpl reservedSpacePolicy; private final double hostMemory = 0.6; - private final double hostDiskOverhead = 1; + + public AwsResourcesCalculator() { + this.reservedSpacePolicy = new ReservedSpacePolicyImpl(); + } /** The real resources of a parent host node in the node repository, given the real resources of the flavor. */ public NodeResources realResourcesOfParentHost(NodeResources realResourcesOfFlavor) { @@ -52,6 +58,7 @@ public class AwsResourcesCalculator { */ public double diskOverhead(VespaFlavor flavor, NodeResources resources, boolean real, boolean exclusive) { if ( flavor.realResources().storageType() != NodeResources.StorageType.local) return 0; + double hostDiskOverhead = reservedSpacePolicy.getPartitionSizeInBase2Gb(NodeType.host, ! exclusive); double diskShare = resources.diskGb() / ( flavor.advertisedResources().diskGb() - ( real ? 
hostDiskOverhead : 0) ); return hostDiskOverhead * diskShare; diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/awsnodes/ReservedSpacePolicyImpl.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/awsnodes/ReservedSpacePolicyImpl.java new file mode 100644 index 00000000000..000d08b59f8 --- /dev/null +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/awsnodes/ReservedSpacePolicyImpl.java @@ -0,0 +1,50 @@ +package com.yahoo.vespa.hosted.provision.autoscale.awsnodes; + +import com.yahoo.config.provision.NodeType; + +/** + * Matches the internal repo implementation + * + * @author hakonhall + * @author musum + */ +public class ReservedSpacePolicyImpl { + + public long getPartitionSizeInBase2Gb(NodeType nodeType, boolean sharedHost) { + return new PartitionSizer(nodeType, sharedHost).getPartitionSize(); + } + + private static class PartitionSizer { + + private static final long imageCountForSharedHost = 6; + private static final long imageCountForNonSharedHost = 3; + + // Add a buffer to allow a small increase in image size + private static final long bufferSharedHost = 5; + private static final long bufferNonSharedHost = 3; + + private final boolean sharedHost; + + PartitionSizer(NodeType nodeType, boolean sharedHost) { + this.sharedHost = sharedHost; + } + + long getPartitionSize() { + return imageSize() * imageCount() + buffer(); + } + + private long imageSize() { + return (long)7.7; // return (long)VespaContainerImage.maxImageSize(hostedSystem, nodeType); + } + + private long buffer() { + return sharedHost ? bufferSharedHost : bufferNonSharedHost; + } + + private long imageCount() { + return sharedHost ? 
imageCountForSharedHost : imageCountForNonSharedHost; + } + + } + +} diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/awsnodes/VespaFlavor.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/awsnodes/VespaFlavor.java index cd5f18db516..c42b61988e9 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/awsnodes/VespaFlavor.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/awsnodes/VespaFlavor.java @@ -34,4 +34,7 @@ public class VespaFlavor { public NodeResources advertisedResources() { return advertisedResources; } + @Override + public String toString() { return "flavor " + name; } + } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java index 876ff058bf7..2d05754d96e 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTest.java @@ -12,8 +12,12 @@ import com.yahoo.config.provision.SystemName; import com.yahoo.config.provision.Zone; import com.yahoo.test.ManualClock; import com.yahoo.vespa.hosted.provision.Node; +import com.yahoo.vespa.hosted.provision.NodeRepository; import com.yahoo.vespa.hosted.provision.applications.Cluster; import com.yahoo.vespa.hosted.provision.applications.ScalingEvent; +import com.yahoo.vespa.hosted.provision.autoscale.ClusterModel; +import com.yahoo.vespa.hosted.provision.node.Agent; +import com.yahoo.vespa.hosted.provision.node.History; import com.yahoo.vespa.hosted.provision.testutils.MockDeployer; import org.junit.Test; @@ -198,7 +202,7 @@ public class AutoscalingMaintainerTest { } @Test - public void test_autoscaling_ignores_high_cpu_right_after_generation_change() { + 
public void test_autoscaling_ignores_measurements_during_warmup() { ApplicationId app1 = AutoscalingMaintainerTester.makeApplicationId("app1"); ClusterSpec cluster1 = AutoscalingMaintainerTester.containerClusterSpec(); NodeResources resources = new NodeResources(4, 4, 10, 1); @@ -207,26 +211,46 @@ public class AutoscalingMaintainerTest { var capacity = Capacity.from(min, max); var tester = new AutoscalingMaintainerTester(new MockDeployer.ApplicationContext(app1, cluster1, capacity)); + // Add a scaling event tester.deploy(app1, cluster1, capacity); - // fast completion - tester.addMeasurements(1.0f, 0.3f, 0.3f, 0, 1, app1); - tester.clock().advance(Duration.ofSeconds(150)); - tester.addMeasurements(1.0f, 0.3f, 0.3f, 0, 1, app1); - tester.clock().advance(Duration.ofSeconds(150)); - tester.addMeasurements(1.0f, 0.3f, 0.3f, 0, 1, app1); + tester.addMeasurements(1.0f, 0.3f, 0.3f, 0, 4, app1); tester.maintainer().maintain(); assertEquals("Scale up: " + tester.cluster(app1, cluster1).autoscalingStatus(), 1, tester.cluster(app1, cluster1).lastScalingEvent().get().generation()); - // fast completion, with initially overloaded cpu - tester.addMeasurements(3.0f, 0.3f, 0.3f, 1, 1, app1); - tester.clock().advance(Duration.ofSeconds(150)); - tester.addMeasurements(0.2f, 0.3f, 0.3f, 1, 1, app1); + // measurements with outdated generation are ignored -> no autoscaling + var duration = tester.addMeasurements(3.0f, 0.3f, 0.3f, 0, 2, app1); tester.maintainer().maintain(); - assertEquals("No autoscaling since we ignore the (first) data point in the warup period", + assertEquals("Measurements with outdated generation are ignored -> no autoscaling", 1, tester.cluster(app1, cluster1).lastScalingEvent().get().generation()); + tester.clock().advance(duration.negated()); + + duration = tester.addMeasurements(3.0f, 0.3f, 0.3f, 1, 2, app1); + tester.maintainer().maintain(); + assertEquals("Measurements right after generation change are ignored -> no autoscaling", + 1, + tester.cluster(app1, 
cluster1).lastScalingEvent().get().generation()); + tester.clock().advance(duration.negated()); + + // Add a restart event + tester.clock().advance(ClusterModel.warmupDuration.plus(Duration.ofMinutes(1))); + tester.nodeRepository().nodes().list().owner(app1).asList().forEach(node -> recordRestart(node, tester.nodeRepository())); + + duration = tester.addMeasurements(3.0f, 0.3f, 0.3f, 1, 2, app1); + tester.maintainer().maintain(); + assertEquals("Measurements right after restart are ignored -> no autoscaling", + 1, + tester.cluster(app1, cluster1).lastScalingEvent().get().generation()); + tester.clock().advance(duration.negated()); + + tester.clock().advance(ClusterModel.warmupDuration.plus(Duration.ofMinutes(1))); + tester.addMeasurements(3.0f, 0.3f, 0.3f, 1, 2, app1); + tester.maintainer().maintain(); + assertEquals("We have valid measurements -> scale up", + 2, + tester.cluster(app1, cluster1).lastScalingEvent().get().generation()); } @Test @@ -300,6 +324,13 @@ public class AutoscalingMaintainerTest { tester.cluster(application, cluster).lastScalingEvent().get().generation()); } + private void recordRestart(Node node, NodeRepository nodeRepository) { + var upEvent = new History.Event(History.Event.Type.up, Agent.system, nodeRepository.clock().instant()); + try (var locked = nodeRepository.nodes().lockAndGetRequired(node)) { + nodeRepository.nodes().write(locked.node().with(locked.node().history().with(upEvent)), locked); + } + } + private void assertEvent(String explanation, long expectedGeneration, Optional<Instant> expectedCompletion, ScalingEvent event) { assertEquals(explanation + ". 
Generation", expectedGeneration, event.generation()); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java index 59080f60982..4978fe431dc 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java @@ -311,7 +311,7 @@ public class FailedExpirerTest { public FailureScenario setReady(String... hostname) { List<Node> nodes = Stream.of(hostname) .map(this::get) - .collect(Collectors.toList()); + .toList(); nodes = nodeRepository.nodes().deallocate(nodes, Agent.system, getClass().getSimpleName()); tester.move(Node.State.ready, nodes); return this; diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainerTest.java index b26d9f677db..a1b2fb5d0ad 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainerTest.java @@ -690,7 +690,7 @@ public class HostCapacityMaintainerTest { flavor.resources(), Generation.initial(), false)); - List<Address> addresses = Stream.of(additionalHostnames).map(Address::new).collect(Collectors.toList()); + List<Address> addresses = Stream.of(additionalHostnames).map(Address::new).toList(); Node.Builder builder = Node.create("fake-id-" + hostname, hostname, flavor, state, nodeType) .ipConfig(new IP.Config(state == Node.State.active ? 
Set.of("::1") : Set.of(), Set.of(), addresses)); parentHostname.ifPresent(builder::parentHostname); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java index d47b8955b56..d7e2dbb6f58 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java @@ -78,7 +78,7 @@ public class InactiveAndFailedExpirerTest { Node ready = tester.move(Node.State.ready, dirty.asList().get(0)); assertEquals("Allocated history is removed on readying", List.of(History.Event.Type.provisioned, History.Event.Type.readied), - ready.history().events().stream().map(History.Event::type).collect(Collectors.toList())); + ready.history().events().stream().map(History.Event::type).toList()); // Dirty times out for the other one tester.advanceTime(Duration.ofMinutes(14)); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java index 77dbc86769f..4f23ea3a578 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java @@ -306,7 +306,7 @@ public class MetricsReporterTest { private Number getMetric(String name, TestMetric metric, Map<String, String> dimensions) { List<TestMetric.TestContext> metrics = metric.context.get(name).stream() .filter(ctx -> ctx.properties.entrySet().containsAll(dimensions.entrySet())) - .collect(Collectors.toList()); + .toList(); if (metrics.isEmpty()) throw new IllegalArgumentException("No value found for metric " + 
name + " with dimensions " + dimensions); return metrics.get(metrics.size() - 1).value; } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java index ede958ef083..47803594148 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java @@ -563,33 +563,35 @@ public class NodeFailerTest { NodeList allNodes = tester.nodeRepository.nodes().list(); assertEquals(500, allNodes.size()); - // 2 hours pass, 20 nodes (4%) die + // 2 hours pass, many nodes fail tester.runMaintainers(); + int downNodes = 25; // 5% + int allowedToFail = 20; // 4% allNodes.state(Node.State.active) .nodeType(NodeType.tenant) .stream() - .limit(20) + .limit(downNodes) .forEach(host -> tester.serviceMonitor.setHostDown(host.hostname())); tester.runMaintainers(); tester.clock.advance(Duration.ofHours(2)); tester.runMaintainers(); - // 3% are allowed to fail - assertEquals(15, tester.nodeRepository.nodes().list(Node.State.failed).size()); + // Fails nodes up to throttle limit + assertEquals(allowedToFail, tester.nodeRepository.nodes().list(Node.State.failed).size()); assertEquals("Throttling is indicated by the metric.", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric)); - assertEquals("Throttled node failures", 5, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric)); + assertEquals("Throttled node failures", downNodes - allowedToFail, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric)); // 6 more hours pass, no more nodes are failed tester.clock.advance(Duration.ofHours(6)); tester.runMaintainers(); - assertEquals(15, tester.nodeRepository.nodes().list(Node.State.failed).size()); + assertEquals(allowedToFail, 
tester.nodeRepository.nodes().list(Node.State.failed).size()); assertEquals("Throttling is indicated by the metric.", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric)); - assertEquals("Throttled node failures", 5, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric)); + assertEquals("Throttled node failures", downNodes - allowedToFail, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric)); - // 18 more hours pass, 24 hours since the first 10 nodes were failed. The remaining 5 are failed + // 18 more hours pass, 24 hours since the first batch of nodes were failed. The remaining nodes are failed tester.clock.advance(Duration.ofHours(18)); tester.runMaintainers(); - assertEquals(20, tester.nodeRepository.nodes().list(Node.State.failed).size()); + assertEquals(downNodes, tester.nodeRepository.nodes().list(Node.State.failed).size()); assertEquals("Throttling is not indicated by the metric, as no throttled attempt is made.", 0, tester.metric.values.get(NodeFailer.throttlingActiveMetric)); assertEquals("No throttled node failures", 0, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric)); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMetricsDbMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMetricsDbMaintainerTest.java index 8a84c81e082..d379513a8f9 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMetricsDbMaintainerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMetricsDbMaintainerTest.java @@ -49,7 +49,7 @@ public class NodeMetricsDbMaintainerTest { assertEquals(2, timeseriesList.size()); List<NodeMetricSnapshot> allSnapshots = timeseriesList.stream() .flatMap(timeseries -> timeseries.asList().stream()) - .collect(Collectors.toList()); + .toList(); assertTrue(allSnapshots.stream().anyMatch(snapshot -> snapshot.inService())); 
assertTrue(allSnapshots.stream().anyMatch(snapshot -> ! snapshot.inService())); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooterTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooterTest.java index 5613f1961f8..fb3c6ec89d9 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooterTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooterTest.java @@ -142,7 +142,7 @@ public class NodeRebooterTest { /** Returns the subset of the given nodes which have the given current reboot generation */ private List<Node> withCurrentRebootGeneration(long generation, List<Node> nodes) { - return nodes.stream().filter(n -> n.status().reboot().current() == generation).collect(Collectors.toList()); + return nodes.stream().filter(n -> n.status().reboot().current() == generation).toList(); } private static ProvisioningTester createTester(Duration rebootInterval, InMemoryFlagSource flagSource) { diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirerTest.java index 329e6c833a9..32886733856 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirerTest.java @@ -44,7 +44,7 @@ public class ProvisionedExpirerTest { private void populateNodeRepo() { var nodes = IntStream.range(0, 25) .mapToObj(i -> Node.create("id-" + i, "host-" + i, new Flavor(NodeResources.unspecified()), Node.State.provisioned, NodeType.host).build()) - .collect(Collectors.toList()); + .toList(); tester.nodeRepository().database().addNodesInState(nodes, Node.State.provisioned, Agent.system); } diff --git 
a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/SwitchRebalancerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/SwitchRebalancerTest.java index 6be07f6f702..6895f5eeae7 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/SwitchRebalancerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/SwitchRebalancerTest.java @@ -233,7 +233,7 @@ public class SwitchRebalancerTest { private static MockDeployer deployer(ProvisioningTester tester, Capacity capacity, ClusterSpec first, ClusterSpec... rest) { List<ClusterContext> clusterContexts = Stream.concat(Stream.of(first), Stream.of(rest)) .map(spec -> new ClusterContext(app, spec, capacity)) - .collect(Collectors.toList()); + .toList(); ApplicationContext context = new ApplicationContext(app, clusterContexts); return new MockDeployer(tester.provisioner(), tester.clock(), Map.of(app, context)); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/node/HistoryTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/node/HistoryTest.java index ccd60ded517..f8fe1c1ef73 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/node/HistoryTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/node/HistoryTest.java @@ -28,7 +28,7 @@ public class HistoryTest { assertEquals(3, history.log().size()); assertEquals("Most recent events are kept", List.of(2L, 3L, 4L), - history.log().stream().map(e -> e.at().toEpochMilli()).collect(Collectors.toList())); + history.log().stream().map(e -> e.at().toEpochMilli()).toList()); } private static List<Event> shuffledEvents(int count) { diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/node/IPTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/node/IPTest.java index be35176ed98..f4610e722a8 100644 --- 
a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/node/IPTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/node/IPTest.java @@ -66,7 +66,7 @@ public class IPTest { .map(IP::parse) .sorted(IP.NATURAL_ORDER) .map(IP::asString) - .collect(Collectors.toList()) + .toList() ); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java index 1741dbdb749..4635d3ff525 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java @@ -129,7 +129,7 @@ public class OsVersionsTest { minVersion(nodesUpgrading, OsVersion::wanted)); var nodesOnLowestVersion = nodes.asList().stream() .sorted(Comparator.comparing(node -> node.status().osVersion().current().orElse(Version.emptyVersion))) - .collect(Collectors.toList()) + .toList() .subList(0, maxActiveUpgrades); assertEquals("Nodes on lowest version are told to upgrade", nodesUpgrading.asList(), nodesOnLowestVersion); @@ -543,7 +543,7 @@ public class OsVersionsTest { return nodes.stream() .map(Node::hostname) .flatMap(hostname -> tester.nodeRepository().nodes().node(hostname).stream()) - .collect(Collectors.toList()); + .toList(); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializerTest.java index 307c9db38c6..d5722a59f3e 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializerTest.java @@ -11,6 +11,7 @@ import com.yahoo.vespa.hosted.provision.lb.DnsZone; import 
com.yahoo.vespa.hosted.provision.lb.LoadBalancer; import com.yahoo.vespa.hosted.provision.lb.LoadBalancerId; import com.yahoo.vespa.hosted.provision.lb.LoadBalancerInstance; +import com.yahoo.vespa.hosted.provision.lb.PrivateServiceId; import com.yahoo.vespa.hosted.provision.lb.Real; import org.junit.Test; @@ -45,7 +46,8 @@ public class LoadBalancerSerializerTest { new Real(DomainName.of("real-2"), "127.0.0.2", 4080)), - Optional.of(new LoadBalancerSettings(List.of("123"))), + new LoadBalancerSettings(List.of("123")), + Optional.of(PrivateServiceId.of("foo")), CloudAccount.from("012345678912"))), LoadBalancer.State.active, now); @@ -60,6 +62,7 @@ public class LoadBalancerSerializerTest { assertEquals(loadBalancer.changedAt().truncatedTo(MILLIS), serialized.changedAt()); assertEquals(loadBalancer.instance().get().reals(), serialized.instance().get().reals()); assertEquals(loadBalancer.instance().get().settings(), serialized.instance().get().settings()); + assertEquals(loadBalancer.instance().get().serviceId(), serialized.instance().get().serviceId()); assertEquals(loadBalancer.instance().get().cloudAccount(), serialized.instance().get().cloudAccount()); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializerTest.java index e8155234989..ab487cc7d04 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializerTest.java @@ -167,7 +167,7 @@ public class NodeSerializerTest { assertEquals(3, node.allocation().get().restartGeneration().wanted()); assertEquals(4, node.allocation().get().restartGeneration().current()); assertEquals(List.of(History.Event.Type.provisioned, History.Event.Type.reserved), - 
node.history().events().stream().map(History.Event::type).collect(Collectors.toList())); + node.history().events().stream().map(History.Event::type).toList()); assertTrue(node.allocation().get().removable()); assertEquals(NodeType.tenant, node.type()); } @@ -368,7 +368,7 @@ public class NodeSerializerTest { assertEquals(Version.fromString("7.2"), serialized.status().osVersion().current().get()); var osUpgradedEvents = serialized.history().events().stream() .filter(event -> event.type() == History.Event.Type.osUpgraded) - .collect(Collectors.toList()); + .toList(); assertEquals("OS upgraded event is added", 1, osUpgradedEvents.size()); assertEquals("Duplicate updates of same version uses earliest instant", Instant.ofEpochMilli(123), osUpgradedEvents.get(0).at()); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java index ea9c0e1193d..d26ac4d3916 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java @@ -245,12 +245,12 @@ public class AclProvisioningTest { .flatMap(List::stream) .distinct() .sorted(Comparator.comparing(TrustedNode::hostname)) - .collect(Collectors.toList()); + .toList(); List<TrustedNode> actualTrustedNodes = actual.stream() .flatMap(acl -> acl.trustedNodes().stream()) .distinct() .sorted(Comparator.comparing(TrustedNode::hostname)) - .collect(Collectors.toList()); + .toList(); assertEquals(expectedTrustedNodes, actualTrustedNodes); Set<String> actualTrustedNetworks = actual.stream() diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java index 
623ce47b611..66b278a52db 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTest.java @@ -190,7 +190,7 @@ public class DynamicProvisioningTest { var indices = tester.nodeRepository().nodes().list().owner(app).stream() .map(node -> node.allocation().get().membership().index()) .collect(Collectors.toSet()); - assertTrue(indices.containsAll(IntStream.range(0, 10).boxed().collect(Collectors.toList()))); + assertTrue(indices.containsAll(IntStream.range(0, 10).boxed().toList())); } @Test diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/HostCapacityTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/HostCapacityTest.java index 31643b2ac79..ea2af5f3fca 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/HostCapacityTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/HostCapacityTest.java @@ -167,7 +167,7 @@ public class HostCapacityTest { } private Node setupHostWithAdditionalHostnames(String hostHostname, String... additionalHostnames) { - List<Address> addresses = Stream.of(additionalHostnames).map(Address::new).collect(Collectors.toList()); + List<Address> addresses = Stream.of(additionalHostnames).map(Address::new).toList(); doAnswer(invocation -> ((Flavor)invocation.getArguments()[0]).resources()) .when(hostResourcesCalculator).advertisedResourcesOf(any()); @@ -181,7 +181,7 @@ public class HostCapacityTest { } private boolean hasCapacity(NodeResources requestedCapacity, Node host, Node... 
remainingNodes) { - List<Node> nodes = Stream.concat(Stream.of(host), Stream.of(remainingNodes)).collect(Collectors.toList()); + List<Node> nodes = Stream.concat(Stream.of(host), Stream.of(remainingNodes)).toList(); var capacity = new HostCapacity(new LockedNodeList(nodes, () -> {}), hostResourcesCalculator); return capacity.hasCapacity(host, requestedCapacity); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/InfraDeployerImplTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/InfraDeployerImplTest.java index 3c98cdef93b..9a38cbbba44 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/InfraDeployerImplTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/InfraDeployerImplTest.java @@ -133,7 +133,7 @@ public class InfraDeployerImplTest { @SuppressWarnings("unchecked") private void verifyActivated(String... hostnames) { verify(duperModelInfraApi).infraApplicationActivated( - eq(application.getApplicationId()), eq(Stream.of(hostnames).map(HostName::of).collect(Collectors.toList()))); + eq(application.getApplicationId()), eq(Stream.of(hostnames).map(HostName::of).toList())); ArgumentMatcher<ApplicationTransaction> transactionMatcher = t -> { assertEquals(application.getApplicationId(), t.application()); return true; diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java index a19f986a177..30bd1250430 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java @@ -320,14 +320,14 @@ public class LoadBalancerProvisionerTest { tester.activate(app1, prepare(app1, capacity, 
clusterRequest(ClusterSpec.Type.container, ClusterSpec.Id.from("c1")))); LoadBalancerList loadBalancers = tester.nodeRepository().loadBalancers().list(); assertEquals(1, loadBalancers.size()); - assertEquals(LoadBalancerSettings.empty, loadBalancers.first().get().instance().get().settings().get()); + assertEquals(LoadBalancerSettings.empty, loadBalancers.first().get().instance().get().settings()); // Next deployment contains new settings LoadBalancerSettings settings = new LoadBalancerSettings(List.of("alice", "bob")); tester.activate(app1, prepare(app1, capacity, clusterRequest(ClusterSpec.Type.container, ClusterSpec.Id.from("c1"), Optional.empty(), settings))); loadBalancers = tester.nodeRepository().loadBalancers().list(); assertEquals(1, loadBalancers.size()); - assertEquals(settings, loadBalancers.first().get().instance().get().settings().get()); + assertEquals(settings, loadBalancers.first().get().instance().get().settings()); } @@ -377,13 +377,13 @@ public class LoadBalancerProvisionerTest { List<String> reals = loadBalancers.get(0).instance().get().reals().stream() .map(real -> real.hostname().value()) .sorted() - .collect(Collectors.toList()); + .toList(); List<String> activeNodes = tester.nodeRepository().nodes().list(states) .owner(application) .cluster(cluster) .hostnames().stream() .sorted() - .collect(Collectors.toList()); + .toList(); assertEquals("Load balancer targets active nodes of " + application + " in " + cluster, activeNodes, reals); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeTypeProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeTypeProvisioningTest.java index 12f1abf0cf5..4af5728f167 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeTypeProvisioningTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeTypeProvisioningTest.java @@ -194,7 +194,7 @@ public 
class NodeTypeProvisioningTest { // Verify that wantToRetire has been propagated List<Node> nodesCurrentlyRetiring = nodes.stream() .filter(node -> node.allocation().get().membership().retired()) - .collect(Collectors.toList()); + .toList(); assertEquals(5, nodesCurrentlyRetiring.size()); // The retiring nodes should be the nodes we marked for retirement @@ -211,7 +211,7 @@ public class NodeTypeProvisioningTest { // Verify that wantToRetire has been propagated List<Node> nodesCurrentlyRetiring = nodes.stream() .filter(node -> node.allocation().get().membership().retired()) - .collect(Collectors.toList()); + .toList(); assertEquals(5, nodesCurrentlyRetiring.size()); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java index bf15e4bbe1c..405d9578c95 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java @@ -572,7 +572,7 @@ public class ProvisioningTester { /** Returns the hosts from the input list which are not retired */ public List<HostSpec> nonRetired(Collection<HostSpec> hosts) { - return hosts.stream().filter(host -> ! host.membership().get().retired()).collect(Collectors.toList()); + return hosts.stream().filter(host -> ! 
host.membership().get().retired()).toList(); } public void assertAllocatedOn(String explanation, String hostFlavor, ApplicationId app) { @@ -595,7 +595,7 @@ public class ProvisioningTester { Optional<String> allocatedSwitchHostname = allNodes.parentOf(node).flatMap(Node::switchHostname); return allocatedSwitchHostname.isPresent() && allocatedSwitchHostname.get().equals(switchHostname); - }).collect(Collectors.toList()); + }).toList(); } public Set<String> switchesOf(NodeList applicationNodes, NodeList allNodes) { diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/load-balancers.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/load-balancers.json index 7ae283e3916..bbccc72c7f9 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/load-balancers.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/load-balancers.json @@ -28,7 +28,11 @@ "ipAddress": "127.0.10.1", "port": 4443 } - ] + ], + "settings": { + "allowedUrns": [ "arne" ] + }, + "serviceId": "service" }, { "id": "cfg:cfg:cfg:configservers", |