author    Martin Polden <mpolden@mpolden.no>  2021-02-11 09:20:30 +0100
committer GitHub <noreply@github.com>  2021-02-11 09:20:30 +0100
commit    b78423a2dbacd4505c18fc600009c7b3812eb402 (patch)
tree      df4f36852eaf21ab4b6aaa62b69c4eab646ecf13 /node-repository
parent    ab86947680c13a882b9f62db1ec852e40a313332 (diff)
parent    8e1cae2a0725567b15d49b99317523853cb033e4 (diff)
Merge pull request #16479 from vespa-engine/bratseth/remove-getNodes
Bratseth/remove get nodes
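
This merge removes the remaining getNodes()/getNode() accessors from Nodes and has callers go through the single list()/node() entry points instead, filtering the returned NodeList (by owner, state, or node type) at the call site; NodeList additionally gains equals() and hashCode(). A minimal before/after sketch of the call-site change, using the method names visible in this diff (the nodeRepository and application variables and the hostname are illustrative only):

    // Before: dedicated getters on Nodes
    List<Node> active = nodeRepository.nodes().getNodes(application, Node.State.active);
    Optional<Node> node = nodeRepository.nodes().getNode("host1.example.com");

    // After: one list()/node() entry point, filtered through NodeList
    NodeList activeNodes = nodeRepository.nodes().list(Node.State.active).owner(application);
    Optional<Node> sameNode = nodeRepository.nodes().node("host1.example.com");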
Diffstat (limited to 'node-repository')
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeList.java | 10
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java | 2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java | 2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MemoryMetricsDb.java | 2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcher.java | 2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java | 5
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Expirer.java | 2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java | 2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java | 6
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java | 17
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeHealthTracker.java | 4
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooter.java | 2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainer.java | 5
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirer.java | 3
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainer.java | 2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java | 63
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java | 12
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java | 2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java | 3
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java | 2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java | 4
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodeAclResponse.java | 2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java | 6
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java | 4
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/ServiceMonitorStub.java | 2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java | 52
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTester.java | 4
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java | 2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingIntegrationTest.java | 2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java | 4
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java | 17
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcherTest.java | 4
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java | 3
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityCheckerTest.java | 6
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java | 111
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java | 4
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java | 34
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java | 2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java | 2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java | 19
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java | 259
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooterTest.java | 13
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainerTest.java | 12
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OsUpgradeActivatorTest.java | 2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java | 52
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java | 11
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ReservationExpirerTest.java | 13
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java | 28
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java | 5
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java | 12
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java | 35
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java | 12
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java | 23
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java | 36
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java | 17
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeTypeProvisioningTest.java | 49
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java | 68
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java | 36
58 files changed, 549 insertions(+), 566 deletions(-)
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeList.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeList.java
index 19c1fa090c9..84aafa77c27 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeList.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeList.java
@@ -271,4 +271,14 @@ public class NodeList extends AbstractFilteringList<Node, NodeList> {
return asList().toString();
}
+ @Override
+ public int hashCode() { return asList().hashCode(); }
+
+ @Override
+ public boolean equals(Object other) {
+ if (other == this) return true;
+ if ( ! (other instanceof NodeList)) return false;
+ return this.asList().equals(((NodeList) other).asList());
+ }
+
}
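
The equals()/hashCode() pair added to NodeList above makes the list comparable by value: two NodeList instances wrapping the same nodes in the same order now compare equal, which is convenient when asserting on whole node lists. A small illustrative sketch (both lists are assumed to be read from the same repository state):

    NodeList first  = nodeRepository.nodes().list(Node.State.active);
    NodeList second = nodeRepository.nodes().list(Node.State.active);
    assertEquals(first, second);   // compares element by element via asList()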
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
index beec04b3b29..55495669802 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
@@ -193,7 +193,7 @@ public class NodeRepository extends AbstractComponent {
/** Removes this application: Active nodes are deactivated while all non-active nodes are set dirty. */
public void remove(ApplicationTransaction transaction) {
- NodeList applicationNodes = nodes().list(transaction.application());
+ NodeList applicationNodes = nodes().list().owner(transaction.application());
NodeList activeNodes = applicationNodes.state(State.active);
nodes().deactivate(activeNodes.asList(), transaction);
db.writeTo(State.dirty,
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
index 81fa7ed2d4b..4ab6f259374 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
@@ -176,7 +176,7 @@ public class Autoscaler {
return false;
// A deployment is ongoing
- if (nodeRepository.nodes().getNodes(nodes.first().get().allocation().get().owner(), Node.State.reserved).size() > 0)
+ if (nodeRepository.nodes().list(Node.State.reserved).owner(nodes.first().get().allocation().get().owner()).size() > 0)
return false;
return true;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MemoryMetricsDb.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MemoryMetricsDb.java
index a881bde2a33..45173650d60 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MemoryMetricsDb.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MemoryMetricsDb.java
@@ -75,7 +75,7 @@ public class MemoryMetricsDb implements MetricsDb {
private void add(String hostname, MetricSnapshot snapshot) {
NodeTimeseries timeseries = db.get(hostname);
if (timeseries == null) { // new node
- Optional<Node> node = nodeRepository.nodes().getNode(hostname);
+ Optional<Node> node = nodeRepository.nodes().node(hostname);
if (node.isEmpty()) return;
if (node.get().allocation().isEmpty()) return;
timeseries = new NodeTimeseries(hostname, new ArrayList<>());
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcher.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcher.java
index b93c7930b5b..b2d8ddfd414 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcher.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcher.java
@@ -51,7 +51,7 @@ public class MetricsV2MetricsFetcher extends AbstractComponent implements Metric
@Override
public CompletableFuture<MetricsResponse> fetchMetrics(ApplicationId application) {
- NodeList applicationNodes = nodeRepository.nodes().list(application).state(Node.State.active);
+ NodeList applicationNodes = nodeRepository.nodes().list().owner(application).state(Node.State.active);
Optional<Node> metricsV2Container = applicationNodes.container()
.matching(node -> expectedUp(node))
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java
index da5591e0800..d370681a087 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java
@@ -34,14 +34,13 @@ public class SharedLoadBalancerService implements LoadBalancerService {
@Override
public LoadBalancerInstance create(LoadBalancerSpec spec, boolean force) {
- var proxyNodes = new ArrayList<>(nodeRepository.nodes().getNodes(NodeType.proxy));
- proxyNodes.sort(hostnameComparator);
+ var proxyNodes = nodeRepository.nodes().list().nodeType(NodeType.proxy).sortedBy(hostnameComparator);
if (proxyNodes.size() == 0) {
throw new IllegalStateException("Missing proxy nodes in node repository");
}
- var firstProxyNode = proxyNodes.get(0);
+ var firstProxyNode = proxyNodes.first().get();
var networkNames = proxyNodes.stream()
.flatMap(node -> node.ipConfig().primary().stream())
.map(SharedLoadBalancerService::withPrefixLength)
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Expirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Expirer.java
index 8ccb8980a71..5b9cd6a69e1 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Expirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Expirer.java
@@ -41,7 +41,7 @@ public abstract class Expirer extends NodeRepositoryMaintainer {
@Override
protected boolean maintain() {
- List<Node> expired = nodeRepository().nodes().getNodes(fromState).stream()
+ List<Node> expired = nodeRepository().nodes().list(fromState).stream()
.filter(this::isExpired)
.collect(Collectors.toList());
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java
index 08edee0be8b..7317942c045 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java
@@ -68,7 +68,7 @@ public class FailedExpirer extends NodeRepositoryMaintainer {
@Override
protected boolean maintain() {
- List<Node> remainingNodes = nodeRepository.nodes().getNodes(Node.State.failed).stream()
+ List<Node> remainingNodes = nodeRepository.nodes().list(Node.State.failed).stream()
.filter(node -> node.type() == NodeType.tenant ||
node.type() == NodeType.host)
.collect(Collectors.toList());
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java
index e8f216c793a..860076dd111 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java
@@ -116,7 +116,7 @@ class MaintenanceDeployment implements Closeable {
Deployer deployer,
NodeRepository nodeRepository) {
if (lock.isEmpty()) return Optional.empty();
- if (nodeRepository.nodes().getNodes(application, Node.State.active).isEmpty()) return Optional.empty();
+ if (nodeRepository.nodes().list(Node.State.active).owner(application).isEmpty()) return Optional.empty();
return deployer.deployFromLocalActive(application);
}
@@ -168,7 +168,7 @@ class MaintenanceDeployment implements Closeable {
if ( ! deployment.prepare()) return false;
if (verifyTarget) {
expectedNewNode =
- nodeRepository.nodes().getNodes(application, Node.State.reserved).stream()
+ nodeRepository.nodes().list(Node.State.reserved).owner(application).stream()
.filter(n -> !n.hostname().equals(node.hostname()))
.filter(n -> n.allocation().get().membership().cluster().id().equals(node.allocation().get().membership().cluster().id()))
.findAny();
@@ -185,7 +185,7 @@ class MaintenanceDeployment implements Closeable {
markWantToRetire(node, false, agent, nodeRepository); // Necessary if this failed, no-op otherwise
// Immediately clean up if we reserved the node but could not activate or reserved a node on the wrong host
- expectedNewNode.flatMap(node -> nodeRepository.nodes().getNode(node.hostname(), Node.State.reserved))
+ expectedNewNode.flatMap(node -> nodeRepository.nodes().node(node.hostname(), Node.State.reserved))
.ifPresent(node -> nodeRepository.nodes().deallocate(node, agent, "Expired by " + agent));
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java
index 7c3e3eb4553..ba884ced630 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java
@@ -30,7 +30,6 @@ import java.util.logging.Logger;
import java.util.stream.Collectors;
import static java.util.stream.Collectors.collectingAndThen;
-import static java.util.stream.Collectors.counting;
/**
* Maintains information in the node repo about when this node last responded to ping
@@ -129,11 +128,11 @@ public class NodeFailer extends NodeRepositoryMaintainer {
clock().instant().minus(downTimeLimit).minus(nodeRequestInterval);
Map<Node, String> nodesByFailureReason = new HashMap<>();
- for (Node node : nodeRepository().nodes().getNodes(Node.State.ready)) {
+ for (Node node : nodeRepository().nodes().list(Node.State.ready)) {
if (expectConfigRequests(node) && ! hasNodeRequestedConfigAfter(node, oldestAcceptableRequestTime)) {
nodesByFailureReason.put(node, "Not receiving config requests from node");
} else {
- Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().nodes().getNode(parent)).orElse(node);
+ Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().nodes().node(parent)).orElse(node);
List<String> failureReports = reasonsToFailParentHost(hostNode);
if (failureReports.size() > 0) {
if (hostNode.equals(node)) {
@@ -148,7 +147,7 @@ public class NodeFailer extends NodeRepositoryMaintainer {
}
private Map<Node, String> getActiveNodesByFailureReason() {
- List<Node> activeNodes = nodeRepository().nodes().getNodes(Node.State.active);
+ NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
Instant graceTimeEnd = clock().instant().minus(downTimeLimit);
Map<Node, String> nodesByFailureReason = new HashMap<>();
for (Node node : activeNodes) {
@@ -158,7 +157,7 @@ public class NodeFailer extends NodeRepositoryMaintainer {
nodesByFailureReason.put(node, "Node has been down longer than " + downTimeLimit);
}
else if (hostSuspended(node, activeNodes)) {
- Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().nodes().getNode(parent)).orElse(node);
+ Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().nodes().node(parent)).orElse(node);
if (hostNode.type().isHost()) {
List<String> failureReports = reasonsToFailParentHost(hostNode);
if (failureReports.size() > 0) {
@@ -184,7 +183,7 @@ public class NodeFailer extends NodeRepositoryMaintainer {
/** Returns whether node has any kind of hardware issue */
static boolean hasHardwareIssue(Node node, NodeRepository nodeRepository) {
- Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository.nodes().getNode(parent)).orElse(node);
+ Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository.nodes().node(parent)).orElse(node);
return reasonsToFailParentHost(hostNode).size() > 0;
}
@@ -224,7 +223,7 @@ public class NodeFailer extends NodeRepositoryMaintainer {
}
/** Is the node and all active children suspended? */
- private boolean hostSuspended(Node node, List<Node> activeNodes) {
+ private boolean hostSuspended(Node node, NodeList activeNodes) {
if (!nodeSuspended(node)) return false;
if (node.parentHostname().isPresent()) return true; // optimization
return activeNodes.stream()
@@ -246,7 +245,7 @@ public class NodeFailer extends NodeRepositoryMaintainer {
return true;
case proxy:
case proxyhost:
- return nodeRepository().nodes().getNodes(nodeType, Node.State.failed).size() == 0;
+ return nodeRepository().nodes().list(Node.State.failed).nodeType(nodeType).isEmpty();
default:
return false;
}
@@ -303,7 +302,7 @@ public class NodeFailer extends NodeRepositoryMaintainer {
private boolean throttle(Node node) {
if (throttlePolicy == ThrottlePolicy.disabled) return false;
Instant startOfThrottleWindow = clock().instant().minus(throttlePolicy.throttleWindow);
- List<Node> nodes = nodeRepository().nodes().getNodes();
+ NodeList nodes = nodeRepository().nodes().list();
NodeList recentlyFailedNodes = nodes.stream()
.filter(n -> n.state() == Node.State.failed)
.filter(n -> n.history().hasEventAfter(History.Event.Type.failed, startOfThrottleWindow))
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeHealthTracker.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeHealthTracker.java
index 92131a1cd74..2950de285b9 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeHealthTracker.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeHealthTracker.java
@@ -58,7 +58,7 @@ public class NodeHealthTracker extends NodeRepositoryMaintainer {
// Update node last request events through ZooKeeper to collect request to all config servers.
// We do this here ("lazily") to avoid writing to zk for each config request.
try (Mutex lock = nodeRepository().nodes().lockUnallocated()) {
- for (Node node : nodeRepository().nodes().getNodes(Node.State.ready)) {
+ for (Node node : nodeRepository().nodes().list(Node.State.ready)) {
Optional<Instant> lastLocalRequest = hostLivenessTracker.lastRequestFrom(node.hostname());
if (lastLocalRequest.isEmpty()) continue;
@@ -116,7 +116,7 @@ public class NodeHealthTracker extends NodeRepositoryMaintainer {
/** Get node by given hostname and application. The applicationLock must be held when calling this */
private Optional<Node> getNode(String hostname, ApplicationId application, @SuppressWarnings("unused") Mutex applicationLock) {
- return nodeRepository().nodes().getNode(hostname, Node.State.active)
+ return nodeRepository().nodes().node(hostname, Node.State.active)
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(application));
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooter.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooter.java
index e2cafbb9406..6ee657beadd 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooter.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooter.java
@@ -39,7 +39,7 @@ public class NodeRebooter extends NodeRepositoryMaintainer {
@Override
protected boolean maintain() {
// Reboot candidates: Nodes in long-term states, where we know we can safely orchestrate a reboot
- List<Node> nodesToReboot = nodeRepository().nodes().getNodes(Node.State.active, Node.State.ready).stream()
+ List<Node> nodesToReboot = nodeRepository().nodes().list(Node.State.active, Node.State.ready).stream()
.filter(node -> node.type().isHost())
.filter(this::shouldReboot)
.collect(Collectors.toList());
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainer.java
index 8253b3def0a..e0f6c9d78bb 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainer.java
@@ -9,6 +9,7 @@ import com.yahoo.vespa.flags.FetchVector;
import com.yahoo.vespa.flags.FlagSource;
import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import java.time.Duration;
@@ -75,8 +76,8 @@ public class PeriodicApplicationMaintainer extends ApplicationMaintainer {
return ! skipMaintenanceDeployment.value();
}
- protected List<Node> nodesNeedingMaintenance() {
- return nodeRepository().nodes().getNodes(Node.State.active);
+ protected NodeList nodesNeedingMaintenance() {
+ return nodeRepository().nodes().list(Node.State.active);
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirer.java
index 337d25ca732..3064ac2d16b 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirer.java
@@ -7,6 +7,7 @@ import com.yahoo.config.provision.Deployer;
import com.yahoo.jdisc.Metric;
import com.yahoo.vespa.applicationmodel.HostName;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.History;
import com.yahoo.vespa.orchestrator.OrchestrationException;
@@ -45,7 +46,7 @@ public class RetiredExpirer extends NodeRepositoryMaintainer {
@Override
protected boolean maintain() {
- List<Node> activeNodes = nodeRepository().nodes().getNodes(Node.State.active);
+ NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
Map<ApplicationId, List<Node>> retiredNodesByApplication = activeNodes.stream()
.filter(node -> node.allocation().isPresent())
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainer.java
index debc1484e58..ca580753fc8 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainer.java
@@ -165,7 +165,7 @@ public class SpareCapacityMaintainer extends NodeRepositoryMaintainer {
try (MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, metric, nodeRepository())) {
if ( ! deployment.isValid()) return; // this will be done at another config server
- Optional<Node> nodeWithWantToRetire = nodeRepository().nodes().getNode(nodeToRetire.get().hostname())
+ Optional<Node> nodeWithWantToRetire = nodeRepository().nodes().node(nodeToRetire.get().hostname())
.map(node -> node.withWantToRetire(true, Agent.SpareCapacityMaintainer, nodeRepository().clock().instant()));
if (nodeWithWantToRetire.isEmpty()) return;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
index d61c6f38306..3e94201d87a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
@@ -70,53 +70,24 @@ public class Nodes {
* @param inState the states the node may be in. If no states are given, it will be returned from any state
* @return the node, or empty if it was not found in any of the given states
*/
- public Optional<Node> getNode(String hostname, Node.State... inState) {
+ public Optional<Node> node(String hostname, Node.State... inState) {
return db.readNode(hostname, inState);
}
/**
- * Returns all nodes in any of the given states.
+ * Returns a list of nodes in this repository in any of the given states
*
* @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
- * @return the node, or empty if it was not found in any of the given states
- */
- public List<Node> getNodes(Node.State... inState) {
- return new ArrayList<>(db.readNodes(inState));
- }
- /**
- * Finds and returns the nodes of the given type in any of the given states.
- *
- * @param type the node type to return
- * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
- * @return the node, or empty if it was not found in any of the given states
*/
- public List<Node> getNodes(NodeType type, Node.State... inState) {
- return db.readNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
- }
-
- /** Returns a filterable list of nodes in this repository in any of the given states */
public NodeList list(Node.State... inState) {
- return NodeList.copyOf(getNodes(inState));
- }
-
- public NodeList list(ApplicationId application, Node.State... inState) {
- return NodeList.copyOf(getNodes(application, inState));
- }
-
- /** Returns a filterable list of all nodes of an application */
- public NodeList list(ApplicationId application) {
- return NodeList.copyOf(getNodes(application));
+ return NodeList.copyOf(db.readNodes(inState));
}
/** Returns a locked list of all nodes in this repository */
public LockedNodeList list(Mutex lock) {
- return new LockedNodeList(getNodes(), lock);
+ return new LockedNodeList(list().asList(), lock);
}
- public List<Node> getNodes(ApplicationId id, Node.State... inState) { return db.readNodes(id, inState); }
- public List<Node> getInactive() { return db.readNodes(Node.State.inactive); }
- public List<Node> getFailed() { return db.readNodes(Node.State.failed); }
-
/**
* Returns whether the zone managed by this node repository seems to be working.
* If too many nodes are not responding, there is probably some zone-wide issue
@@ -138,7 +109,7 @@ public class Nodes {
illegal("Cannot add " + node + ": This is not a docker node");
if (node.allocation().isEmpty())
illegal("Cannot add " + node + ": Docker containers needs to be allocated");
- Optional<Node> existing = getNode(node.hostname());
+ Optional<Node> existing = node(node.hostname());
if (existing.isPresent())
illegal("Cannot add " + node + ": A node with this name already exists (" +
existing.get() + ", " + existing.get().history() + "). Node to be added: " +
@@ -165,7 +136,7 @@ public class Nodes {
illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
}
- Optional<Node> existing = getNode(node.hostname());
+ Optional<Node> existing = node(node.hostname());
if (existing.isPresent()) {
if (existing.get().state() != Node.State.deprovisioned)
illegal("Cannot add " + node + ": A node with this name already exists");
@@ -203,7 +174,7 @@ public class Nodes {
}
public Node setReady(String hostname, Agent agent, String reason) {
- Node nodeToReady = getNode(hostname).orElseThrow(() ->
+ Node nodeToReady = node(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
if (nodeToReady.state() == Node.State.ready) return nodeToReady;
@@ -255,7 +226,7 @@ public class Nodes {
}
public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) {
- Node nodeToDirty = getNode(hostname).orElseThrow(() ->
+ Node nodeToDirty = node(hostname).orElseThrow(() ->
new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));
List<Node> nodesToDirty =
@@ -368,7 +339,7 @@ public class Nodes {
* Moves a host to breakfixed state, removing any children.
*/
public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
- Node node = getNode(hostname).orElseThrow(() ->
+ Node node = node(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not breakfix " + hostname + ": Node not found"));
try (Mutex lock = lockUnallocated()) {
@@ -397,7 +368,7 @@ public class Nodes {
private Node move(String hostname, boolean keepAllocation, Node.State toState, Agent agent, Optional<String> reason,
NestedTransaction transaction) {
- Node node = getNode(hostname).orElseThrow(() ->
+ Node node = node(hostname).orElseThrow(() ->
new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
if (!keepAllocation && node.allocation().isPresent()) {
@@ -421,7 +392,7 @@ public class Nodes {
// TODO: Work out a safe lock acquisition strategy for moves, e.g. migrate to lockNode.
try (Mutex lock = lock(node)) {
if (toState == Node.State.active) {
- for (Node currentActive : getNodes(node.allocation().get().owner(), Node.State.active)) {
+ for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) {
if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
&& node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
@@ -436,7 +407,7 @@ public class Nodes {
* containers this will remove the node from node repository, otherwise the node will be moved to state ready.
*/
public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
- Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
+ Node node = node(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
if (node.state() != Node.State.dirty)
illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
@@ -445,7 +416,7 @@ public class Nodes {
if (node.state() == Node.State.ready) return node;
- Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node);
+ Node parentHost = node.parentHostname().flatMap(this::node).orElse(node);
List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost);
if ( ! failureReasons.isEmpty())
illegal(node + " cannot be readied because it has hard failures: " + failureReasons);
@@ -459,7 +430,7 @@ public class Nodes {
* @return a List of all the nodes that have been removed or (for hosts) deprovisioned
*/
public List<Node> removeRecursively(String hostname) {
- Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
+ Node node = node(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
return removeRecursively(node, false);
}
@@ -687,9 +658,9 @@ public class Nodes {
Mutex lockToClose = lock(staleNode);
try {
// As an optimization we first try finding the node in the same state
- Optional<Node> freshNode = getNode(staleNode.hostname(), staleNode.state());
+ Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state());
if (freshNode.isEmpty()) {
- freshNode = getNode(staleNode.hostname());
+ freshNode = node(staleNode.hostname());
if (freshNode.isEmpty()) {
return Optional.empty();
}
@@ -715,7 +686,7 @@ public class Nodes {
/** Returns the unallocated/application lock, and the node acquired under that lock. */
public Optional<NodeMutex> lockAndGet(String hostname) {
- return getNode(hostname).flatMap(this::lockAndGet);
+ return node(hostname).flatMap(this::lockAndGet);
}
/** Returns the unallocated/application lock, and the node acquired under that lock. */
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java
index 696853b2992..6150ee9f4a0 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java
@@ -261,6 +261,8 @@ public class CuratorDatabaseClient {
/**
* Returns all nodes which are in one of the given states.
* If no states are given this returns all nodes.
+ *
+ * @return the nodes in a mutable list owned by the caller
*/
public List<Node> readNodes(Node.State ... states) {
List<Node> nodes = new ArrayList<>();
@@ -276,16 +278,6 @@ public class CuratorDatabaseClient {
return nodes;
}
- /**
- * Returns all nodes allocated to the given application which are in one of the given states
- * If no states are given this returns all nodes.
- */
- public List<Node> readNodes(ApplicationId applicationId, Node.State ... states) {
- List<Node> nodes = readNodes(states);
- nodes.removeIf(node -> ! node.allocation().isPresent() || ! node.allocation().get().owner().equals(applicationId));
- return nodes;
- }
-
/**
* Returns a particular node, or empty if this node is not in any of the given states.
* If no states are given this returns the node if it is present in any state.
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
index cad9faacf20..3c936e4e6ba 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
@@ -126,7 +126,7 @@ class Activator {
private void unreserveParentsOf(List<Node> nodes) {
for (Node node : nodes) {
if ( node.parentHostname().isEmpty()) continue;
- Optional<Node> parentNode = nodeRepository.nodes().getNode(node.parentHostname().get());
+ Optional<Node> parentNode = nodeRepository.nodes().node(node.parentHostname().get());
if (parentNode.isEmpty()) continue;
if (parentNode.get().reservedTo().isEmpty()) continue;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
index 156d1023bbc..b1bba656dc8 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
@@ -210,8 +210,7 @@ public class LoadBalancerProvisioner {
/** Returns the load balanced clusters of given application and their nodes */
private Map<ClusterSpec.Id, List<Node>> loadBalancedClustersOf(ApplicationId application) {
- NodeList nodes = NodeList.copyOf(nodeRepository.nodes().getNodes(Node.State.reserved, Node.State.active))
- .owner(application);
+ NodeList nodes = nodeRepository.nodes().list(Node.State.reserved, Node.State.active).owner(application);
if (nodes.stream().anyMatch(node -> node.type() == NodeType.config)) {
nodes = nodes.nodeType(NodeType.config).type(ClusterSpec.Type.admin);
} else if (nodes.stream().anyMatch(node -> node.type() == NodeType.controller)) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
index 825ea82e95c..79e1005eb47 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
@@ -150,7 +150,7 @@ public class NodeRepositoryProvisioner implements Provisioner {
private ClusterResources currentResources(ApplicationId applicationId,
ClusterSpec clusterSpec,
Capacity requested) {
- List<Node> nodes = NodeList.copyOf(nodeRepository.nodes().getNodes(applicationId, Node.State.active))
+ List<Node> nodes = nodeRepository.nodes().list(Node.State.active).owner(applicationId)
.cluster(clusterSpec.id())
.not().retired()
.not().removable()
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
index 18ab9b70491..87b3742efb4 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
@@ -85,7 +85,7 @@ class Preparer {
*/
private List<Node> findNodesInRemovableGroups(ApplicationId application, ClusterSpec requestedCluster, int wantedGroups) {
List<Node> surplusNodes = new ArrayList<>(0);
- for (Node node : nodeRepository.nodes().getNodes(application, Node.State.active)) {
+ for (Node node : nodeRepository.nodes().list(Node.State.active).owner(application)) {
ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
if ( ! nodeCluster.id().equals(requestedCluster.id())) continue;
if ( ! nodeCluster.type().equals(requestedCluster.type())) continue;
@@ -127,7 +127,7 @@ class Preparer {
*/
private int findHighestIndex(ApplicationId application, ClusterSpec cluster) {
int highestIndex = -1;
- for (Node node : nodeRepository.nodes().getNodes(application, Node.State.allocatedStates().toArray(new Node.State[0]))) {
+ for (Node node : nodeRepository.nodes().list(Node.State.allocatedStates().toArray(new Node.State[0])).owner(application)) {
ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
if ( ! nodeCluster.id().equals(cluster.id())) continue;
if ( ! nodeCluster.type().equals(cluster.type())) continue;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodeAclResponse.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodeAclResponse.java
index 708a2f73ee6..811afc77cef 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodeAclResponse.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodeAclResponse.java
@@ -39,7 +39,7 @@ public class NodeAclResponse extends HttpResponse {
}
private void toSlime(String hostname, Cursor object) {
- Node node = nodeRepository.nodes().getNode(hostname)
+ Node node = nodeRepository.nodes().node(hostname)
.orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
List<NodeAcl> acls = aclsForChildren ? nodeRepository.getChildAcls(node) :
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java
index e71902f908b..1175736e517 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java
@@ -110,13 +110,13 @@ class NodesResponse extends HttpResponse {
private void nodesToSlime(Node.State state, Cursor parentObject) {
Cursor nodeArray = parentObject.setArray("nodes");
for (NodeType type : NodeType.values())
- toSlime(nodeRepository.nodes().getNodes(type, state), nodeArray);
+ toSlime(nodeRepository.nodes().list(state).nodeType(type).asList(), nodeArray);
}
/** Outputs all the nodes to a node array */
private void nodesToSlime(Cursor parentObject) {
Cursor nodeArray = parentObject.setArray("nodes");
- toSlime(nodeRepository.nodes().getNodes(), nodeArray);
+ toSlime(nodeRepository.nodes().list().asList(), nodeArray);
}
private void toSlime(List<Node> nodes, Cursor array) {
@@ -127,7 +127,7 @@ class NodesResponse extends HttpResponse {
}
private void nodeToSlime(String hostname, Cursor object) {
- Node node = nodeRepository.nodes().getNode(hostname).orElseThrow(() ->
+ Node node = nodeRepository.nodes().node(hostname).orElseThrow(() ->
new NotFoundException("No node with hostname '" + hostname + "'"));
toSlime(node, true, object);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java
index a6a58b6a9dd..c4c11222702 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java
@@ -223,7 +223,7 @@ public class NodesV2ApiHandler extends LoggingRequestHandler {
private Node nodeFromRequest(HttpRequest request) {
String hostname = lastElement(request.getUri().getPath());
- return nodeRepository.nodes().getNode(hostname).orElseThrow(() ->
+ return nodeRepository.nodes().node(hostname).orElseThrow(() ->
new NotFoundException("No node found with hostname " + hostname));
}
@@ -435,7 +435,7 @@ public class NodesV2ApiHandler extends LoggingRequestHandler {
if (application.isEmpty())
return ErrorResponse.notFoundError("No application '" + id + "'");
Slime slime = ApplicationSerializer.toSlime(application.get(),
- nodeRepository.nodes().getNodes(id, Node.State.active),
+ nodeRepository.nodes().list(Node.State.active).owner(id).asList(),
withPath("/nodes/v2/applications/" + id, uri));
return new SlimeJsonResponse(slime);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/ServiceMonitorStub.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/ServiceMonitorStub.java
index ce30baa3862..23c20adb842 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/ServiceMonitorStub.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/ServiceMonitorStub.java
@@ -70,7 +70,7 @@ public class ServiceMonitorStub implements ServiceMonitor {
Map<ApplicationInstanceReference, ApplicationInstance> status = new HashMap<>();
for (Map.Entry<ApplicationId, MockDeployer.ApplicationContext> app : apps.entrySet()) {
Set<ServiceInstance> serviceInstances = new HashSet<>();
- for (Node node : nodeRepository.nodes().getNodes(app.getValue().id(), Node.State.active)) {
+ for (Node node : nodeRepository.nodes().list(Node.State.active).owner(app.getValue().id())) {
serviceInstances.add(new ServiceInstance(new ConfigId("configid"),
new HostName(node.hostname()),
getHostStatus(node.hostname())));
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java
index 0617884d227..c74ecaccd06 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java
@@ -32,19 +32,19 @@ public class NodeRepositoryTest {
@Test
public void add_and_remove() {
NodeRepositoryTester tester = new NodeRepositoryTester();
- assertEquals(0, tester.nodeRepository().nodes().getNodes().size());
+ assertEquals(0, tester.nodeRepository().nodes().list().size());
tester.addHost("id1", "host1", "default", NodeType.host);
tester.addHost("id2", "host2", "default", NodeType.host);
tester.addHost("id3", "host3", "default", NodeType.host);
- assertEquals(3, tester.nodeRepository().nodes().getNodes().size());
+ assertEquals(3, tester.nodeRepository().nodes().list().size());
tester.nodeRepository().nodes().park("host2", true, Agent.system, "Parking to unit test");
tester.nodeRepository().nodes().removeRecursively("host2");
- assertEquals(3, tester.nodeRepository().nodes().getNodes().size());
- assertEquals(1, tester.nodeRepository().nodes().getNodes(Node.State.deprovisioned).size());
+ assertEquals(3, tester.nodeRepository().nodes().list().size());
+ assertEquals(1, tester.nodeRepository().nodes().list(Node.State.deprovisioned).size());
}
@Test
@@ -75,13 +75,13 @@ public class NodeRepositoryTest {
tester.setNodeState("cfg1", Node.State.dirty);
tester.nodeRepository().nodes().markNodeAvailableForNewAllocation("host1", Agent.system, getClass().getSimpleName());
- assertEquals(Node.State.ready, tester.nodeRepository().nodes().getNode("host1").get().state());
+ assertEquals(Node.State.ready, tester.nodeRepository().nodes().node("host1").get().state());
tester.nodeRepository().nodes().markNodeAvailableForNewAllocation("host2", Agent.system, getClass().getSimpleName());
- assertFalse(tester.nodeRepository().nodes().getNode("host2").isPresent());
+ assertFalse(tester.nodeRepository().nodes().node("host2").isPresent());
tester.nodeRepository().nodes().markNodeAvailableForNewAllocation("cfg1", Agent.system, getClass().getSimpleName());
- assertEquals(Node.State.ready, tester.nodeRepository().nodes().getNode("cfg1").get().state());
+ assertEquals(Node.State.ready, tester.nodeRepository().nodes().node("cfg1").get().state());
}
@Test
@@ -92,14 +92,14 @@ public class NodeRepositoryTest {
tester.setNodeState("host1", Node.State.dirty);
tester.setNodeState("host2", Node.State.dirty);
- Node node2 = tester.nodeRepository().nodes().getNode("host2").orElseThrow();
+ Node node2 = tester.nodeRepository().nodes().node("host2").orElseThrow();
var reportsBuilder = new Reports.Builder(node2.reports());
reportsBuilder.setReport(Report.basicReport("reportId", Report.Type.HARD_FAIL, Instant.EPOCH, "hardware failure"));
node2 = node2.with(reportsBuilder.build());
tester.nodeRepository().nodes().write(node2, () -> {});
tester.nodeRepository().nodes().markNodeAvailableForNewAllocation("host1", Agent.system, getClass().getSimpleName());
- assertEquals(Node.State.ready, tester.nodeRepository().nodes().getNode("host1").get().state());
+ assertEquals(Node.State.ready, tester.nodeRepository().nodes().node("host1").get().state());
try {
tester.nodeRepository().nodes().markNodeAvailableForNewAllocation("host2", Agent.system, getClass().getSimpleName());
@@ -120,7 +120,7 @@ public class NodeRepositoryTest {
tester.addNode("node12", "node12", "host1", "docker", NodeType.tenant);
tester.addNode("node20", "node20", "host2", "docker", NodeType.tenant);
tester.setNodeState("node11", Node.State.active);
- assertEquals(6, tester.nodeRepository().nodes().getNodes().size());
+ assertEquals(6, tester.nodeRepository().nodes().list().size());
try {
tester.nodeRepository().nodes().removeRecursively("host1");
@@ -128,21 +128,21 @@ public class NodeRepositoryTest {
} catch (IllegalArgumentException ignored) {
// Expected
}
- assertEquals(6, tester.nodeRepository().nodes().getNodes().size());
+ assertEquals(6, tester.nodeRepository().nodes().list().size());
// Should be OK to delete host2 as both host2 and its only child, node20, are in state provisioned
tester.nodeRepository().nodes().removeRecursively("host2");
- assertEquals(5, tester.nodeRepository().nodes().getNodes().size());
- assertEquals(Node.State.deprovisioned, tester.nodeRepository().nodes().getNode("host2").get().state());
+ assertEquals(5, tester.nodeRepository().nodes().list().size());
+ assertEquals(Node.State.deprovisioned, tester.nodeRepository().nodes().node("host2").get().state());
// Now node10 is in provisioned, set node11 to failed and node12 to ready, and it should be OK to delete host1
tester.nodeRepository().nodes().fail("node11", Agent.system, getClass().getSimpleName());
tester.nodeRepository().nodes().setReady("node12", Agent.system, getClass().getSimpleName());
tester.nodeRepository().nodes().removeRecursively("node12"); // Remove one of the children first instead
- assertEquals(4, tester.nodeRepository().nodes().getNodes().size());
+ assertEquals(4, tester.nodeRepository().nodes().list().size());
tester.nodeRepository().nodes().removeRecursively("host1");
- assertEquals(Node.State.deprovisioned, tester.nodeRepository().nodes().getNode("host1").get().state());
- assertEquals(IP.Config.EMPTY.primary(), tester.nodeRepository().nodes().getNode("host1").get().ipConfig().primary());
+ assertEquals(Node.State.deprovisioned, tester.nodeRepository().nodes().node("host1").get().state());
+ assertEquals(IP.Config.EMPTY.primary(), tester.nodeRepository().nodes().node("host1").get().ipConfig().primary());
}
@Test
@@ -155,13 +155,13 @@ public class NodeRepositoryTest {
tester.addNode("id2", cfg1, cfghost1, "docker", NodeType.config);
tester.setNodeState(cfghost1, Node.State.active);
tester.setNodeState(cfg1, Node.State.active);
- assertEquals(2, tester.nodeRepository().nodes().getNodes().size());
+ assertEquals(2, tester.nodeRepository().nodes().list().size());
try {
tester.nodeRepository().nodes().removeRecursively(cfghost1);
fail("Should not be able to delete host node, one of the children is in state active");
} catch (IllegalArgumentException ignored) { }
- assertEquals(2, tester.nodeRepository().nodes().getNodes().size());
+ assertEquals(2, tester.nodeRepository().nodes().list().size());
// Fail host and container
tester.nodeRepository().nodes().failRecursively(cfghost1, Agent.system, getClass().getSimpleName());
@@ -179,7 +179,7 @@ public class NodeRepositoryTest {
tester.clock().advance(Duration.ofSeconds(1));
tester.addHost("id1", "host1", "default", NodeType.host);
tester.addHost("id2", "host2", "default", NodeType.host);
- assertFalse(tester.nodeRepository().nodes().getNode("host1").get().history().hasEventAfter(History.Event.Type.deprovisioned, testStart));
+ assertFalse(tester.nodeRepository().nodes().node("host1").get().history().hasEventAfter(History.Event.Type.deprovisioned, testStart));
// Set host 1 properties and deprovision it
try (var lock = tester.nodeRepository().nodes().lockAndGetRequired("host1")) {
@@ -192,16 +192,16 @@ public class NodeRepositoryTest {
tester.nodeRepository().nodes().removeRecursively("host1");
// Host 1 is deprovisioned and unwanted properties are cleared
- Node host1 = tester.nodeRepository().nodes().getNode("host1").get();
+ Node host1 = tester.nodeRepository().nodes().node("host1").get();
assertEquals(Node.State.deprovisioned, host1.state());
assertTrue(host1.history().hasEventAfter(History.Event.Type.deprovisioned, testStart));
// Adding it again preserves some information from the deprovisioned host and removes it
tester.addHost("id2", "host1", "default", NodeType.host);
- host1 = tester.nodeRepository().nodes().getNode("host1").get();
+ host1 = tester.nodeRepository().nodes().node("host1").get();
assertEquals("This is the newly added node", "id2", host1.id());
assertFalse("The old 'host1' is removed",
- tester.nodeRepository().nodes().getNode("host1", Node.State.deprovisioned).isPresent());
+ tester.nodeRepository().nodes().node("host1", Node.State.deprovisioned).isPresent());
assertFalse("Not transferred from deprovisioned host", host1.status().wantToRetire());
assertFalse("Not transferred from deprovisioned host", host1.status().wantToDeprovision());
assertTrue("Transferred from deprovisioned host", host1.history().hasEventAfter(History.Event.Type.deprovisioned, testStart));
@@ -225,7 +225,7 @@ public class NodeRepositoryTest {
tester.setNodeState("node12", Node.State.active);
tester.setNodeState("node20", Node.State.failed);
- assertEquals(6, tester.nodeRepository().nodes().getNodes().size());
+ assertEquals(6, tester.nodeRepository().nodes().list().size());
// Should be OK to dirty host2 as it is in provisioned and its only child is in failed
tester.nodeRepository().nodes().deallocateRecursively("host2", Agent.system, NodeRepositoryTest.class.getSimpleName());
@@ -267,8 +267,8 @@ public class NodeRepositoryTest {
tester.setNodeState("node1", Node.State.failed);
tester.nodeRepository().nodes().breakfixRecursively("host1", Agent.system, reason);
- assertEquals(1, tester.nodeRepository().nodes().getNodes().size());
- Node node = tester.nodeRepository().nodes().getNodes().get(0);
+ assertEquals(1, tester.nodeRepository().nodes().list().size());
+ Node node = tester.nodeRepository().nodes().list().first().get();
assertEquals("host1", node.hostname());
assertEquals(Node.State.breakfixed, node.state());
}
@@ -279,7 +279,7 @@ public class NodeRepositoryTest {
private static Set<String> filterNodes(NodeRepositoryTester tester, Predicate<Node> filter) {
return tester.nodeRepository().nodes()
- .getNodes().stream()
+ .list().stream()
.filter(filter)
.map(Node::hostname)
.collect(Collectors.toSet());
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTester.java
index 5e3b474e48b..195a27f21a3 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTester.java
@@ -52,7 +52,7 @@ public class NodeRepositoryTester {
public MockCurator curator() { return curator; }
public List<Node> getNodes(NodeType type, Node.State ... inState) {
- return nodeRepository.nodes().getNodes(type, inState);
+ return nodeRepository.nodes().list(inState).nodeType(type).asList();
}
public Node addHost(String id, String hostname, String flavor, NodeType type) {
@@ -79,7 +79,7 @@ public class NodeRepositoryTester {
* of valid state transitions
*/
public void setNodeState(String hostname, Node.State state) {
- Node node = nodeRepository.nodes().getNode(hostname).orElseThrow(RuntimeException::new);
+ Node node = nodeRepository.nodes().node(hostname).orElseThrow(RuntimeException::new);
nodeRepository.database().writeTo(state, node, Agent.system, Optional.empty());
}
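For reference, a minimal sketch of the migration pattern applied throughout these tests, using only the calls visible in this patch (node(), list(), and the owner/nodeType/state filters on NodeList, plus first() and asList()); the class and method names below are hypothetical:

import java.util.List;
import java.util.Optional;

import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.NodeType;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeRepository;

class NodeListMigrationSketch {

    // Before: nodes().getNodes(NodeType.host, Node.State.active)
    // After:  filter a NodeList and materialize a List only where one is required.
    static List<Node> activeHosts(NodeRepository nodeRepository) {
        return nodeRepository.nodes().list(Node.State.active)
                             .nodeType(NodeType.host)
                             .asList();
    }

    // Before: nodes().getNode(hostname)
    // After:  node(hostname) still yields an Optional<Node>.
    static Optional<Node> byHostname(NodeRepository nodeRepository, String hostname) {
        return nodeRepository.nodes().node(hostname);
    }

    // Before: getNodes(application, Node.State.active).get(0)
    // After:  first() returns an Optional instead of relying on list indexing.
    static Optional<Node> firstActiveOf(NodeRepository nodeRepository, ApplicationId application) {
        return nodeRepository.nodes().list(Node.State.active).owner(application).first();
    }
}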
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java
index b17c8fc35b6..60eb66c1779 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java
@@ -80,7 +80,7 @@ public class RealDataScenarioTest {
};
deploy(tester, app, specs, capacities);
- tester.nodeRepository().nodes().list(app).cluster(specs[1].id()).forEach(System.out::println);
+ tester.nodeRepository().nodes().list().owner(app).cluster(specs[1].id()).forEach(System.out::println);
}
private void deploy(ProvisioningTester tester, ApplicationId app, ClusterSpec[] specs, Capacity[] capacities) {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingIntegrationTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingIntegrationTest.java
index 5f7b4bc865c..3a74c3a3cf6 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingIntegrationTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingIntegrationTest.java
@@ -60,7 +60,7 @@ public class AutoscalingIntegrationTest {
tester.nodeRepository().applications().put(application, lock);
}
var scaledResources = autoscaler.suggest(application.clusters().get(cluster1.id()),
- tester.nodeRepository().nodes().list(application1));
+ tester.nodeRepository().nodes().list().owner(application1));
assertTrue(scaledResources.isPresent());
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
index a5f3d5f2828..dbab02302f8 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
@@ -116,7 +116,7 @@ public class AutoscalingTest {
// deploy with slow
tester.deploy(application1, cluster1, 5, 1, hostResources);
- tester.nodeRepository().nodes().getNodes(application1).stream()
+ tester.nodeRepository().nodes().list().owner(application1).stream()
.allMatch(n -> n.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.slow);
tester.clock().advance(Duration.ofDays(2));
@@ -132,7 +132,7 @@ public class AutoscalingTest {
assertEquals("Disk speed from min/max is used",
NodeResources.DiskSpeed.any, scaledResources.nodeResources().diskSpeed());
tester.deploy(application1, cluster1, scaledResources);
- tester.nodeRepository().nodes().getNodes(application1).stream()
+ tester.nodeRepository().nodes().list().owner(application1).stream()
.allMatch(n -> n.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any);
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java
index e3148ad5de0..eb490079c98 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java
@@ -17,6 +17,7 @@ import com.yahoo.config.provision.Zone;
import com.yahoo.test.ManualClock;
import com.yahoo.transaction.Mutex;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.Nodelike;
import com.yahoo.vespa.hosted.provision.applications.Application;
@@ -95,9 +96,9 @@ class AutoscalingTester {
}
public void makeReady(String hostname) {
- Node node = nodeRepository().nodes().getNode(hostname).get();
+ Node node = nodeRepository().nodes().node(hostname).get();
provisioningTester.patchNode(node, (n) -> n.with(new IP.Config(Set.of("::" + 0 + ":0"), Set.of())));
- Node host = nodeRepository().nodes().getNode(node.parentHostname().get()).get();
+ Node host = nodeRepository().nodes().node(node.parentHostname().get()).get();
host = host.with(new IP.Config(Set.of("::" + 0 + ":0"), Set.of("::" + 0 + ":2")));
if (host.state() == Node.State.provisioned)
nodeRepository().nodes().setReady(List.of(host), Agent.system, getClass().getSimpleName());
@@ -105,7 +106,7 @@ class AutoscalingTester {
public void deactivateRetired(ApplicationId application, ClusterSpec cluster, ClusterResources resources) {
try (Mutex lock = nodeRepository().nodes().lock(application)){
- for (Node node : nodeRepository().nodes().getNodes(application, Node.State.active)) {
+ for (Node node : nodeRepository().nodes().list(Node.State.active).owner(application)) {
if (node.allocation().get().membership().retired())
nodeRepository().nodes().write(node.with(node.allocation().get().removable(true)), lock);
}
@@ -125,7 +126,7 @@ class AutoscalingTester {
*/
public void addCpuMeasurements(float value, float otherResourcesLoad,
int count, ApplicationId applicationId) {
- List<Node> nodes = nodeRepository().nodes().getNodes(applicationId, Node.State.active);
+ NodeList nodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId);
float oneExtraNodeFactor = (float)(nodes.size() - 1.0) / (nodes.size());
for (int i = 0; i < count; i++) {
clock().advance(Duration.ofMinutes(1));
@@ -156,7 +157,7 @@ class AutoscalingTester {
*/
public void addMemMeasurements(float value, float otherResourcesLoad,
int count, ApplicationId applicationId) {
- List<Node> nodes = nodeRepository().nodes().getNodes(applicationId, Node.State.active);
+ NodeList nodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId);
float oneExtraNodeFactor = (float)(nodes.size() - 1.0) / (nodes.size());
for (int i = 0; i < count; i++) {
clock().advance(Duration.ofMinutes(1));
@@ -181,7 +182,7 @@ class AutoscalingTester {
public void addMeasurements(float cpu, float memory, float disk, int generation, boolean inService, boolean stable,
int count, ApplicationId applicationId) {
- List<Node> nodes = nodeRepository().nodes().getNodes(applicationId, Node.State.active);
+ NodeList nodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId);
for (int i = 0; i < count; i++) {
clock().advance(Duration.ofMinutes(1));
for (Node node : nodes) {
@@ -204,7 +205,7 @@ class AutoscalingTester {
nodeRepository().applications().put(application, lock);
}
return autoscaler.autoscale(application.clusters().get(clusterId),
- nodeRepository().nodes().list(applicationId, Node.State.active));
+ nodeRepository().nodes().list(Node.State.active).owner(applicationId));
}
public Autoscaler.Advice suggest(ApplicationId applicationId, ClusterSpec.Id clusterId,
@@ -215,7 +216,7 @@ class AutoscalingTester {
nodeRepository().applications().put(application, lock);
}
return autoscaler.suggest(application.clusters().get(clusterId),
- nodeRepository().nodes().list(applicationId, Node.State.active));
+ nodeRepository().nodes().list(Node.State.active).owner(applicationId));
}
public ClusterResources assertResources(String message,
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcherTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcherTest.java
index 1ea4abab17b..384e8dd8439 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcherTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcherTest.java
@@ -78,8 +78,8 @@ public class MetricsV2MetricsFetcherTest {
{
httpClient.cannedResponse = cannedResponseForApplication2;
try (Mutex lock = tester.nodeRepository().nodes().lock(application1)) {
- tester.nodeRepository().nodes().write(tester.nodeRepository().nodes().getNodes(application2, Node.State.active)
- .get(0).retire(tester.clock().instant()), lock);
+ tester.nodeRepository().nodes().write(tester.nodeRepository().nodes().list(Node.State.active).owner(application2)
+ .first().get().retire(tester.clock().instant()), lock);
}
List<Pair<String, MetricSnapshot>> values = new ArrayList<>(fetcher.fetchMetrics(application2).get().metrics());
assertFalse(values.get(0).getSecond().stable());
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java
index d6e6a7548c2..887ce158e09 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java
@@ -13,6 +13,7 @@ import com.yahoo.config.provision.Zone;
import com.yahoo.config.provisioning.FlavorsConfig;
import com.yahoo.test.ManualClock;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.applications.ScalingEvent;
@@ -71,7 +72,7 @@ public class AutoscalingMaintainerTester {
}
public void addMeasurements(float cpu, float mem, float disk, long generation, int count, ApplicationId applicationId) {
- List<Node> nodes = nodeRepository().nodes().getNodes(applicationId, Node.State.active);
+ NodeList nodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId);
for (int i = 0; i < count; i++) {
for (Node node : nodes)
metricsDb.add(List.of(new Pair<>(node.hostname(), new MetricSnapshot(clock().instant(),
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityCheckerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityCheckerTest.java
index 038d5729f0e..3031bbc9819 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityCheckerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityCheckerTest.java
@@ -28,7 +28,7 @@ public class CapacityCheckerTest {
tester.populateNodeRepositoryFromJsonFile(Paths.get(path));
var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure();
assertTrue(failurePath.isPresent());
- assertTrue(tester.nodeRepository.nodes().getNodes(NodeType.host).containsAll(failurePath.get().hostsCausingFailure));
+ assertTrue(tester.nodeRepository.nodes().list().nodeType(NodeType.host).asList().containsAll(failurePath.get().hostsCausingFailure));
assertEquals(5, failurePath.get().hostsCausingFailure.size());
}
@@ -39,7 +39,7 @@ public class CapacityCheckerTest {
10, new NodeResources(-1, 10, 100, 1), 10,
0, new NodeResources(1, 10, 100, 1), 10);
int overcommittedHosts = tester.capacityChecker.findOvercommittedHosts().size();
- assertEquals(tester.nodeRepository.nodes().getNodes(NodeType.host).size(), overcommittedHosts);
+ assertEquals(tester.nodeRepository.nodes().list().nodeType(NodeType.host).size(), overcommittedHosts);
}
@Test
@@ -63,7 +63,7 @@ public class CapacityCheckerTest {
assertTrue(failurePath.isPresent());
assertTrue("Computing worst case host loss if all hosts have to be removed should result in an non-empty failureReason with empty nodes.",
failurePath.get().failureReason.tenant.isEmpty() && failurePath.get().failureReason.host.isEmpty());
- assertEquals(tester.nodeRepository.nodes().getNodes(NodeType.host).size(), failurePath.get().hostsCausingFailure.size());
+ assertEquals(tester.nodeRepository.nodes().list().nodeType(NodeType.host).size(), failurePath.get().hostsCausingFailure.size());
}
{
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java
index 2f735742ed5..26370709fa3 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java
@@ -20,6 +20,7 @@ import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.flags.custom.ClusterCapacity;
import com.yahoo.vespa.flags.custom.SharedHost;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Address;
import com.yahoo.vespa.hosted.provision.node.Agent;
@@ -62,9 +63,9 @@ public class DynamicProvisioningMaintainerTest {
var tester = new DynamicProvisioningTester().addInitialNodes();
tester.hostProvisioner.with(Behaviour.failDeprovisioning); // To avoid deleting excess nodes
- Node host3 = tester.nodeRepository.nodes().getNode("host3").orElseThrow();
- Node host4 = tester.nodeRepository.nodes().getNode("host4").orElseThrow();
- Node host41 = tester.nodeRepository.nodes().getNode("host4-1").orElseThrow();
+ Node host3 = tester.nodeRepository.nodes().node("host3").orElseThrow();
+ Node host4 = tester.nodeRepository.nodes().node("host4").orElseThrow();
+ Node host41 = tester.nodeRepository.nodes().node("host4-1").orElseThrow();
assertTrue("No IP addresses assigned",
Stream.of(host3, host4, host41).map(node -> node.ipConfig().primary()).allMatch(Set::isEmpty));
@@ -73,9 +74,9 @@ public class DynamicProvisioningMaintainerTest {
Node host41new = host41.with(host41.ipConfig().withPrimary(Set.of("::4:1", "::4:2")));
tester.maintainer.maintain();
- assertEquals(host3new, tester.nodeRepository.nodes().getNode("host3").get());
- assertEquals(host4new, tester.nodeRepository.nodes().getNode("host4").get());
- assertEquals(host41new, tester.nodeRepository.nodes().getNode("host4-1").get());
+ assertEquals(host3new, tester.nodeRepository.nodes().node("host3").get());
+ assertEquals(host4new, tester.nodeRepository.nodes().node("host4").get());
+ assertEquals(host41new, tester.nodeRepository.nodes().node("host4-1").get());
}
@Test
@@ -88,29 +89,29 @@ public class DynamicProvisioningMaintainerTest {
tester.maintainer.maintain();
assertEquals(Set.of("host4", "host4-1"),
- tester.nodeRepository.nodes().getNodes(Node.State.failed).stream().map(Node::hostname).collect(Collectors.toSet()));
+ tester.nodeRepository.nodes().list(Node.State.failed).stream().map(Node::hostname).collect(Collectors.toSet()));
}
@Test
public void finds_nodes_that_need_deprovisioning_without_pre_provisioning() {
var tester = new DynamicProvisioningTester().addInitialNodes();
- assertTrue(tester.nodeRepository.nodes().getNode("host2").isPresent());
- assertTrue(tester.nodeRepository.nodes().getNode("host3").isPresent());
+ assertTrue(tester.nodeRepository.nodes().node("host2").isPresent());
+ assertTrue(tester.nodeRepository.nodes().node("host3").isPresent());
tester.maintainer.maintain();
- assertTrue(tester.nodeRepository.nodes().getNode("host2").isEmpty());
- assertTrue(tester.nodeRepository.nodes().getNode("host3").isEmpty());
+ assertTrue(tester.nodeRepository.nodes().node("host2").isEmpty());
+ assertTrue(tester.nodeRepository.nodes().node("host3").isEmpty());
}
@Test
public void does_not_deprovision_when_preprovisioning_enabled() {
var tester = new DynamicProvisioningTester().addInitialNodes();
tester.flagSource.withListFlag(PermanentFlags.PREPROVISION_CAPACITY.id(), List.of(new ClusterCapacity(1, 1, 3, 2, 1.0)), ClusterCapacity.class);
- Optional<Node> failedHost = tester.nodeRepository.nodes().getNode("host2");
+ Optional<Node> failedHost = tester.nodeRepository.nodes().node("host2");
assertTrue(failedHost.isPresent());
tester.maintainer.maintain();
- assertTrue("Failed host is deprovisioned", tester.nodeRepository.nodes().getNode(failedHost.get().hostname()).isEmpty());
+ assertTrue("Failed host is deprovisioned", tester.nodeRepository.nodes().node(failedHost.get().hostname()).isEmpty());
assertEquals(1, tester.hostProvisioner.deprovisionedHosts);
}
@@ -123,24 +124,24 @@ public class DynamicProvisioningMaintainerTest {
ClusterCapacity.class);
assertEquals(0, tester.hostProvisioner.provisionedHosts.size());
- assertEquals(11, tester.nodeRepository.nodes().getNodes().size());
- assertTrue(tester.nodeRepository.nodes().getNode("host2").isPresent());
- assertTrue(tester.nodeRepository.nodes().getNode("host2-1").isPresent());
- assertTrue(tester.nodeRepository.nodes().getNode("host3").isPresent());
- assertTrue(tester.nodeRepository.nodes().getNode("hostname100").isEmpty());
- assertTrue(tester.nodeRepository.nodes().getNode("hostname101").isEmpty());
+ assertEquals(11, tester.nodeRepository.nodes().list().size());
+ assertTrue(tester.nodeRepository.nodes().node("host2").isPresent());
+ assertTrue(tester.nodeRepository.nodes().node("host2-1").isPresent());
+ assertTrue(tester.nodeRepository.nodes().node("host3").isPresent());
+ assertTrue(tester.nodeRepository.nodes().node("hostname100").isEmpty());
+ assertTrue(tester.nodeRepository.nodes().node("hostname101").isEmpty());
tester.maintainer.maintain();
assertEquals(2, tester.hostProvisioner.provisionedHosts.size());
assertEquals(2, tester.provisionedHostsMatching(new NodeResources(48, 128, 1000, 10)));
- List<Node> nodesAfter = tester.nodeRepository.nodes().getNodes();
+ NodeList nodesAfter = tester.nodeRepository.nodes().list();
assertEquals(11, nodesAfter.size()); // 2 removed, 2 added
- assertTrue("Failed host 'host2' is deprovisioned", tester.nodeRepository.nodes().getNode("host2").isEmpty());
- assertTrue("Node on deprovisioned host removed", tester.nodeRepository.nodes().getNode("host2-1").isEmpty());
- assertTrue("Host satisfying 16-24-100-1 is kept", tester.nodeRepository.nodes().getNode("host3").isPresent());
- assertTrue("New 48-128-1000-10 host added", tester.nodeRepository.nodes().getNode("hostname100").isPresent());
- assertTrue("New 48-128-1000-10 host added", tester.nodeRepository.nodes().getNode("hostname101").isPresent());
+ assertTrue("Failed host 'host2' is deprovisioned", tester.nodeRepository.nodes().node("host2").isEmpty());
+ assertTrue("Node on deprovisioned host removed", tester.nodeRepository.nodes().node("host2-1").isEmpty());
+ assertTrue("Host satisfying 16-24-100-1 is kept", tester.nodeRepository.nodes().node("host3").isPresent());
+ assertTrue("New 48-128-1000-10 host added", tester.nodeRepository.nodes().node("hostname100").isPresent());
+ assertTrue("New 48-128-1000-10 host added", tester.nodeRepository.nodes().node("hostname101").isPresent());
}
@Test
@@ -154,11 +155,11 @@ public class DynamicProvisioningMaintainerTest {
ClusterCapacity.class);
assertEquals(0, tester.hostProvisioner.provisionedHosts.size());
- assertEquals(11, tester.nodeRepository.nodes().getNodes().size());
- assertTrue(tester.nodeRepository.nodes().getNode("host2").isPresent());
- assertTrue(tester.nodeRepository.nodes().getNode("host2-1").isPresent());
- assertTrue(tester.nodeRepository.nodes().getNode("host3").isPresent());
- assertTrue(tester.nodeRepository.nodes().getNode("hostname100").isEmpty());
+ assertEquals(11, tester.nodeRepository.nodes().list().size());
+ assertTrue(tester.nodeRepository.nodes().node("host2").isPresent());
+ assertTrue(tester.nodeRepository.nodes().node("host2-1").isPresent());
+ assertTrue(tester.nodeRepository.nodes().node("host3").isPresent());
+ assertTrue(tester.nodeRepository.nodes().node("hostname100").isEmpty());
// The first cluster will be allocated to host3 and a new host hostname100.
// hostname100 will be a large shared host specified above.
@@ -194,10 +195,10 @@ public class DynamicProvisioningMaintainerTest {
assertEquals(2, tester.hostProvisioner.provisionedHosts.size());
assertEquals(2, tester.provisionedHostsMatching(new NodeResources(48, 128, 1000, 10)));
- assertEquals(10, tester.nodeRepository.nodes().getNodes().size()); // 3 removed, 2 added
- assertTrue("preprovision capacity is prefered on shared hosts", tester.nodeRepository.nodes().getNode("host3").isEmpty());
- assertTrue(tester.nodeRepository.nodes().getNode("hostname100").isPresent());
- assertTrue(tester.nodeRepository.nodes().getNode("hostname101").isPresent());
+ assertEquals(10, tester.nodeRepository.nodes().list().size()); // 3 removed, 2 added
+ assertTrue("preprovision capacity is prefered on shared hosts", tester.nodeRepository.nodes().node("host3").isEmpty());
+ assertTrue(tester.nodeRepository.nodes().node("hostname100").isPresent());
+ assertTrue(tester.nodeRepository.nodes().node("hostname101").isPresent());
// If the preprovision capacity is reduced, we should see shared hosts deprovisioned.
@@ -210,13 +211,13 @@ public class DynamicProvisioningMaintainerTest {
assertEquals("one provisioned host has been deprovisioned, so there are 2 -> 1 provisioned hosts",
1, tester.hostProvisioner.provisionedHosts.size());
assertEquals(1, tester.provisionedHostsMatching(new NodeResources(48, 128, 1000, 10)));
- assertEquals(9, tester.nodeRepository.nodes().getNodes().size()); // 4 removed, 2 added
- if (tester.nodeRepository.nodes().getNode("hostname100").isPresent()) {
+ assertEquals(9, tester.nodeRepository.nodes().list().size()); // 4 removed, 2 added
+ if (tester.nodeRepository.nodes().node("hostname100").isPresent()) {
assertTrue("hostname101 is superfluous and should have been deprovisioned",
- tester.nodeRepository.nodes().getNode("hostname101").isEmpty());
+ tester.nodeRepository.nodes().node("hostname101").isEmpty());
} else {
assertTrue("hostname101 is required for preprovision capacity",
- tester.nodeRepository.nodes().getNode("hostname101").isPresent());
+ tester.nodeRepository.nodes().node("hostname101").isPresent());
}
}
@@ -224,11 +225,11 @@ public class DynamicProvisioningMaintainerTest {
private void verifyFirstMaintain(DynamicProvisioningTester tester) {
assertEquals(1, tester.hostProvisioner.provisionedHosts.size());
assertEquals(1, tester.provisionedHostsMatching(new NodeResources(48, 128, 1000, 10)));
- assertEquals(10, tester.nodeRepository.nodes().getNodes().size()); // 2 removed, 1 added
- assertTrue("Failed host 'host2' is deprovisioned", tester.nodeRepository.nodes().getNode("host2").isEmpty());
- assertTrue("Node on deprovisioned host removed", tester.nodeRepository.nodes().getNode("host2-1").isEmpty());
- assertTrue("One 1-30-20-3 node fits on host3", tester.nodeRepository.nodes().getNode("host3").isPresent());
- assertTrue("New 48-128-1000-10 host added", tester.nodeRepository.nodes().getNode("hostname100").isPresent());
+ assertEquals(10, tester.nodeRepository.nodes().list().size()); // 2 removed, 1 added
+ assertTrue("Failed host 'host2' is deprovisioned", tester.nodeRepository.nodes().node("host2").isEmpty());
+ assertTrue("Node on deprovisioned host removed", tester.nodeRepository.nodes().node("host2-1").isEmpty());
+ assertTrue("One 1-30-20-3 node fits on host3", tester.nodeRepository.nodes().node("host3").isPresent());
+ assertTrue("New 48-128-1000-10 host added", tester.nodeRepository.nodes().node("hostname100").isPresent());
}
@Test
@@ -282,7 +283,7 @@ public class DynamicProvisioningMaintainerTest {
tester.hostProvisioner.with(Behaviour.failDeprovisioning);
tester.maintainer.maintain();
- assertTrue(tester.nodeRepository.nodes().getNode(host2.hostname()).isPresent());
+ assertTrue(tester.nodeRepository.nodes().node(host2.hostname()).isPresent());
}
@Test
@@ -338,10 +339,10 @@ public class DynamicProvisioningMaintainerTest {
List.of(new ClusterCapacity(3, 0, 0, 0, 0.0)),
ClusterCapacity.class);
assertEquals(0, tester.provisionedHostsMatching(sharedHostNodeResources));
- assertTrue(tester.nodeRepository.nodes().getNode("hostname102").isEmpty());
+ assertTrue(tester.nodeRepository.nodes().node("hostname102").isEmpty());
tester.maintainer.maintain();
assertEquals(1, tester.provisionedHostsMatching(sharedHostNodeResources));
- assertTrue(tester.nodeRepository.nodes().getNode("hostname102").isPresent());
+ assertTrue(tester.nodeRepository.nodes().node("hostname102").isPresent());
// Next maintenance run does nothing
tester.assertNodesUnchanged();
@@ -366,14 +367,14 @@ public class DynamicProvisioningMaintainerTest {
ClusterCapacity.class);
assertEquals(1, tester.provisionedHostsMatching(sharedHostNodeResources));
- assertTrue(tester.nodeRepository.nodes().getNode("hostname102").isPresent());
- assertTrue(tester.nodeRepository.nodes().getNode("hostname103").isEmpty());
- assertTrue(tester.nodeRepository.nodes().getNode("hostname104").isEmpty());
+ assertTrue(tester.nodeRepository.nodes().node("hostname102").isPresent());
+ assertTrue(tester.nodeRepository.nodes().node("hostname103").isEmpty());
+ assertTrue(tester.nodeRepository.nodes().node("hostname104").isEmpty());
tester.maintainer.maintain();
assertEquals(3, tester.provisionedHostsMatching(sharedHostNodeResources));
- assertTrue(tester.nodeRepository.nodes().getNode("hostname102").isPresent());
- assertTrue(tester.nodeRepository.nodes().getNode("hostname103").isPresent());
- assertTrue(tester.nodeRepository.nodes().getNode("hostname104").isPresent());
+ assertTrue(tester.nodeRepository.nodes().node("hostname102").isPresent());
+ assertTrue(tester.nodeRepository.nodes().node("hostname103").isPresent());
+ assertTrue(tester.nodeRepository.nodes().node("hostname104").isPresent());
}
@Test
@@ -381,7 +382,7 @@ public class DynamicProvisioningMaintainerTest {
var tester = new DynamicProvisioningTester().addInitialNodes();
tester.hostProvisioner.with(Behaviour.failDnsUpdate);
- Supplier<List<Node>> provisioning = () -> tester.nodeRepository.nodes().getNodes(NodeType.host, Node.State.provisioned);
+ Supplier<NodeList> provisioning = () -> tester.nodeRepository.nodes().list(Node.State.provisioned).nodeType(NodeType.host);
assertEquals(2, provisioning.get().size());
tester.maintainer.maintain();
@@ -482,9 +483,9 @@ public class DynamicProvisioningMaintainerTest {
}
private void assertNodesUnchanged() {
- List<Node> nodes = nodeRepository.nodes().getNodes();
+ NodeList nodes = nodeRepository.nodes().list();
maintainer.maintain();
- assertEquals("Nodes are unchanged after maintenance run", nodes, nodeRepository.nodes().getNodes());
+ assertEquals("Nodes are unchanged after maintenance run", nodes, nodeRepository.nodes().list());
}
}
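As a side note, the assertNodesUnchanged() helper above relies on comparing two NodeList snapshots for equality; a minimal, hypothetical sketch of that snapshot-and-compare pattern (assuming NodeList equality is value-based, as the assertion implies):

import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;

import static org.junit.Assert.assertEquals;

class MaintenanceInvarianceSketch {

    // Snapshot the repository, run one maintenance pass, and assert nothing changed.
    static void assertUnchangedBy(Runnable maintenance, NodeRepository nodeRepository) {
        NodeList before = nodeRepository.nodes().list();
        maintenance.run();
        assertEquals("Nodes are unchanged after maintenance run", before, nodeRepository.nodes().list());
    }
}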
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java
index d02d08f7736..2191963de8a 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java
@@ -268,7 +268,7 @@ public class FailedExpirerTest {
}
public Node get(String hostname) {
- return nodeRepository.nodes().getNode(hostname)
+ return nodeRepository.nodes().node(hostname)
.orElseThrow(() -> new IllegalArgumentException("No such node: " + hostname));
}
@@ -341,7 +341,7 @@ public class FailedExpirerTest {
public void assertNodesIn(Node.State state, String... hostnames) {
assertEquals(Stream.of(hostnames).collect(Collectors.toSet()),
nodeRepository.nodes()
- .getNodes(state).stream()
+ .list(state).stream()
.map(Node::hostname)
.collect(Collectors.toSet()));
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java
index 4bee276af6d..81880de4d92 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java
@@ -15,6 +15,7 @@ import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.History;
import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
@@ -65,14 +66,14 @@ public class InactiveAndFailedExpirerTest {
// Inactive times out
tester.advanceTime(Duration.ofMinutes(14));
new InactiveExpirer(tester.nodeRepository(), Duration.ofMinutes(10), new TestMetric()).run();
- assertEquals(0, tester.nodeRepository().nodes().getNodes(Node.State.inactive).size());
- List<Node> dirty = tester.nodeRepository().nodes().getNodes(Node.State.dirty);
+ assertEquals(0, tester.nodeRepository().nodes().list(Node.State.inactive).size());
+ NodeList dirty = tester.nodeRepository().nodes().list(Node.State.dirty);
assertEquals(2, dirty.size());
- assertFalse(dirty.get(0).allocation().isPresent());
- assertFalse(dirty.get(1).allocation().isPresent());
+ assertFalse(dirty.asList().get(0).allocation().isPresent());
+ assertFalse(dirty.asList().get(1).allocation().isPresent());
// One node is set back to ready
- Node ready = tester.nodeRepository().nodes().setReady(Collections.singletonList(dirty.get(0)), Agent.system, getClass().getSimpleName()).get(0);
+ Node ready = tester.nodeRepository().nodes().setReady(Collections.singletonList(dirty.asList().get(0)), Agent.system, getClass().getSimpleName()).get(0);
assertEquals("Allocated history is removed on readying",
Arrays.asList(History.Event.Type.provisioned, History.Event.Type.readied),
ready.history().events().stream().map(History.Event::type).collect(Collectors.toList()));
@@ -80,10 +81,10 @@ public class InactiveAndFailedExpirerTest {
// Dirty times out for the other one
tester.advanceTime(Duration.ofMinutes(14));
new DirtyExpirer(tester.nodeRepository(), Duration.ofMinutes(10), new TestMetric()).run();
- assertEquals(0, tester.nodeRepository().nodes().getNodes(NodeType.tenant, Node.State.dirty).size());
- List<Node> failed = tester.nodeRepository().nodes().getNodes(NodeType.tenant, Node.State.failed);
+ assertEquals(0, tester.nodeRepository().nodes().list(Node.State.dirty).nodeType(NodeType.tenant).size());
+ NodeList failed = tester.nodeRepository().nodes().list(Node.State.failed).nodeType(NodeType.tenant);
assertEquals(1, failed.size());
- assertEquals(1, failed.get(0).status().failCount());
+ assertEquals(1, failed.first().get().status().failCount());
}
@Test
@@ -108,11 +109,11 @@ public class InactiveAndFailedExpirerTest {
// Inactive times out and node is moved to dirty
tester.advanceTime(Duration.ofMinutes(14));
new InactiveExpirer(tester.nodeRepository(), Duration.ofMinutes(10), new TestMetric()).run();
- List<Node> dirty = tester.nodeRepository().nodes().getNodes(Node.State.dirty);
+ NodeList dirty = tester.nodeRepository().nodes().list(Node.State.dirty);
assertEquals(2, dirty.size());
// Reboot generation is increased
- assertEquals(wantedRebootGeneration + 1, dirty.get(0).status().reboot().wanted());
+ assertEquals(wantedRebootGeneration + 1, dirty.first().get().status().reboot().wanted());
}
@Test
@@ -154,12 +155,12 @@ public class InactiveAndFailedExpirerTest {
doThrow(new RuntimeException()).when(orchestrator).acquirePermissionToRemove(any());
new RetiredExpirer(tester.nodeRepository(), tester.orchestrator(), deployer, new TestMetric(),
Duration.ofDays(30), Duration.ofMinutes(10)).run();
- assertEquals(1, tester.nodeRepository().nodes().getNodes(Node.State.inactive).size());
+ assertEquals(1, tester.nodeRepository().nodes().list(Node.State.inactive).size());
// Inactive times out and one node is moved to parked
tester.advanceTime(Duration.ofMinutes(11)); // Trigger InactiveExpirer
new InactiveExpirer(tester.nodeRepository(), Duration.ofMinutes(10), new TestMetric()).run();
- assertEquals(1, tester.nodeRepository().nodes().getNodes(Node.State.parked).size());
+ assertEquals(1, tester.nodeRepository().nodes().list(Node.State.parked).size());
}
@Test
@@ -181,11 +182,10 @@ public class InactiveAndFailedExpirerTest {
// See that nodes are moved to dirty immediately.
new InactiveExpirer(tester.nodeRepository(), Duration.ofMinutes(10), new TestMetric()).run();
- assertEquals(0, tester.nodeRepository().nodes().getNodes(Node.State.inactive).size());
- List<Node> dirty = tester.nodeRepository().nodes().getNodes(Node.State.dirty);
+ assertEquals(0, tester.nodeRepository().nodes().list(Node.State.inactive).size());
+ NodeList dirty = tester.nodeRepository().nodes().list(Node.State.dirty);
assertEquals(1, dirty.size());
- assertFalse(dirty.get(0).allocation().isPresent());
-
+ assertFalse(dirty.first().get().allocation().isPresent());
}
@Test
@@ -206,7 +206,7 @@ public class InactiveAndFailedExpirerTest {
tester.patchNodes(inactiveNodes, (node) -> node.withWantToRetire(true, true, Agent.system, tester.clock().instant()));
tester.advanceTime(Duration.ofMinutes(11));
new InactiveExpirer(tester.nodeRepository(), Duration.ofMinutes(10), new TestMetric()).run();
- assertEquals(2, tester.nodeRepository().nodes().getNodes(Node.State.parked).size());
+ assertEquals(2, tester.nodeRepository().nodes().list(Node.State.parked).size());
}
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java
index 832f8c0c318..189b32028a2 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java
@@ -132,7 +132,7 @@ public class LoadBalancerExpirerTest {
}
private void dirtyNodesOf(ApplicationId application, ClusterSpec.Id cluster) {
- tester.nodeRepository().nodes().deallocate(tester.nodeRepository().nodes().getNodes(application).stream()
+ tester.nodeRepository().nodes().deallocate(tester.nodeRepository().nodes().list().owner(application).stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().membership().cluster().id().equals(cluster))
.collect(Collectors.toList()),
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java
index 6cfd95e828a..b20821bd4a6 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java
@@ -181,7 +181,7 @@ public class MetricsReporterTest {
}
NestedTransaction transaction = new NestedTransaction();
- nodeRepository.nodes().activate(nodeRepository.nodes().getNodes(NodeType.host), transaction);
+ nodeRepository.nodes().activate(nodeRepository.nodes().list().nodeType(NodeType.host).asList(), transaction);
transaction.commit();
Orchestrator orchestrator = mock(Orchestrator.class);
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java
index dba3ca6a92e..9801233f396 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java
@@ -18,6 +18,7 @@ import com.yahoo.vespa.applicationmodel.HostName;
import com.yahoo.vespa.curator.Curator;
import com.yahoo.vespa.curator.transaction.CuratorTransaction;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.IP;
@@ -102,8 +103,8 @@ public class NodeFailTester {
tester.activate(app1, clusterApp1, capacity1);
tester.activate(app2, clusterApp2, capacity2);
- assertEquals(capacity1.minResources().nodes(), tester.nodeRepository.nodes().getNodes(app1, Node.State.active).size());
- assertEquals(capacity2.minResources().nodes(), tester.nodeRepository.nodes().getNodes(app2, Node.State.active).size());
+ assertEquals(capacity1.minResources().nodes(), tester.nodeRepository.nodes().list(Node.State.active).owner(app1).size());
+ assertEquals(capacity2.minResources().nodes(), tester.nodeRepository.nodes().list(Node.State.active).owner(app2).size());
Map<ApplicationId, MockDeployer.ApplicationContext> apps = Map.of(
app1, new MockDeployer.ApplicationContext(app1, clusterApp1, capacity1),
@@ -132,10 +133,10 @@ public class NodeFailTester {
tester.activate(tenantHostApp, clusterNodeAdminApp, allHosts);
tester.activate(app1, clusterApp1, capacity1);
tester.activate(app2, clusterApp2, capacity2);
- assertEquals(Set.of(tester.nodeRepository.nodes().getNodes(NodeType.host)),
- Set.of(tester.nodeRepository.nodes().getNodes(tenantHostApp, Node.State.active)));
- assertEquals(capacity1.minResources().nodes(), tester.nodeRepository.nodes().getNodes(app1, Node.State.active).size());
- assertEquals(capacity2.minResources().nodes(), tester.nodeRepository.nodes().getNodes(app2, Node.State.active).size());
+ assertEquals(Set.of(tester.nodeRepository.nodes().list().nodeType(NodeType.host).asList()),
+ Set.of(tester.nodeRepository.nodes().list(Node.State.active).owner(tenantHostApp).asList()));
+ assertEquals(capacity1.minResources().nodes(), tester.nodeRepository.nodes().list(Node.State.active).owner(app1).size());
+ assertEquals(capacity2.minResources().nodes(), tester.nodeRepository.nodes().list(Node.State.active).owner(app2).size());
Map<ApplicationId, MockDeployer.ApplicationContext> apps = Map.of(
tenantHostApp, new MockDeployer.ApplicationContext(tenantHostApp, clusterNodeAdminApp, allHosts),
@@ -163,7 +164,7 @@ public class NodeFailTester {
Capacity allNodes = Capacity.fromRequiredNodeType(nodeType);
ClusterSpec clusterApp1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test")).vespaVersion("6.42").build();
tester.activate(app1, clusterApp1, allNodes);
- assertEquals(count, tester.nodeRepository.nodes().getNodes(nodeType, Node.State.active).size());
+ assertEquals(count, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
Map<ApplicationId, MockDeployer.ApplicationContext> apps = Map.of(
app1, new MockDeployer.ApplicationContext(app1, clusterApp1, allNodes));
@@ -212,7 +213,7 @@ public class NodeFailTester {
}
public void allNodesMakeAConfigRequestExcept(List<Node> deadNodes) {
- for (Node node : nodeRepository.nodes().getNodes()) {
+ for (Node node : nodeRepository.nodes().list()) {
if ( ! deadNodes.contains(node))
hostLivenessTracker.receivedRequestFrom(node.hostname());
}
@@ -288,7 +289,7 @@ public class NodeFailTester {
}
/** Returns the node with the highest membership index from the given set of allocated nodes */
- public Node highestIndex(List<Node> nodes) {
+ public Node highestIndex(NodeList nodes) {
Node highestIndex = null;
for (Node node : nodes) {
if (highestIndex == null || node.allocation().get().membership().index() >
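Since NodeList is both iterable and streamable (see the list().stream() call sites above), the loop in highestIndex(...) could equally be written as a stream reduction; a hypothetical sketch, assuming every node in the list carries an allocation as the original helper does:

import java.util.Comparator;
import java.util.Optional;

import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;

class HighestIndexSketch {

    // Returns the node with the highest membership index, or empty for an empty list.
    static Optional<Node> highestIndex(NodeList nodes) {
        return nodes.stream()
                    .max(Comparator.comparingInt((Node node) -> node.allocation().get().membership().index()));
    }
}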
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java
index 50b99afbca5..ca1fa2831b8 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java
@@ -9,6 +9,7 @@ import com.yahoo.config.provision.NodeType;
import com.yahoo.vespa.applicationmodel.ServiceInstance;
import com.yahoo.vespa.applicationmodel.ServiceStatus;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.Report;
@@ -51,7 +52,7 @@ public class NodeFailerTest {
String hostWithFailureReports = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2);
// Set failure report to the parent and all its children.
- tester.nodeRepository.nodes().getNodes().stream()
+ tester.nodeRepository.nodes().list().stream()
.filter(node -> node.hostname().equals(hostWithFailureReports))
.forEach(node -> {
Node updatedNode = node.with(node.reports().withReport(badTotalMemorySizeReport));
@@ -96,19 +97,19 @@ public class NodeFailerTest {
.map(Node::state).collect(Collectors.toSet());
assertEquals(Set.of(Node.State.failed), childStates2Iter);
// The host itself is still active as it too must be allowed to suspend
- assertEquals(Node.State.active, tester.nodeRepository.nodes().getNode(hostWithHwFailure).get().state());
+ assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithHwFailure).get().state());
tester.suspend(hostWithHwFailure);
tester.runMaintainers();
- assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(hostWithHwFailure).get().state());
- assertEquals(4, tester.nodeRepository.nodes().getNodes(Node.State.failed).size());
+ assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(hostWithHwFailure).get().state());
+ assertEquals(4, tester.nodeRepository.nodes().list(Node.State.failed).size());
}
@Test
public void hw_fail_only_if_whole_host_is_suspended() {
NodeFailTester tester = NodeFailTester.withTwoApplicationsOnDocker(6);
String hostWithFailureReports = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2);
- assertEquals(Node.State.active, tester.nodeRepository.nodes().getNode(hostWithFailureReports).get().state());
+ assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
// The host has 2 nodes in active and 1 ready
Map<Node.State, List<String>> hostnamesByState = tester.nodeRepository.nodes().list().childrenOf(hostWithFailureReports).asList().stream()
@@ -121,7 +122,7 @@ public class NodeFailerTest {
// Set failure report to the parent and all its children.
Report badTotalMemorySizeReport = Report.basicReport("badTotalMemorySize", HARD_FAIL, Instant.now(), "too low");
- tester.nodeRepository.nodes().getNodes().stream()
+ tester.nodeRepository.nodes().list().stream()
.filter(node -> node.hostname().equals(hostWithFailureReports))
.forEach(node -> {
Node updatedNode = node.with(node.reports().withReport(badTotalMemorySizeReport));
@@ -131,40 +132,40 @@ public class NodeFailerTest {
// The ready node will be failed, but neither the host nor the 2 active nodes since they have not been suspended
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
- assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(readyChild).get().state());
- assertEquals(Node.State.active, tester.nodeRepository.nodes().getNode(hostWithFailureReports).get().state());
- assertEquals(Node.State.active, tester.nodeRepository.nodes().getNode(activeChild1).get().state());
- assertEquals(Node.State.active, tester.nodeRepository.nodes().getNode(activeChild2).get().state());
+ assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state());
+ assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
+ assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild1).get().state());
+ assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild2).get().state());
// Suspending the host will not fail any more nodes since none of the children are suspended
tester.suspend(hostWithFailureReports);
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
- assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(readyChild).get().state());
- assertEquals(Node.State.active, tester.nodeRepository.nodes().getNode(hostWithFailureReports).get().state());
- assertEquals(Node.State.active, tester.nodeRepository.nodes().getNode(activeChild1).get().state());
- assertEquals(Node.State.active, tester.nodeRepository.nodes().getNode(activeChild2).get().state());
+ assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state());
+ assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
+ assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild1).get().state());
+ assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild2).get().state());
// Suspending one child node will fail that out.
tester.suspend(activeChild1);
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
- assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(readyChild).get().state());
- assertEquals(Node.State.active, tester.nodeRepository.nodes().getNode(hostWithFailureReports).get().state());
- assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(activeChild1).get().state());
- assertEquals(Node.State.active, tester.nodeRepository.nodes().getNode(activeChild2).get().state());
+ assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state());
+ assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
+ assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(activeChild1).get().state());
+ assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild2).get().state());
// Suspending the second child node will fail out both that node and the host.
tester.suspend(activeChild2);
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
- assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(readyChild).get().state());
- assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(hostWithFailureReports).get().state());
- assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(activeChild1).get().state());
- assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(activeChild2).get().state());
+ assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state());
+ assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state());
+ assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(activeChild1).get().state());
+ assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(activeChild2).get().state());
}
@Test
@@ -173,33 +174,33 @@ public class NodeFailerTest {
tester.suspend(NodeFailTester.app1);
// Set two nodes down (one for each application) and wait 65 minutes
- String host_from_suspended_app = tester.nodeRepository.nodes().getNodes(NodeFailTester.app1, Node.State.active).get(1).hostname();
- String host_from_normal_app = tester.nodeRepository.nodes().getNodes(NodeFailTester.app2, Node.State.active).get(3).hostname();
+ String host_from_suspended_app = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname();
+ String host_from_normal_app = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app2).asList().get(3).hostname();
tester.serviceMonitor.setHostDown(host_from_suspended_app);
tester.serviceMonitor.setHostDown(host_from_normal_app);
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(65));
tester.runMaintainers();
- assertTrue(tester.nodeRepository.nodes().getNode(host_from_normal_app).get().isDown());
- assertTrue(tester.nodeRepository.nodes().getNode(host_from_suspended_app).get().isDown());
- assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(host_from_normal_app).get().state());
- assertEquals(Node.State.active, tester.nodeRepository.nodes().getNode(host_from_suspended_app).get().state());
+ assertTrue(tester.nodeRepository.nodes().node(host_from_normal_app).get().isDown());
+ assertTrue(tester.nodeRepository.nodes().node(host_from_suspended_app).get().isDown());
+ assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(host_from_normal_app).get().state());
+ assertEquals(Node.State.active, tester.nodeRepository.nodes().node(host_from_suspended_app).get().state());
}
@Test
public void zone_is_not_working_if_too_many_nodes_down() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
- tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().getNodes(NodeFailTester.app1, Node.State.active).get(0).hostname());
+ tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(0).hostname());
tester.runMaintainers();
assertTrue(tester.nodeRepository.nodes().isWorking());
- tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().getNodes(NodeFailTester.app1, Node.State.active).get(1).hostname());
+ tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname());
tester.runMaintainers();
assertTrue(tester.nodeRepository.nodes().isWorking());
- tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().getNodes(NodeFailTester.app1, Node.State.active).get(2).hostname());
+ tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(2).hostname());
tester.runMaintainers();
assertFalse(tester.nodeRepository.nodes().isWorking());
@@ -219,24 +220,24 @@ public class NodeFailerTest {
tester.allNodesMakeAConfigRequestExcept();
assertEquals( 0, tester.deployer.redeployments);
- assertEquals(12, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).size());
- assertEquals( 0, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size());
- assertEquals( 4, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size());
+ assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
+ assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
+ assertEquals( 4, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
}
// Hardware failures are detected on two ready nodes, which are then failed
- Node readyFail1 = tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).get(2);
- Node readyFail2 = tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).get(3);
+ Node readyFail1 = tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).asList().get(2);
+ Node readyFail2 = tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).asList().get(3);
tester.nodeRepository.nodes().write(readyFail1.with(new Reports().withReport(badTotalMemorySizeReport)), () -> {});
tester.nodeRepository.nodes().write(readyFail2.with(new Reports().withReport(badTotalMemorySizeReport)), () -> {});
- assertEquals(4, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size());
+ assertEquals(4, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
tester.runMaintainers();
- assertEquals(2, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size());
- assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(readyFail1.hostname()).get().state());
- assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(readyFail2.hostname()).get().state());
+ assertEquals(2, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
+ assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyFail1.hostname()).get().state());
+ assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyFail2.hostname()).get().state());
- String downHost1 = tester.nodeRepository.nodes().getNodes(NodeFailTester.app1, Node.State.active).get(1).hostname();
- String downHost2 = tester.nodeRepository.nodes().getNodes(NodeFailTester.app2, Node.State.active).get(3).hostname();
+ String downHost1 = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname();
+ String downHost2 = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app2).asList().get(3).hostname();
tester.serviceMonitor.setHostDown(downHost1);
tester.serviceMonitor.setHostDown(downHost2);
// nothing happens the first 45 minutes
@@ -245,9 +246,9 @@ public class NodeFailerTest {
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals( 0, tester.deployer.redeployments);
- assertEquals(12, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).size());
- assertEquals( 2, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size());
- assertEquals( 2, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size());
+ assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
+ assertEquals( 2, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
+ assertEquals( 2, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
}
tester.serviceMonitor.setHostUp(downHost1);
@@ -256,10 +257,10 @@ public class NodeFailerTest {
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals( 1, tester.deployer.redeployments);
- assertEquals(12, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).size());
- assertEquals( 3, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size());
- assertEquals( 1, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size());
- assertEquals(downHost2, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).get(0).hostname());
+ assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
+ assertEquals( 3, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
+ assertEquals( 1, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
+ assertEquals(downHost2, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).asList().get(0).hostname());
// downHost1 fails again
tester.serviceMonitor.setHostDown(downHost1);
@@ -275,12 +276,12 @@ public class NodeFailerTest {
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals( 2, tester.deployer.redeployments);
- assertEquals(12, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).size());
- assertEquals( 4, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size());
- assertEquals( 0, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size());
+ assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
+ assertEquals( 4, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
+ assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
// the last host goes down
- Node lastNode = tester.highestIndex(tester.nodeRepository.nodes().getNodes(NodeFailTester.app1, Node.State.active));
+ Node lastNode = tester.highestIndex(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1));
tester.serviceMonitor.setHostDown(lastNode.hostname());
// it is not failed because there are no ready nodes to replace it
for (int minutes = 0; minutes < 75; minutes +=5 ) {
@@ -288,9 +289,9 @@ public class NodeFailerTest {
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals( 2, tester.deployer.redeployments);
- assertEquals(12, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).size());
- assertEquals( 4, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size());
- assertEquals( 0, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size());
+ assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
+ assertEquals( 4, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
+ assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
}
// A new node is available
@@ -300,11 +301,11 @@ public class NodeFailerTest {
tester.runMaintainers();
// The node is now failed
assertEquals( 3, tester.deployer.redeployments);
- assertEquals(12, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).size());
- assertEquals( 5, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size());
- assertEquals( 0, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size());
+ assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
+ assertEquals( 5, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
+ assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertTrue("The index of the last failed node is not reused",
- tester.highestIndex(tester.nodeRepository.nodes().getNodes(NodeFailTester.app1, Node.State.active)).allocation().get().membership().index()
+ tester.highestIndex(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1)).allocation().get().membership().index()
>
lastNode.allocation().get().membership().index());
}
@@ -312,31 +313,31 @@ public class NodeFailerTest {
@Test
public void re_activate_grace_period_test() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
- String downNode = tester.nodeRepository.nodes().getNodes(NodeFailTester.app1, Node.State.active).get(1).hostname();
+ String downNode = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname();
tester.serviceMonitor.setHostDown(downNode);
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
- assertEquals(0, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size());
+ assertEquals(0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
tester.clock.advance(Duration.ofMinutes(75));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
- assertEquals(1, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size());
- assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(downNode).get().state());
+ assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
+ assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(downNode).get().state());
// Re-activate the node. It is still down, but should not be failed out until the grace period has passed again
tester.nodeRepository.nodes().reactivate(downNode, Agent.system, getClass().getSimpleName());
tester.clock.advance(Duration.ofMinutes(30));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
- assertEquals(0, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size());
+ assertEquals(0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
tester.clock.advance(Duration.ofMinutes(45));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
- assertEquals(1, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size());
- assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(downNode).get().state());
+ assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
+ assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(downNode).get().state());
}
@Test
@@ -349,7 +350,7 @@ public class NodeFailerTest {
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test")).vespaVersion("6.42").build();
tester.activate(NodeFailTester.app1, cluster, capacity);
- String downHost = tester.nodeRepository.nodes().getNodes(NodeFailTester.app1, Node.State.active).get(0).hostname();
+ String downHost = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).first().get().hostname();
tester.serviceMonitor.setHostDown(downHost);
// nothing happens the first 45 minutes
@@ -358,8 +359,8 @@ public class NodeFailerTest {
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals(0, tester.deployer.redeployments);
- assertEquals(3, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).size());
- assertEquals(0, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size());
+ assertEquals(3, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
+ assertEquals(0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
}
// downHost should now be failed and replaced
@@ -367,9 +368,9 @@ public class NodeFailerTest {
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
assertEquals(1, tester.deployer.redeployments);
- assertEquals(1, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size());
- assertEquals(3, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).size());
- assertEquals(downHost, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).get(0).hostname());
+ assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
+ assertEquals(3, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
+ assertEquals(downHost, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).asList().get(0).hostname());
}
@Test
@@ -385,10 +386,10 @@ public class NodeFailerTest {
tester.clock.advance(Duration.ofMinutes(interval));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
- assertEquals( 5, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size());
+ assertEquals( 5, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
}
- List<Node> ready = tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready);
+ NodeList ready = tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant);
// Two ready nodes and a ready docker node die, but only 2 of those are failed out
tester.clock.advance(Duration.ofMinutes(180));
@@ -398,16 +399,16 @@ public class NodeFailerTest {
.collect(Collectors.toList());
tester.allNodesMakeAConfigRequestExcept(otherNodes.get(0), otherNodes.get(2), dockerNode);
tester.runMaintainers();
- assertEquals( 3, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size());
- assertEquals( 2, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size());
+ assertEquals( 3, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
+ assertEquals( 2, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
// Another ready node dies, and both it and the node that died earlier are allowed to fail
tester.clock.advance(Duration.ofDays(1));
tester.allNodesMakeAConfigRequestExcept(otherNodes.get(0), otherNodes.get(2), dockerNode, otherNodes.get(3));
tester.runMaintainers();
- assertEquals( 1, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size());
- assertEquals(otherNodes.get(1), tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).get(0));
- assertEquals( 4, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size());
+ assertEquals( 1, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
+ assertEquals(otherNodes.get(1), tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).first().get());
+ assertEquals( 4, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
}
@Test
@@ -419,17 +420,17 @@ public class NodeFailerTest {
tester.clock.advance(Duration.ofMinutes(interval));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
- assertEquals( 3, tester.nodeRepository.nodes().getNodes(NodeType.host, Node.State.ready).size());
- assertEquals( 0, tester.nodeRepository.nodes().getNodes(NodeType.host, Node.State.failed).size());
+ assertEquals( 3, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).size());
+ assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.host).size());
}
// Two ready nodes and a ready docker node die, but only 2 of those are failed out
tester.clock.advance(Duration.ofMinutes(180));
- Node dockerHost = tester.nodeRepository.nodes().getNodes(NodeType.host, Node.State.ready).iterator().next();
+ Node dockerHost = tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).iterator().next();
tester.allNodesMakeAConfigRequestExcept(dockerHost);
tester.runMaintainers();
- assertEquals( 3, tester.nodeRepository.nodes().getNodes(NodeType.host, Node.State.ready).size());
- assertEquals( 0, tester.nodeRepository.nodes().getNodes(NodeType.host, Node.State.failed).size());
+ assertEquals( 3, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).size());
+ assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.host).size());
}
@Test
@@ -441,9 +442,9 @@ public class NodeFailerTest {
tester.clock.advance(Duration.ofMinutes(interval));
tester.allNodesMakeAConfigRequestExcept();
tester.runMaintainers();
- assertEquals(8, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).size());
- assertEquals(13, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size());
- assertEquals(7, tester.nodeRepository.nodes().getNodes(NodeType.host, Node.State.active).size());
+ assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
+ assertEquals(13, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
+ assertEquals(7, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
}
@@ -457,9 +458,9 @@ public class NodeFailerTest {
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals(0, tester.deployer.redeployments);
- assertEquals(8, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).size());
- assertEquals(13, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size());
- assertEquals(7, tester.nodeRepository.nodes().getNodes(NodeType.host, Node.State.active).size());
+ assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
+ assertEquals(13, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
+ assertEquals(7, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
}
tester.clock.advance(Duration.ofMinutes(30));
@@ -467,14 +468,14 @@ public class NodeFailerTest {
tester.runMaintainers();
assertEquals(2 + 1, tester.deployer.redeployments);
- assertEquals(3, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size());
- assertEquals(8, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).size());
- assertEquals(10, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size());
- assertEquals(6, tester.nodeRepository.nodes().getNodes(NodeType.host, Node.State.active).size());
+ assertEquals(3, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
+ assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
+ assertEquals(10, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
+ assertEquals(6, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
// Now let's fail an active tenant node
- Node downTenant1 = tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).get(0);
+ Node downTenant1 = tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).first().get();
tester.serviceMonitor.setHostDown(downTenant1.hostname());
// nothing happens during the entire day because of the failure throttling
@@ -482,7 +483,7 @@ public class NodeFailerTest {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(interval));
tester.allNodesMakeAConfigRequestExcept();
- assertEquals(3 + 1, tester.nodeRepository.nodes().getNodes(Node.State.failed).size());
+ assertEquals(3 + 1, tester.nodeRepository.nodes().list(Node.State.failed).size());
}
tester.clock.advance(Duration.ofMinutes(30));
@@ -490,10 +491,10 @@ public class NodeFailerTest {
tester.runMaintainers();
assertEquals(3 + 1, tester.deployer.redeployments);
- assertEquals(4, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size());
- assertEquals(8, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).size());
- assertEquals(9, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size());
- assertEquals(6, tester.nodeRepository.nodes().getNodes(NodeType.host, Node.State.active).size());
+ assertEquals(4, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
+ assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
+ assertEquals(9, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
+ assertEquals(6, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
// Let's fail another host, making sure it is not the same host that downTenant1 is a child of
@@ -505,10 +506,10 @@ public class NodeFailerTest {
tester.runMaintainers();
assertEquals(5 + 2, tester.deployer.redeployments);
- assertEquals(7, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size());
- assertEquals(8, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).size());
- assertEquals(6, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size());
- assertEquals(5, tester.nodeRepository.nodes().getNodes(NodeType.host, Node.State.active).size());
+ assertEquals(7, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
+ assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
+ assertEquals(6, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
+ assertEquals(5, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
// We have only 5 hosts remaining, so if we fail another host, we should only be able to redeploy app1's
// node, while app2's should remain
@@ -520,10 +521,10 @@ public class NodeFailerTest {
tester.runMaintainers();
assertEquals(6 + 2, tester.deployer.redeployments);
- assertEquals(9, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size());
- assertEquals(8, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).size());
- assertEquals(4, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size());
- assertEquals(5, tester.nodeRepository.nodes().getNodes(NodeType.host, Node.State.active).size());
+ assertEquals(9, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
+ assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
+ assertEquals(4, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
+ assertEquals(5, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
}
@Test
@@ -545,7 +546,7 @@ public class NodeFailerTest {
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
- assertEquals(count, tester.nodeRepository.nodes().getNodes(nodeType, Node.State.active).size());
+ assertEquals(count, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
}
Set<String> downHosts = Set.of("host2", "host3");
@@ -558,7 +559,7 @@ public class NodeFailerTest {
tester.clock.advance(Duration.ofMinutes(5));
tester.allNodesMakeAConfigRequestExcept();
assertEquals( 0, tester.deployer.redeployments);
- assertEquals(count, tester.nodeRepository.nodes().getNodes(nodeType, Node.State.active).size());
+ assertEquals(count, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
}
tester.clock.advance(Duration.ofMinutes(60));
@@ -566,15 +567,15 @@ public class NodeFailerTest {
// one down host should now be failed, but not two as we are only allowed to fail one proxy
assertEquals(expectedFailCount, tester.deployer.redeployments);
- assertEquals(count - expectedFailCount, tester.nodeRepository.nodes().getNodes(nodeType, Node.State.active).size());
- assertEquals(expectedFailCount, tester.nodeRepository.nodes().getNodes(nodeType, Node.State.failed).size());
- tester.nodeRepository.nodes().getNodes(nodeType, Node.State.failed)
+ assertEquals(count - expectedFailCount, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
+ assertEquals(expectedFailCount, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(nodeType).size());
+ tester.nodeRepository.nodes().list(Node.State.failed).nodeType(nodeType)
.forEach(node -> assertTrue(downHosts.contains(node.hostname())));
// trying to fail again will still not fail the other down host
tester.clock.advance(Duration.ofMinutes(60));
tester.runMaintainers();
- assertEquals(count - expectedFailCount, tester.nodeRepository.nodes().getNodes(nodeType, Node.State.active).size());
+ assertEquals(count - expectedFailCount, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
}
@Test
@@ -589,7 +590,7 @@ public class NodeFailerTest {
tester.nodeRepository.nodes().write(readyNode.with(new Reports().withReport(badTotalMemorySizeReport)), () -> {});
tester.runMaintainers();
- assertEquals(1, tester.nodeRepository.nodes().getNodes(Node.State.failed).size());
+ assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).size());
}
@Test
@@ -599,7 +600,7 @@ public class NodeFailerTest {
// 50 regular tenant nodes, 10 hosts with 3 tenant nodes each, 90 nodes in total
NodeFailTester tester = NodeFailTester.withTwoApplicationsOnDocker(10);
List<Node> readyNodes = tester.createReadyNodes(50, 30);
- List<Node> hosts = tester.nodeRepository.nodes().getNodes(NodeType.host);
+ NodeList hosts = tester.nodeRepository.nodes().list().nodeType(NodeType.host);
List<Node> deadNodes = readyNodes.subList(0, 4);
// 2 hours pass, 4 physical nodes die
@@ -610,7 +611,7 @@ public class NodeFailerTest {
// 2 nodes are failed (the minimum number that is always allowed to fail)
tester.runMaintainers();
- assertEquals(2, tester.nodeRepository.nodes().getNodes(Node.State.failed).size());
+ assertEquals(2, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled node failures", 2, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
@@ -620,7 +621,7 @@ public class NodeFailerTest {
tester.allNodesMakeAConfigRequestExcept(deadNodes);
}
tester.runMaintainers();
- assertEquals(2, tester.nodeRepository.nodes().getNodes(Node.State.failed).size());
+ assertEquals(2, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled node failures", 2, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
@@ -630,7 +631,7 @@ public class NodeFailerTest {
tester.allNodesMakeAConfigRequestExcept(deadNodes);
}
tester.runMaintainers();
- assertEquals(4, tester.nodeRepository.nodes().getNodes(Node.State.failed).size());
+ assertEquals(4, tester.nodeRepository.nodes().list(Node.State.failed).size());
// 24 more hours pass, nothing happens
for (int minutes = 0, interval = 30; minutes < 24 * 60; minutes += interval) {
@@ -639,7 +640,7 @@ public class NodeFailerTest {
}
// 3 hosts fail. 2 of them and all of their children are allowed to fail
- List<Node> failedHosts = hosts.subList(0, 3);
+ List<Node> failedHosts = hosts.asList().subList(0, 3);
failedHosts.forEach(host -> {
tester.serviceMonitor.setHostDown(host.hostname());
deadNodes.add(host);
@@ -652,7 +653,7 @@ public class NodeFailerTest {
assertEquals(4 + /* already failed */
2 + /* hosts */
(2 * 3) /* containers per host */,
- tester.nodeRepository.nodes().getNodes(Node.State.failed).size());
+ tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled host failures", 1, tester.metric.values.get(NodeFailer.throttledHostFailuresMetric));
@@ -662,14 +663,14 @@ public class NodeFailerTest {
tester.allNodesMakeAConfigRequestExcept(deadNodes);
}
tester.runMaintainers();
- assertEquals(12, tester.nodeRepository.nodes().getNodes(Node.State.failed).size());
+ assertEquals(12, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled host failures", 1, tester.metric.values.get(NodeFailer.throttledHostFailuresMetric));
// The final host and its containers are failed out
tester.clock.advance(Duration.ofMinutes(30));
tester.runMaintainers();
- assertEquals(16, tester.nodeRepository.nodes().getNodes(Node.State.failed).size());
+ assertEquals(16, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is not indicated by the metric, as no throttled attempt is made", 0, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("No throttled node failures", 0, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
@@ -677,7 +678,7 @@ public class NodeFailerTest {
tester.clock.advance(Duration.ofHours(25));
tester.allNodesMakeAConfigRequestExcept(deadNodes);
tester.runMaintainers();
- assertEquals(16, tester.nodeRepository.nodes().getNodes(Node.State.failed).size());
+ assertEquals(16, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is not indicated by the metric", 0, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("No throttled node failures", 0, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
}
@@ -695,7 +696,7 @@ public class NodeFailerTest {
}
tester.runMaintainers();
// 2% are allowed to fail
- assertEquals(10, tester.nodeRepository.nodes().getNodes(Node.State.failed).size());
+ assertEquals(10, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric.", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled node failures", 5, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
@@ -705,7 +706,7 @@ public class NodeFailerTest {
tester.allNodesMakeAConfigRequestExcept(deadNodes);
}
tester.runMaintainers();
- assertEquals(10, tester.nodeRepository.nodes().getNodes(Node.State.failed).size());
+ assertEquals(10, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is indicated by the metric.", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("Throttled node failures", 5, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
@@ -715,7 +716,7 @@ public class NodeFailerTest {
tester.allNodesMakeAConfigRequestExcept(deadNodes);
}
tester.runMaintainers();
- assertEquals(15, tester.nodeRepository.nodes().getNodes(Node.State.failed).size());
+ assertEquals(15, tester.nodeRepository.nodes().list(Node.State.failed).size());
assertEquals("Throttling is not indicated by the metric, as no throttled attempt is made.", 0, tester.metric.values.get(NodeFailer.throttlingActiveMetric));
assertEquals("No throttled node failures", 0, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric));
}
@@ -758,7 +759,7 @@ public class NodeFailerTest {
*/
private static String selectFirstParentHostWithNActiveNodesExcept(NodeRepository nodeRepository, int n, String... except) {
Set<String> exceptSet = Arrays.stream(except).collect(Collectors.toSet());
- return nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).stream()
+ return nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).stream()
.collect(Collectors.groupingBy(Node::parentHostname))
.entrySet().stream()
.filter(entry -> entry.getValue().size() == n)
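The pattern repeated throughout these hunks is the point of the change: the removed Nodes.getNodes overloads give way to a single Nodes.list(Node.State...) call returning a NodeList, which is then narrowed with filters such as nodeType(...), while Nodes.getNode(hostname) becomes Nodes.node(hostname). Below is a minimal sketch of the before/after, not part of the patch; it uses only the calls visible in the hunks (list, nodeType, node, first, asList, size), and the class and method names are invented for illustration.

import com.yahoo.config.provision.NodeType;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;

import java.util.List;
import java.util.Optional;

// Hypothetical helper, for illustration only; not part of this change.
class NodeLookupSketch {

    // Old: nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed)
    // New: list(...) returns a NodeList, narrowed here by node type.
    static NodeList failedTenantNodes(NodeRepository nodeRepository) {
        return nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant);
    }

    // Old: nodeRepository.nodes().getNode(hostname)
    // New: node(hostname), still returning Optional<Node>.
    static Optional<Node> byHostname(NodeRepository nodeRepository, String hostname) {
        return nodeRepository.nodes().node(hostname);
    }

    // NodeList keeps size(), first() and iteration; asList() recovers a List<Node>
    // where a test needs index-based access such as get(3).
    static List<Node> indexable(NodeList nodes) {
        return nodes.asList();
    }
}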
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooterTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooterTest.java
index b9f3985172b..dbaa5f034f6 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooterTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooterTest.java
@@ -8,6 +8,7 @@ import com.yahoo.vespa.curator.mock.MockCurator;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
import org.junit.Test;
@@ -94,8 +95,8 @@ public class NodeRebooterTest {
while (true) {
rebooter.maintain();
simulateReboot(nodeRepository);
- List<Node> nodes = nodeRepository.nodes().getNodes(NodeType.host, Node.State.ready);
- int count = withCurrentRebootGeneration(1L, nodes).size();
+ NodeList nodes = nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host);
+ int count = withCurrentRebootGeneration(1L, nodes.asList()).size();
if (count == 2) {
break;
}
@@ -103,8 +104,8 @@ public class NodeRebooterTest {
}
private void assertReadyHosts(int expectedCount, NodeRepository nodeRepository, long generation) {
- List<Node> nodes = nodeRepository.nodes().getNodes(NodeType.host, Node.State.ready);
- assertEquals(expectedCount, withCurrentRebootGeneration(generation, nodes).size());
+ NodeList nodes = nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host);
+ assertEquals(expectedCount, withCurrentRebootGeneration(generation, nodes.asList()).size());
}
private void makeReadyHosts(int count, ProvisioningTester tester) {
@@ -113,7 +114,7 @@ public class NodeRebooterTest {
/** Set current reboot generation to the wanted reboot generation whenever it is larger (i.e. record a reboot) */
private void simulateReboot(NodeRepository nodeRepository) {
- for (Node node : nodeRepository.nodes().getNodes(Node.State.ready, Node.State.active)) {
+ for (Node node : nodeRepository.nodes().list(Node.State.ready, Node.State.active)) {
if (node.status().reboot().wanted() > node.status().reboot().current())
nodeRepository.nodes().write(node.withCurrentRebootGeneration(node.status().reboot().wanted(),
nodeRepository.clock().instant()), () -> {});
@@ -129,7 +130,7 @@ public class NodeRebooterTest {
private void simulateOsUpgrade(NodeRepository nodeRepository) {
var wantedOsVersion = nodeRepository.osVersions().targetFor(NodeType.host);
if (wantedOsVersion.isEmpty()) return;
- for (Node node : nodeRepository.nodes().getNodes(Node.State.ready, Node.State.active)) {
+ for (Node node : nodeRepository.nodes().list(Node.State.ready, Node.State.active)) {
if (wantedOsVersion.get().isAfter(node.status().osVersion().current().orElse(Version.emptyVersion)))
nodeRepository.nodes().write(node.withCurrentOsVersion(wantedOsVersion.get(), nodeRepository.clock().instant()),
() -> {});
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainerTest.java
index f331f3bcb4a..db6aebacddc 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainerTest.java
@@ -51,19 +51,19 @@ public class OperatorChangeApplicationMaintainerTest {
maintainer.maintain();
assertEquals("No changes -> no redeployments", 3, fixture.deployer.redeployments);
- nodeRepository.nodes().fail(nodeRepository.nodes().getNodes(fixture.app1).get(3).hostname(), Agent.system, "Failing to unit test");
+ nodeRepository.nodes().fail(nodeRepository.nodes().list().owner(fixture.app1).asList().get(3).hostname(), Agent.system, "Failing to unit test");
clock.advance(Duration.ofMinutes(2));
maintainer.maintain();
assertEquals("System change -> no redeployments", 3, fixture.deployer.redeployments);
clock.advance(Duration.ofSeconds(1));
- nodeRepository.nodes().fail(nodeRepository.nodes().getNodes(fixture.app2).get(4).hostname(), Agent.operator, "Manual node failing");
+ nodeRepository.nodes().fail(nodeRepository.nodes().list().owner(fixture.app2).asList().get(4).hostname(), Agent.operator, "Manual node failing");
clock.advance(Duration.ofMinutes(2));
maintainer.maintain();
assertEquals("Operator change -> redeployment", 4, fixture.deployer.redeployments);
clock.advance(Duration.ofSeconds(1));
- nodeRepository.nodes().fail(nodeRepository.nodes().getNodes(fixture.app3).get(1).hostname(), Agent.operator, "Manual node failing");
+ nodeRepository.nodes().fail(nodeRepository.nodes().list().owner(fixture.app3).asList().get(1).hostname(), Agent.operator, "Manual node failing");
clock.advance(Duration.ofMinutes(2));
maintainer.maintain();
assertEquals("Operator change -> redeployment", 5, fixture.deployer.redeployments);
@@ -104,9 +104,9 @@ public class OperatorChangeApplicationMaintainerTest {
deployer.deployFromLocalActive(app1, false).get().activate();
deployer.deployFromLocalActive(app2, false).get().activate();
deployer.deployFromLocalActive(app3, false).get().activate();
- assertEquals(wantedNodesApp1, nodeRepository.nodes().getNodes(app1, Node.State.active).size());
- assertEquals(wantedNodesApp2, nodeRepository.nodes().getNodes(app2, Node.State.active).size());
- assertEquals(wantedNodesApp3, nodeRepository.nodes().getNodes(app3, Node.State.active).size());
+ assertEquals(wantedNodesApp1, nodeRepository.nodes().list(Node.State.active).owner(app1).size());
+ assertEquals(wantedNodesApp2, nodeRepository.nodes().list(Node.State.active).owner(app2).size());
+ assertEquals(wantedNodesApp3, nodeRepository.nodes().list(Node.State.active).owner(app3).size());
}
}
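Application-scoped lookups in the maintainer tests follow the same scheme: getNodes(application, state) becomes list(state).owner(application), and the filters compose. A minimal, hypothetical sketch, assuming only the owner(...) and nodeType(...) filters and the Node.State varargs seen in these hunks:

import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.NodeType;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;

// Hypothetical helper, for illustration only; not part of this change.
class ApplicationNodeSketch {

    // Old: nodeRepository.nodes().getNodes(application, Node.State.active)
    // New: filter by state first, then by owning application.
    static NodeList activeNodesOf(NodeRepository nodeRepository, ApplicationId application) {
        return nodeRepository.nodes().list(Node.State.active).owner(application);
    }

    // Several states can be passed at once, as in the failed/parked assertions above.
    static int failedOrParkedTenantNodes(NodeRepository nodeRepository) {
        return nodeRepository.nodes().list(Node.State.failed, Node.State.parked)
                             .nodeType(NodeType.tenant)
                             .size();
    }
}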
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OsUpgradeActivatorTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OsUpgradeActivatorTest.java
index 36452e05bb6..3f0b94170f6 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OsUpgradeActivatorTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OsUpgradeActivatorTest.java
@@ -103,7 +103,7 @@ public class OsUpgradeActivatorTest {
private Stream<Node> streamUpdatedNodes(List<Node> nodes) {
Stream<Node> stream = Stream.empty();
for (var node : nodes) {
- stream = Stream.concat(stream, tester.nodeRepository().nodes().getNode(node.hostname()).stream());
+ stream = Stream.concat(stream, tester.nodeRepository().nodes().node(node.hostname()).stream());
}
return stream;
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java
index 1f1e6a79317..e280a0211e4 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java
@@ -64,32 +64,32 @@ public class PeriodicApplicationMaintainerTest {
fixture.setBootstrapping(false);
// Fail and park some nodes
- nodeRepository.nodes().fail(nodeRepository.nodes().getNodes(fixture.app1).get(3).hostname(), Agent.system, "Failing to unit test");
- nodeRepository.nodes().fail(nodeRepository.nodes().getNodes(fixture.app2).get(0).hostname(), Agent.system, "Failing to unit test");
- nodeRepository.nodes().park(nodeRepository.nodes().getNodes(fixture.app2).get(4).hostname(), true, Agent.system, "Parking to unit test");
+ nodeRepository.nodes().fail(nodeRepository.nodes().list().owner(fixture.app1).asList().get(3).hostname(), Agent.system, "Failing to unit test");
+ nodeRepository.nodes().fail(nodeRepository.nodes().list().owner(fixture.app2).asList().get(0).hostname(), Agent.system, "Failing to unit test");
+ nodeRepository.nodes().park(nodeRepository.nodes().list().owner(fixture.app2).asList().get(4).hostname(), true, Agent.system, "Parking to unit test");
int failedInApp1 = 1;
int failedOrParkedInApp2 = 2;
- assertEquals(fixture.wantedNodesApp1 - failedInApp1, nodeRepository.nodes().getNodes(fixture.app1, Node.State.active).size());
- assertEquals(fixture.wantedNodesApp2 - failedOrParkedInApp2, nodeRepository.nodes().getNodes(fixture.app2, Node.State.active).size());
- assertEquals(failedInApp1 + failedOrParkedInApp2, nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed, Node.State.parked).size());
- assertEquals(3, nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size());
- assertEquals(2, nodeRepository.nodes().getNodes(NodeType.host, Node.State.ready).size());
+ assertEquals(fixture.wantedNodesApp1 - failedInApp1, nodeRepository.nodes().list(Node.State.active).owner(fixture.app1).size());
+ assertEquals(fixture.wantedNodesApp2 - failedOrParkedInApp2, nodeRepository.nodes().list(Node.State.active).owner(fixture.app2).size());
+ assertEquals(failedInApp1 + failedOrParkedInApp2, nodeRepository.nodes().list(Node.State.failed, Node.State.parked).nodeType(NodeType.tenant).size());
+ assertEquals(3, nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
+ assertEquals(2, nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).size());
// Cause maintenance deployment which will allocate replacement nodes
fixture.runApplicationMaintainer();
- assertEquals(fixture.wantedNodesApp1, nodeRepository.nodes().getNodes(fixture.app1, Node.State.active).size());
- assertEquals(fixture.wantedNodesApp2, nodeRepository.nodes().getNodes(fixture.app2, Node.State.active).size());
- assertEquals(0, nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size());
+ assertEquals(fixture.wantedNodesApp1, nodeRepository.nodes().list(Node.State.active).owner(fixture.app1).size());
+ assertEquals(fixture.wantedNodesApp2, nodeRepository.nodes().list(Node.State.active).owner(fixture.app2).size());
+ assertEquals(0, nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
// Reactivate the previously failed nodes
- nodeRepository.nodes().reactivate(nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).get(0).hostname(), Agent.system, getClass().getSimpleName());
- nodeRepository.nodes().reactivate(nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).get(0).hostname(), Agent.system, getClass().getSimpleName());
- nodeRepository.nodes().reactivate(nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.parked).get(0).hostname(), Agent.system, getClass().getSimpleName());
+ nodeRepository.nodes().reactivate(nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).first().get().hostname(), Agent.system, getClass().getSimpleName());
+ nodeRepository.nodes().reactivate(nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).first().get().hostname(), Agent.system, getClass().getSimpleName());
+ nodeRepository.nodes().reactivate(nodeRepository.nodes().list(Node.State.parked).nodeType(NodeType.tenant).first().get().hostname(), Agent.system, getClass().getSimpleName());
int reactivatedInApp1 = 1;
int reactivatedInApp2 = 2;
- assertEquals(0, nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size());
- assertEquals(fixture.wantedNodesApp1 + reactivatedInApp1, nodeRepository.nodes().getNodes(fixture.app1, Node.State.active).size());
- assertEquals(fixture.wantedNodesApp2 + reactivatedInApp2, nodeRepository.nodes().getNodes(fixture.app2, Node.State.active).size());
+ assertEquals(0, nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
+ assertEquals(fixture.wantedNodesApp1 + reactivatedInApp1, nodeRepository.nodes().list(Node.State.active).owner(fixture.app1).size());
+ assertEquals(fixture.wantedNodesApp2 + reactivatedInApp2, nodeRepository.nodes().list(Node.State.active).owner(fixture.app2).size());
assertEquals("The reactivated nodes are now active but not part of the application",
0, fixture.getNodes(Node.State.active).retired().size());
@@ -108,17 +108,17 @@ public class PeriodicApplicationMaintainerTest {
fixture.activate();
// Freeze active nodes to simulate an application being deleted during a maintenance run
- List<Node> frozenActiveNodes = nodeRepository.nodes().getNodes(Node.State.active);
+ NodeList frozenActiveNodes = nodeRepository.nodes().list(Node.State.active);
// Remove one application without letting the application maintainer know about it
fixture.remove(fixture.app2);
- assertEquals(fixture.wantedNodesApp2, nodeRepository.nodes().getNodes(fixture.app2, Node.State.inactive).size());
+ assertEquals(fixture.wantedNodesApp2, nodeRepository.nodes().list(Node.State.inactive).owner(fixture.app2).size());
// Nodes belonging to app2 are inactive after maintenance
fixture.maintainer.setOverriddenNodesNeedingMaintenance(frozenActiveNodes);
fixture.runApplicationMaintainer();
assertEquals("Inactive nodes were incorrectly activated after maintenance", fixture.wantedNodesApp2,
- nodeRepository.nodes().getNodes(fixture.app2, Node.State.inactive).size());
+ nodeRepository.nodes().list(Node.State.inactive).owner(fixture.app2).size());
}
@Test(timeout = 60_000)
@@ -232,8 +232,8 @@ public class PeriodicApplicationMaintainerTest {
void activate() {
deployer.deployFromLocalActive(app1, false).get().activate();
deployer.deployFromLocalActive(app2, false).get().activate();
- assertEquals(wantedNodesApp1, nodeRepository.nodes().getNodes(app1, Node.State.active).size());
- assertEquals(wantedNodesApp2, nodeRepository.nodes().getNodes(app2, Node.State.active).size());
+ assertEquals(wantedNodesApp1, nodeRepository.nodes().list(Node.State.active).owner(app1).size());
+ assertEquals(wantedNodesApp2, nodeRepository.nodes().list(Node.State.active).owner(app2).size());
}
void remove(ApplicationId application) {
@@ -250,7 +250,7 @@ public class PeriodicApplicationMaintainerTest {
}
NodeList getNodes(Node.State ... states) {
- return NodeList.copyOf(nodeRepository.nodes().getNodes(NodeType.tenant, states));
+ return nodeRepository.nodes().list(states).nodeType(NodeType.tenant);
}
void setBootstrapping(boolean bootstrapping) {
@@ -261,9 +261,9 @@ public class PeriodicApplicationMaintainerTest {
private static class TestablePeriodicApplicationMaintainer extends PeriodicApplicationMaintainer {
- private List<Node> overriddenNodesNeedingMaintenance;
+ private NodeList overriddenNodesNeedingMaintenance;
- void setOverriddenNodesNeedingMaintenance(List<Node> overriddenNodesNeedingMaintenance) {
+ void setOverriddenNodesNeedingMaintenance(NodeList overriddenNodesNeedingMaintenance) {
this.overriddenNodesNeedingMaintenance = overriddenNodesNeedingMaintenance;
}
@@ -273,7 +273,7 @@ public class PeriodicApplicationMaintainerTest {
}
@Override
- protected List<Node> nodesNeedingMaintenance() {
+ protected NodeList nodesNeedingMaintenance() {
return overriddenNodesNeedingMaintenance != null
? overriddenNodesNeedingMaintenance
: super.nodesNeedingMaintenance();
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java
index bc2676c0acf..9fddaab8b3b 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java
@@ -17,6 +17,7 @@ import com.yahoo.config.provisioning.FlavorsConfig;
import com.yahoo.test.ManualClock;
import com.yahoo.transaction.NestedTransaction;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.provisioning.FlavorConfigBuilder;
@@ -92,7 +93,7 @@ public class RebalancerTest {
tester.maintain();
assertTrue("Want to retire is reset", tester.getNodes(Node.State.active).stream().noneMatch(node -> node.status().wantToRetire()));
assertEquals("Reserved node was moved to dirty", 1, tester.getNodes(Node.State.dirty).size());
- String reservedHostname = tester.getNodes(Node.State.dirty).get(0).hostname();
+ String reservedHostname = tester.getNodes(Node.State.dirty).first().get().hostname();
tester.nodeRepository().nodes().setReady(reservedHostname, Agent.system, "Cleanup");
tester.nodeRepository().nodes().removeRecursively(reservedHostname);
@@ -176,18 +177,18 @@ public class RebalancerTest {
}
List<Node> getNodes(ApplicationId applicationId, Node.State nodeState) {
- return tester.nodeRepository().nodes().getNodes(applicationId, nodeState);
+ return tester.nodeRepository().nodes().list(nodeState).owner(applicationId).asList();
}
boolean isNodeRetired(Node node) {
return getNode(node.hostname()).get().allocation().get().membership().retired();
}
- Optional<Node> getNode(String hostname) { return tester.nodeRepository().nodes().getNode(hostname); }
+ Optional<Node> getNode(String hostname) { return tester.nodeRepository().nodes().node(hostname); }
- List<Node> getNodes(Node.State nodeState) { return tester.nodeRepository().nodes().getNodes(nodeState); }
+ NodeList getNodes(Node.State nodeState) { return tester.nodeRepository().nodes().list(nodeState); }
- Node getNode(ApplicationId applicationId) { return tester.nodeRepository().nodes().getNodes(applicationId).get(0); }
+ Node getNode(ApplicationId applicationId) { return tester.nodeRepository().nodes().list().owner(applicationId).first().get(); }
ManualClock clock() { return tester.clock(); }
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ReservationExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ReservationExpirerTest.java
index 5b67c7bc358..2248f11b141 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ReservationExpirerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ReservationExpirerTest.java
@@ -10,6 +10,7 @@ import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeType;
import com.yahoo.test.ManualClock;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.provisioning.FlavorConfigBuilder;
import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
@@ -40,22 +41,22 @@ public class ReservationExpirerTest {
tester.makeReadyHosts(1, hostResources);
// Reserve 2 nodes
- assertEquals(2, nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size());
+ assertEquals(2, nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
ApplicationId applicationId = new ApplicationId.Builder().tenant("foo").applicationName("bar").instanceName("fuz").build();
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test")).vespaVersion("6.42").build();
tester.provisioner().prepare(applicationId, cluster, Capacity.from(new ClusterResources(2, 1, nodeResources)), null);
- assertEquals(2, nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.reserved).size());
+ assertEquals(2, nodeRepository.nodes().list(Node.State.reserved).nodeType(NodeType.tenant).size());
// Reservation times out
clock.advance(Duration.ofMinutes(14)); // Reserved-but-unused nodes time out
new ReservationExpirer(nodeRepository, Duration.ofMinutes(10), metric).run();
// Assert nothing is reserved
- assertEquals(0, nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.reserved).size());
- List<Node> dirty = nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.dirty);
+ assertEquals(0, nodeRepository.nodes().list(Node.State.reserved).nodeType(NodeType.tenant).size());
+ NodeList dirty = nodeRepository.nodes().list(Node.State.dirty).nodeType(NodeType.tenant);
assertEquals(2, dirty.size());
- assertFalse(dirty.get(0).allocation().isPresent());
- assertFalse(dirty.get(1).allocation().isPresent());
+ assertFalse(dirty.asList().get(0).allocation().isPresent());
+ assertFalse(dirty.asList().get(1).allocation().isPresent());
assertEquals(2, metric.values.get("expired.reserved"));
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java
index fa492c3a3e9..c1c6e5b6154 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java
@@ -71,8 +71,8 @@ public class RetiredExpirerTest {
activate(applicationId, cluster, wantedNodes=7, 1);
activate(applicationId, cluster, wantedNodes=2, 1);
activate(applicationId, cluster, wantedNodes=3, 1);
- assertEquals(7, nodeRepository.nodes().getNodes(applicationId, Node.State.active).size());
- assertEquals(0, nodeRepository.nodes().getNodes(applicationId, Node.State.inactive).size());
+ assertEquals(7, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
+ assertEquals(0, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
// Cause inactivation of retired nodes
clock.advance(Duration.ofHours(30)); // Retire period spent
@@ -83,12 +83,12 @@ public class RetiredExpirerTest {
cluster,
Capacity.from(new ClusterResources(wantedNodes, 1, nodeResources)))));
createRetiredExpirer(deployer).run();
- assertEquals(3, nodeRepository.nodes().getNodes(applicationId, Node.State.active).size());
- assertEquals(4, nodeRepository.nodes().getNodes(applicationId, Node.State.inactive).size());
+ assertEquals(3, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
+ assertEquals(4, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
assertEquals(1, deployer.redeployments);
// inactivated nodes are not retired
- for (Node node : nodeRepository.nodes().getNodes(applicationId, Node.State.inactive))
+ for (Node node : nodeRepository.nodes().list(Node.State.inactive).owner(applicationId))
assertFalse(node.allocation().get().membership().retired());
}
@@ -106,8 +106,8 @@ public class RetiredExpirerTest {
activate(applicationId, cluster, wantedNodes=7, 1);
activate(applicationId, cluster, wantedNodes=2, 1);
activate(applicationId, cluster, wantedNodes=3, 1);
- assertEquals(7, nodeRepository.nodes().getNodes(applicationId, Node.State.active).size());
- assertEquals(0, nodeRepository.nodes().getNodes(applicationId, Node.State.inactive).size());
+ assertEquals(7, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
+ assertEquals(0, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
// Cause inactivation of retired nodes
MockDeployer deployer =
@@ -128,27 +128,27 @@ public class RetiredExpirerTest {
RetiredExpirer retiredExpirer = createRetiredExpirer(deployer);
retiredExpirer.run();
- assertEquals(5, nodeRepository.nodes().getNodes(applicationId, Node.State.active).size());
- assertEquals(2, nodeRepository.nodes().getNodes(applicationId, Node.State.inactive).size());
+ assertEquals(5, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
+ assertEquals(2, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
assertEquals(1, deployer.redeployments);
verify(orchestrator, times(4)).acquirePermissionToRemove(any());
// Running it again has no effect
retiredExpirer.run();
- assertEquals(5, nodeRepository.nodes().getNodes(applicationId, Node.State.active).size());
- assertEquals(2, nodeRepository.nodes().getNodes(applicationId, Node.State.inactive).size());
+ assertEquals(5, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
+ assertEquals(2, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
assertEquals(1, deployer.redeployments);
verify(orchestrator, times(6)).acquirePermissionToRemove(any());
clock.advance(RETIRED_EXPIRATION.plusMinutes(1));
retiredExpirer.run();
- assertEquals(3, nodeRepository.nodes().getNodes(applicationId, Node.State.active).size());
- assertEquals(4, nodeRepository.nodes().getNodes(applicationId, Node.State.inactive).size());
+ assertEquals(3, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
+ assertEquals(4, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
assertEquals(2, deployer.redeployments);
verify(orchestrator, times(6)).acquirePermissionToRemove(any());
// inactivated nodes are not retired
- for (Node node : nodeRepository.nodes().getNodes(applicationId, Node.State.inactive))
+ for (Node node : nodeRepository.nodes().list(Node.State.inactive).owner(applicationId))
assertFalse(node.allocation().get().membership().retired());
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
index 4d334147212..6581008268d 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
@@ -14,6 +14,7 @@ import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.Zone;
import com.yahoo.config.provisioning.FlavorsConfig;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.autoscale.MetricSnapshot;
@@ -114,14 +115,14 @@ public class ScalingSuggestionsMaintainerTest {
}
private boolean shouldSuggest(ApplicationId app, ClusterSpec cluster, ProvisioningTester tester) {
- var currentResources = tester.nodeRepository().nodes().list(app).cluster(cluster.id()).not().retired().toResources();
+ var currentResources = tester.nodeRepository().nodes().list().owner(app).cluster(cluster.id()).not().retired().toResources();
return tester.nodeRepository().applications().get(app).get().cluster(cluster.id()).get()
.shouldSuggestResources(currentResources);
}
public void addMeasurements(float cpu, float memory, float disk, int generation, int count, ApplicationId applicationId,
NodeRepository nodeRepository, MetricsDb db) {
- List<Node> nodes = nodeRepository.nodes().getNodes(applicationId, Node.State.active);
+ NodeList nodes = nodeRepository.nodes().list(Node.State.active).owner(applicationId);
for (int i = 0; i < count; i++) {
for (Node node : nodes)
db.add(List.of(new Pair<>(node.hostname(), new MetricSnapshot(nodeRepository.clock().instant(),
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
index 7005a64127a..09e142b68a0 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
@@ -40,7 +40,7 @@ public class OsVersionsTest {
public void upgrade() {
var versions = new OsVersions(tester.nodeRepository(), new DelegatingUpgrader(tester.nodeRepository(), Integer.MAX_VALUE));
provisionInfraApplication(10);
- Supplier<List<Node>> hostNodes = () -> tester.nodeRepository().nodes().getNodes(NodeType.host);
+ Supplier<NodeList> hostNodes = () -> tester.nodeRepository().nodes().list().nodeType(NodeType.host);
// Upgrade OS
assertTrue("No versions set", versions.readChange().targets().isEmpty());
@@ -50,7 +50,7 @@ public class OsVersionsTest {
assertTrue("Per-node wanted OS version remains unset", hostNodes.get().stream().allMatch(node -> node.status().osVersion().wanted().isEmpty()));
// One host upgrades to a later version outside the control of orchestration
- Node hostOnLaterVersion = hostNodes.get().get(0);
+ Node hostOnLaterVersion = hostNodes.get().first().get();
setCurrentVersion(List.of(hostOnLaterVersion), Version.fromString("8.1"));
// Upgrade OS again
@@ -60,12 +60,12 @@ public class OsVersionsTest {
// Resume upgrade
versions.resumeUpgradeOf(NodeType.host, true);
- List<Node> allHosts = hostNodes.get();
+ NodeList allHosts = hostNodes.get();
assertTrue("Wanted version is set", allHosts.stream()
.filter(node -> !node.equals(hostOnLaterVersion))
.allMatch(node -> node.status().osVersion().wanted().isPresent()));
assertTrue("Wanted version is not set for host on later version",
- allHosts.get(0).status().osVersion().wanted().isEmpty());
+ allHosts.first().get().status().osVersion().wanted().isEmpty());
// Halt upgrade
versions.resumeUpgradeOf(NodeType.host, false);
@@ -107,7 +107,7 @@ public class OsVersionsTest {
tester.nodeRepository().nodes().fail(host.hostname(), Agent.system, OsVersions.class.getSimpleName());
tester.nodeRepository().nodes().removeRecursively(host.hostname());
}
- assertEquals(10, tester.nodeRepository().nodes().getNodes(Node.State.deprovisioned).size());
+ assertEquals(10, tester.nodeRepository().nodes().list(Node.State.deprovisioned).size());
// Set target
var version1 = Version.fromString("7.1");
@@ -256,7 +256,7 @@ public class OsVersionsTest {
tester.prepareAndActivateInfraApplication(infraApplication, nodeType);
return nodes.stream()
.map(Node::hostname)
- .flatMap(hostname -> tester.nodeRepository().nodes().getNode(hostname).stream())
+ .flatMap(hostname -> tester.nodeRepository().nodes().node(hostname).stream())
.collect(Collectors.toList());
}
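In OsVersionsTest the hostNodes supplier changes type from List<Node> to NodeList without most call sites changing, because a NodeList can be streamed and iterated directly. A small hypothetical sketch, assuming only the stream(), allMatch and osVersion().wanted() usages visible above:

import java.util.function.Supplier;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;

// Hypothetical, for illustration only; not part of this change.
class NodeListStreamSketch {

    // Mirrors the assertion style used above: every host has a wanted OS version set.
    static boolean allHaveWantedOsVersion(Supplier<NodeList> hostNodes) {
        return hostNodes.get().stream()
                        .allMatch(node -> node.status().osVersion().wanted().isPresent());
    }
}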
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java
index 23a8af045af..8033663c6cf 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java
@@ -8,6 +8,7 @@ import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeType;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.node.NodeAcl;
import org.junit.Test;
@@ -33,7 +34,7 @@ public class AclProvisioningTest {
@Test
public void trusted_nodes_for_allocated_node() {
- List<Node> configServers = tester.makeConfigServers(3, "d-1-4-10", Version.fromString("6.123.456"));
+ NodeList configServers = tester.makeConfigServers(3, "d-1-4-10", Version.fromString("6.123.456"));
// Populate repo
tester.makeReadyNodes(10, new NodeResources(1, 4, 10, 1));
@@ -51,18 +52,18 @@ public class AclProvisioningTest {
// Get trusted nodes for the first active node
Node node = activeNodes.get(0);
- List<Node> host = node.parentHostname().flatMap(tester.nodeRepository().nodes()::getNode).map(List::of).orElseGet(List::of);
+ List<Node> host = node.parentHostname().flatMap(tester.nodeRepository().nodes()::node).map(List::of).orElseGet(List::of);
Supplier<NodeAcl> nodeAcls = () -> node.acl(tester.nodeRepository().nodes().list(), tester.nodeRepository().loadBalancers());
// Trusted nodes are active nodes in same application, proxy nodes and config servers
- assertAcls(List.of(activeNodes, proxyNodes, configServers, host),
+ assertAcls(List.of(activeNodes, proxyNodes, configServers.asList(), host),
Set.of("10.2.3.0/24", "10.4.5.0/24"),
List.of(nodeAcls.get()));
}
@Test
public void trusted_nodes_for_unallocated_node() {
- List<Node> configServers = tester.makeConfigServers(3, "default", Version.fromString("6.123.456"));
+ NodeList configServers = tester.makeConfigServers(3, "default", Version.fromString("6.123.456"));
// Populate repo
tester.makeReadyNodes(10, nodeResources);
@@ -72,17 +73,17 @@ public class AclProvisioningTest {
deploy(2);
// Get trusted nodes for a ready tenant node
- Node node = tester.nodeRepository().nodes().getNodes(NodeType.tenant, Node.State.ready).get(0);
+ Node node = tester.nodeRepository().nodes().list(Node.State.ready).nodeType(NodeType.tenant).first().get();
NodeAcl nodeAcl = node.acl(tester.nodeRepository().nodes().list(), tester.nodeRepository().loadBalancers());
- List<Node> tenantNodes = tester.nodeRepository().nodes().getNodes(NodeType.tenant);
+ NodeList tenantNodes = tester.nodeRepository().nodes().list().nodeType(NodeType.tenant);
// Trusted nodes are all proxy-, config-, and, tenant-nodes
- assertAcls(List.of(proxyNodes, configServers, tenantNodes), List.of(nodeAcl));
+ assertAcls(List.of(proxyNodes, configServers.asList(), tenantNodes.asList()), List.of(nodeAcl));
}
@Test
public void trusted_nodes_for_config_server() {
- List<Node> configServers = tester.makeConfigServers(3, "default", Version.fromString("6.123.456"));
+ NodeList configServers = tester.makeConfigServers(3, "default", Version.fromString("6.123.456"));
// Populate repo
tester.makeReadyNodes(10, nodeResources);
@@ -90,20 +91,20 @@ public class AclProvisioningTest {
// Allocate 2 nodes
deploy(4);
- List<Node> tenantNodes = tester.nodeRepository().nodes().getNodes(NodeType.tenant);
+ NodeList tenantNodes = tester.nodeRepository().nodes().list().nodeType(NodeType.tenant);
// Get trusted nodes for the first config server
- Node node = tester.nodeRepository().nodes().getNode("cfg1")
+ Node node = tester.nodeRepository().nodes().node("cfg1")
.orElseThrow(() -> new RuntimeException("Failed to find cfg1"));
NodeAcl nodeAcl = node.acl(tester.nodeRepository().nodes().list(), tester.nodeRepository().loadBalancers());
// Trusted nodes is all tenant nodes, all proxy nodes, all config servers and load balancer subnets
- assertAcls(List.of(tenantNodes, proxyNodes, configServers), Set.of("10.2.3.0/24", "10.4.5.0/24"), List.of(nodeAcl));
+ assertAcls(List.of(tenantNodes.asList(), proxyNodes, configServers.asList()), Set.of("10.2.3.0/24", "10.4.5.0/24"), List.of(nodeAcl));
}
@Test
public void trusted_nodes_for_proxy() {
- List<Node> configServers = tester.makeConfigServers(3, "default", Version.fromString("6.123.456"));
+ NodeList configServers = tester.makeConfigServers(3, "default", Version.fromString("6.123.456"));
// Populate repo
tester.makeReadyNodes(10, "default");
@@ -114,17 +115,17 @@ public class AclProvisioningTest {
tester.deploy(zoneApplication, Capacity.fromRequiredNodeType(NodeType.proxy));
// Get trusted nodes for first proxy node
- List<Node> proxyNodes = tester.nodeRepository().nodes().getNodes(zoneApplication);
- Node node = proxyNodes.get(0);
+ NodeList proxyNodes = tester.nodeRepository().nodes().list().owner(zoneApplication);
+ Node node = proxyNodes.first().get();
NodeAcl nodeAcl = node.acl(tester.nodeRepository().nodes().list(), tester.nodeRepository().loadBalancers());
// Trusted nodes is all config servers and all proxy nodes
- assertAcls(List.of(proxyNodes, configServers), List.of(nodeAcl));
+ assertAcls(List.of(proxyNodes.asList(), configServers.asList()), List.of(nodeAcl));
}
@Test
public void trusted_nodes_for_children_of_docker_host() {
- List<Node> configServers = tester.makeConfigServers(3, "default", Version.fromString("6.123.456"));
+ NodeList configServers = tester.makeConfigServers(3, "default", Version.fromString("6.123.456"));
// Populate repo
List<Node> dockerHostNodes = tester.makeReadyNodes(2, "default", NodeType.host);
@@ -143,7 +144,7 @@ public class AclProvisioningTest {
.findFirst()
.orElseThrow(() -> new RuntimeException("Expected to find ACL for node " + dockerNode.hostname()));
assertEquals(dockerHostNodeUnderTest.hostname(), dockerNode.parentHostname().get());
- assertAcls(List.of(configServers, dockerNodes, List.of(dockerHostNodeUnderTest)), nodeAcl);
+ assertAcls(List.of(configServers.asList(), dockerNodes, List.of(dockerHostNodeUnderTest)), nodeAcl);
}
}
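The AclProvisioningTest hunks above switch makeConfigServers and the node queries to NodeList, bridging back to List<Node> with asList() wherever assertAcls still expects plain lists, and selecting single nodes with chained filters plus first(). A small sketch of that selection pattern, under the same classpath assumption; the class name below is illustrative.

    import com.yahoo.config.provision.NodeType;
    import com.yahoo.vespa.hosted.provision.Node;
    import com.yahoo.vespa.hosted.provision.NodeList;
    import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;

    class ReadyTenantNodeSketch {

        // list(state).nodeType(type).first() replaces getNodes(type, state).get(0)
        static Node firstReadyTenantNode(ProvisioningTester tester) {
            NodeList ready = tester.nodeRepository().nodes().list(Node.State.ready).nodeType(NodeType.tenant);
            return ready.first().orElseThrow(() -> new IllegalStateException("no ready tenant nodes"));
        }
    }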
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java
index 02ee41a5226..12a8b476d5e 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java
@@ -366,7 +366,7 @@ public class DockerProvisioningTest {
tester.activate(app1, cluster1, Capacity.from(new ClusterResources(2, 1, r)));
var tx = new ApplicationTransaction(new ProvisionLock(app1, tester.nodeRepository().nodes().lock(app1)), new NestedTransaction());
- tester.nodeRepository().nodes().deactivate(tester.nodeRepository().nodes().list(app1, Node.State.active).retired().asList(), tx);
+ tester.nodeRepository().nodes().deactivate(tester.nodeRepository().nodes().list(Node.State.active).owner(app1).retired().asList(), tx);
tx.nested().commit();
assertEquals(2, tester.getNodes(app1, Node.State.active).size());
@@ -413,8 +413,8 @@ public class DockerProvisioningTest {
}
else {
assertEquals(0, tester.getNodes(app1, Node.State.inactive).size());
- assertEquals(2, tester.nodeRepository().nodes().getNodes(Node.State.dirty).size());
- tester.nodeRepository().nodes().setReady(tester.nodeRepository().nodes().getNodes(Node.State.dirty), Agent.system, "test");
+ assertEquals(2, tester.nodeRepository().nodes().list(Node.State.dirty).size());
+ tester.nodeRepository().nodes().setReady(tester.nodeRepository().nodes().list(Node.State.dirty).asList(), Agent.system, "test");
tester.activate(app1, cluster1, Capacity.from(new ClusterResources(4, 1, r)));
}
@@ -434,13 +434,13 @@ public class DockerProvisioningTest {
private void assertNodeParentReservation(List<Node> nodes, Optional<TenantName> reservation, ProvisioningTester tester) {
for (Node node : nodes)
- assertEquals(reservation, tester.nodeRepository().nodes().getNode(node.parentHostname().get()).get().reservedTo());
+ assertEquals(reservation, tester.nodeRepository().nodes().node(node.parentHostname().get()).get().reservedTo());
}
private void assertHostSpecParentReservation(List<HostSpec> hostSpecs, Optional<TenantName> reservation, ProvisioningTester tester) {
for (HostSpec hostSpec : hostSpecs) {
- Node node = tester.nodeRepository().nodes().getNode(hostSpec.hostname()).get();
- assertEquals(reservation, tester.nodeRepository().nodes().getNode(node.parentHostname().get()).get().reservedTo());
+ Node node = tester.nodeRepository().nodes().node(hostSpec.hostname()).get();
+ assertEquals(reservation, tester.nodeRepository().nodes().node(node.parentHostname().get()).get().reservedTo());
}
}
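The DockerProvisioningTest hunks above chain two node(...) lookups to check the reservation of a child's parent host. A condensed sketch of that chain, assuming the same classpath; the helper name and exception messages are illustrative.

    import com.yahoo.config.provision.TenantName;
    import com.yahoo.vespa.hosted.provision.Node;
    import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
    import java.util.Optional;

    class ParentReservationSketch {

        // Resolve the child, then its parent host, and report which tenant the host is reserved to (if any)
        static Optional<TenantName> parentReservation(ProvisioningTester tester, String childHostname) {
            Node child = tester.nodeRepository().nodes().node(childHostname)
                               .orElseThrow(() -> new IllegalStateException("no such node: " + childHostname));
            Node parent = tester.nodeRepository().nodes().node(child.parentHostname().get())
                                .orElseThrow(() -> new IllegalStateException("parent host not found"));
            return parent.reservedTo();
        }
    }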
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java
index 0c8e19e0793..cf7083ccc4f 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java
@@ -69,7 +69,7 @@ public class DynamicDockerAllocationTest {
.build();
tester.makeReadyNodes(4, "host-small", NodeType.host, 32);
tester.activateTenantHosts();
- List<Node> dockerHosts = tester.nodeRepository().nodes().getNodes(NodeType.host, State.active);
+ List<Node> dockerHosts = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host).asList();
NodeResources flavor = new NodeResources(1, 4, 100, 1);
// Application 1
@@ -110,7 +110,7 @@ public class DynamicDockerAllocationTest {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build();
tester.makeReadyNodes(5, "host-small", NodeType.host, 32);
tester.activateTenantHosts();
- List<Node> dockerHosts = tester.nodeRepository().nodes().getNodes(NodeType.host, State.active);
+ NodeList dockerHosts = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host);
NodeResources resources = new NodeResources(1, 4, 100, 0.3);
// Application 1
@@ -202,7 +202,7 @@ public class DynamicDockerAllocationTest {
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build();
tester.makeReadyNodes(2, "host-small", NodeType.host, 32);
tester.activateTenantHosts();
- List<Node> dockerHosts = tester.nodeRepository().nodes().getNodes(NodeType.host, State.active);
+ List<Node> dockerHosts = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host).asList();
NodeResources flavor = new NodeResources(1, 4, 100, 1);
// Application 1
@@ -315,9 +315,9 @@ public class DynamicDockerAllocationTest {
List<HostSpec> hosts = tester.prepare(application, clusterSpec("myContent.t1.a1"), 2, 1, new NodeResources(1, 4, 100, 1));
tester.activate(application, hosts);
- List<Node> activeNodes = tester.nodeRepository().nodes().getNodes(application);
- assertEquals(ImmutableSet.of("127.0.127.13", "::13"), activeNodes.get(0).ipConfig().primary());
- assertEquals(ImmutableSet.of("127.0.127.2", "::2"), activeNodes.get(1).ipConfig().primary());
+ NodeList activeNodes = tester.nodeRepository().nodes().list().owner(application);
+ assertEquals(ImmutableSet.of("127.0.127.13", "::13"), activeNodes.asList().get(0).ipConfig().primary());
+ assertEquals(ImmutableSet.of("127.0.127.2", "::2"), activeNodes.asList().get(1).ipConfig().primary());
}
@Test
@@ -437,16 +437,16 @@ public class DynamicDockerAllocationTest {
// Redeploy does not change allocation as a host with switch information is no better or worse than hosts
// without switch information
- List<Node> allocatedNodes = tester.nodeRepository().nodes().getNodes(app1);
+ NodeList allocatedNodes = tester.nodeRepository().nodes().list().owner(app1);
tester.activate(app1, tester.prepare(app1, cluster, Capacity.from(new ClusterResources(2, 1, resources))));
- assertEquals("Allocation unchanged", allocatedNodes, tester.nodeRepository().nodes().getNodes(app1));
+ assertEquals("Allocation unchanged", allocatedNodes, tester.nodeRepository().nodes().list().owner(app1));
// Initial hosts are attached to the same switch
tester.patchNodes(hosts0, (host) -> host.withSwitchHostname(switch0));
// Redeploy does not change allocation
tester.activate(app1, tester.prepare(app1, cluster, Capacity.from(new ClusterResources(2, 1, resources))));
- assertEquals("Allocation unchanged", allocatedNodes, tester.nodeRepository().nodes().getNodes(app1));
+ assertEquals("Allocation unchanged", allocatedNodes, tester.nodeRepository().nodes().list().owner(app1));
// One regular host and one slow-disk host are provisioned on the same switch
String switch1 = "switch1";
@@ -522,11 +522,10 @@ public class DynamicDockerAllocationTest {
}
private List<Node> findSpareCapacity(ProvisioningTester tester) {
- List<Node> nodes = tester.nodeRepository().nodes().getNodes(State.values());
- NodeList nl = NodeList.copyOf(nodes);
+ NodeList nodes = tester.nodeRepository().nodes().list(State.values());
return nodes.stream()
.filter(n -> n.type() == NodeType.host)
- .filter(n -> nl.childrenOf(n).size() == 0) // Nodes without children
+ .filter(n -> nodes.childrenOf(n).size() == 0) // Nodes without children
.collect(Collectors.toList());
}
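findSpareCapacity above is the clearest win of the migration: list(...) already returns a NodeList, so childrenOf(...) can be called on the object being streamed and the intermediate NodeList.copyOf disappears. The equivalent standalone helper, as a sketch under the same classpath assumption:

    import com.yahoo.config.provision.NodeType;
    import com.yahoo.vespa.hosted.provision.Node;
    import com.yahoo.vespa.hosted.provision.NodeList;
    import java.util.List;
    import java.util.stream.Collectors;

    class SpareHostsSketch {

        // Hosts that currently have no child nodes allocated to them
        static List<Node> hostsWithoutChildren(NodeList nodes) {
            return nodes.stream()
                        .filter(n -> n.type() == NodeType.host)
                        .filter(n -> nodes.childrenOf(n).size() == 0)
                        .collect(Collectors.toList());
        }
    }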
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java
index 1d9c04999a1..7d8fec95d3a 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java
@@ -78,9 +78,9 @@ public class DynamicDockerProvisionTest {
// Total of 8 nodes should now be in node-repo, 4 active hosts and 4 active nodes
assertEquals(8, tester.nodeRepository().nodes().list().size());
- assertEquals(4, tester.nodeRepository().nodes().getNodes(NodeType.host, Node.State.active).size());
+ assertEquals(4, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host).size());
assertEquals(List.of("host-100-1", "host-101-1", "host-102-1", "host-103-1"),
- tester.nodeRepository().nodes().getNodes(NodeType.tenant, Node.State.active).stream()
+ tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.tenant).stream()
.map(Node::hostname).sorted().collect(Collectors.toList()));
// Deploy new application
@@ -89,10 +89,10 @@ public class DynamicDockerProvisionTest {
// Total of 12 nodes should now be in node-repo, 4 active hosts and 8 active nodes
assertEquals(12, tester.nodeRepository().nodes().list().size());
- assertEquals(4, tester.nodeRepository().nodes().getNodes(NodeType.host, Node.State.active).size());
+ assertEquals(4, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host).size());
assertEquals(List.of("host-100-1", "host-100-2", "host-101-1", "host-101-2", "host-102-1", "host-102-2",
"host-103-1", "host-103-2"),
- tester.nodeRepository().nodes().getNodes(NodeType.tenant, Node.State.active).stream()
+ tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.tenant).stream()
.map(Node::hostname).sorted().collect(Collectors.toList()));
// Deploy new exclusive application
@@ -104,8 +104,8 @@ public class DynamicDockerProvisionTest {
// Total of 20 nodes should now be in node-repo, 8 active hosts and 12 active nodes
assertEquals(20, tester.nodeRepository().nodes().list().size());
- assertEquals(8, tester.nodeRepository().nodes().getNodes(NodeType.host, Node.State.active).size());
- assertEquals(12, tester.nodeRepository().nodes().getNodes(NodeType.tenant, Node.State.active).size());
+ assertEquals(8, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host).size());
+ assertEquals(12, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
verifyNoMoreInteractions(hostProvisioner);
}
@@ -125,7 +125,7 @@ public class DynamicDockerProvisionTest {
// Total of 16 nodes should now be in node-repo, 8 active hosts and 8 active nodes
assertEquals(16, tester.nodeRepository().nodes().list().size());
- assertEquals(8, tester.nodeRepository().nodes().getNodes(NodeType.tenant, Node.State.active).size());
+ assertEquals(8, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
prepareAndActivate(application1, clusterSpec("mycluster"), 4, 1, smallResources);
prepareAndActivate(application2, clusterSpec("mycluster", true), 4, 1, smallResources);
@@ -153,11 +153,11 @@ public class DynamicDockerProvisionTest {
ApplicationId application3 = ProvisioningTester.applicationId();
prepareAndActivate(application3, clusterSpec("mycluster"), 3, 1, resources);
- assertEquals(4, tester.nodeRepository().nodes().getNodes(NodeType.tenant).stream().map(Node::parentHostname).distinct().count());
+ assertEquals(4, tester.nodeRepository().nodes().list().nodeType(NodeType.tenant).stream().map(Node::parentHostname).distinct().count());
ApplicationId application4 = ProvisioningTester.applicationId();
prepareAndActivate(application4, clusterSpec("mycluster"), 3, 1, resources);
- assertEquals(5, tester.nodeRepository().nodes().getNodes(NodeType.tenant).stream().map(Node::parentHostname).distinct().count());
+ assertEquals(5, tester.nodeRepository().nodes().list().nodeType(NodeType.tenant).stream().map(Node::parentHostname).distinct().count());
}
@Test
@@ -167,19 +167,19 @@ public class DynamicDockerProvisionTest {
mockHostProvisioner(hostProvisioner, "large", 3, null); // Provision shared hosts
prepareAndActivate(application1, clusterSpec("mycluster"), 4, 1, resources);
- Set<Node> initialNodes = tester.nodeRepository().nodes().list(application1).stream().collect(Collectors.toSet());
+ Set<Node> initialNodes = tester.nodeRepository().nodes().list().owner(application1).stream().collect(Collectors.toSet());
assertEquals(4, initialNodes.size());
// Redeploy same application with exclusive=true
mockHostProvisioner(hostProvisioner, "large", 3, application1);
prepareAndActivate(application1, clusterSpec("mycluster", true), 4, 1, resources);
- assertEquals(8, tester.nodeRepository().nodes().list(application1).size());
- assertEquals(initialNodes, tester.nodeRepository().nodes().list(application1).retired().stream().collect(Collectors.toSet()));
+ assertEquals(8, tester.nodeRepository().nodes().list().owner(application1).size());
+ assertEquals(initialNodes, tester.nodeRepository().nodes().list().owner(application1).retired().stream().collect(Collectors.toSet()));
// Redeploy without exclusive again is no-op
prepareAndActivate(application1, clusterSpec("mycluster"), 4, 1, resources);
- assertEquals(8, tester.nodeRepository().nodes().list(application1).size());
- assertEquals(initialNodes, tester.nodeRepository().nodes().list(application1).retired().stream().collect(Collectors.toSet()));
+ assertEquals(8, tester.nodeRepository().nodes().list().owner(application1).size());
+ assertEquals(initialNodes, tester.nodeRepository().nodes().list().owner(application1).retired().stream().collect(Collectors.toSet()));
}
@Test
@@ -188,7 +188,7 @@ public class DynamicDockerProvisionTest {
ApplicationId app = ProvisioningTester.applicationId();
Function<Node, Node> retireNode = node -> tester.patchNode(node, (n) -> n.withWantToRetire(true, Agent.system, Instant.now()));
- Function<Integer, Node> getNodeInGroup = group -> tester.nodeRepository().nodes().getNodes(app).stream()
+ Function<Integer, Node> getNodeInGroup = group -> tester.nodeRepository().nodes().list().owner(app).stream()
.filter(node -> node.allocation().get().membership().cluster().group().get().index() == group)
.findAny().orElseThrow();
@@ -209,7 +209,7 @@ public class DynamicDockerProvisionTest {
tester.prepare(app, clusterSpec("content"), 8, 2, resources);
// Verify that nodes have unique indices from 0..9
- var indices = tester.nodeRepository().nodes().getNodes(app).stream()
+ var indices = tester.nodeRepository().nodes().list().owner(app).stream()
.map(node -> node.allocation().get().membership().index())
.collect(Collectors.toSet());
assertTrue(indices.containsAll(IntStream.range(0, 10).boxed().collect(Collectors.toList())));
@@ -398,9 +398,9 @@ public class DynamicDockerProvisionTest {
private void prepareAndActivate(ApplicationId application, ClusterSpec clusterSpec, int nodes, int groups, NodeResources resources) {
List<HostSpec> prepared = tester.prepare(application, clusterSpec, nodes, groups, resources);
- List<Node> provisionedHosts = tester.nodeRepository().nodes().getNodes(NodeType.host, Node.State.provisioned);
+ NodeList provisionedHosts = tester.nodeRepository().nodes().list(Node.State.provisioned).nodeType(NodeType.host);
if (!provisionedHosts.isEmpty()) {
- tester.nodeRepository().nodes().setReady(provisionedHosts, Agent.system, DynamicDockerProvisionTest.class.getSimpleName());
+ tester.nodeRepository().nodes().setReady(provisionedHosts.asList(), Agent.system, DynamicDockerProvisionTest.class.getSimpleName());
tester.activateTenantHosts();
}
tester.activate(application, prepared);
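The prepareAndActivate helper above shows the common post-migration shape: narrow with list(Node.State.provisioned).nodeType(NodeType.host), then pass asList() to methods such as setReady that still take List<Node>. A condensed sketch of that flow; the wrapper class name and the reason string are illustrative.

    import com.yahoo.config.provision.NodeType;
    import com.yahoo.vespa.hosted.provision.Node;
    import com.yahoo.vespa.hosted.provision.NodeList;
    import com.yahoo.vespa.hosted.provision.node.Agent;
    import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;

    class ReadyProvisionedHostsSketch {

        // Mark any provisioned hosts ready, then activate the tenant host application
        static void readyProvisionedHosts(ProvisioningTester tester) {
            NodeList provisionedHosts = tester.nodeRepository().nodes()
                                              .list(Node.State.provisioned)
                                              .nodeType(NodeType.host);
            if (!provisionedHosts.isEmpty()) {
                tester.nodeRepository().nodes().setReady(provisionedHosts.asList(), Agent.system, "sketch");
                tester.activateTenantHosts();
            }
        }
    }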
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
index fece475852a..db6cc1d5fa6 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
@@ -12,6 +12,7 @@ import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeType;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancer;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancerInstance;
import com.yahoo.vespa.hosted.provision.lb.Real;
@@ -136,7 +137,7 @@ public class LoadBalancerProvisionerTest {
// Entire application is removed: Nodes and load balancer are deactivated
tester.remove(app1);
dirtyNodesOf(app1);
- assertTrue("No nodes are allocated to " + app1, tester.nodeRepository().nodes().getNodes(app1, Node.State.reserved, Node.State.active).isEmpty());
+ assertTrue("No nodes are allocated to " + app1, tester.nodeRepository().nodes().list(Node.State.reserved, Node.State.active).owner(app1).isEmpty());
assertEquals(2, lbApp1.get().size());
assertTrue("Deactivated load balancers", lbApp1.get().stream().allMatch(lb -> lb.state() == LoadBalancer.State.inactive));
assertTrue("Load balancers for " + app2 + " remain active", lbApp2.get().stream().allMatch(lb -> lb.state() == LoadBalancer.State.active));
@@ -167,20 +168,20 @@ public class LoadBalancerProvisionerTest {
var nodes = tester.prepare(app1, clusterRequest(ClusterSpec.Type.container, ClusterSpec.Id.from("qrs")), 2 , 1, resources);
Supplier<LoadBalancer> lb = () -> tester.nodeRepository().loadBalancers().list(app1).asList().get(0);
assertTrue("Load balancer provisioned with empty reals", tester.loadBalancerService().instances().get(lb.get().id()).reals().isEmpty());
- assignIps(tester.nodeRepository().nodes().getNodes(app1));
+ assignIps(tester.nodeRepository().nodes().list().owner(app1));
tester.activate(app1, nodes);
assertFalse("Load balancer is reconfigured with reals", tester.loadBalancerService().instances().get(lb.get().id()).reals().isEmpty());
// Application is removed, nodes are deleted and load balancer is deactivated
tester.remove(app1);
- tester.nodeRepository().database().removeNodes(tester.nodeRepository().nodes().getNodes(NodeType.tenant));
- assertTrue("Nodes are deleted", tester.nodeRepository().nodes().getNodes(NodeType.tenant).isEmpty());
+ tester.nodeRepository().database().removeNodes(tester.nodeRepository().nodes().list().nodeType(NodeType.tenant).asList());
+ assertTrue("Nodes are deleted", tester.nodeRepository().nodes().list().nodeType(NodeType.tenant).isEmpty());
assertSame("Load balancer is deactivated", LoadBalancer.State.inactive, lb.get().state());
// Application is redeployed
nodes = tester.prepare(app1, clusterRequest(ClusterSpec.Type.container, ClusterSpec.Id.from("qrs")), 2 , 1, resources);
assertTrue("Load balancer is reconfigured with empty reals", tester.loadBalancerService().instances().get(lb.get().id()).reals().isEmpty());
- assignIps(tester.nodeRepository().nodes().getNodes(app1));
+ assignIps(tester.nodeRepository().nodes().list().owner(app1));
tester.activate(app1, nodes);
assertFalse("Load balancer is reconfigured with reals", tester.loadBalancerService().instances().get(lb.get().id()).reals().isEmpty());
}
@@ -269,7 +270,7 @@ public class LoadBalancerProvisionerTest {
}
private void dirtyNodesOf(ApplicationId application) {
- tester.nodeRepository().nodes().deallocate(tester.nodeRepository().nodes().getNodes(application), Agent.system, this.getClass().getSimpleName());
+ tester.nodeRepository().nodes().deallocate(tester.nodeRepository().nodes().list().owner(application).asList(), Agent.system, this.getClass().getSimpleName());
}
private Set<HostSpec> prepare(ApplicationId application, ClusterSpec... specs) {
@@ -285,10 +286,10 @@ public class LoadBalancerProvisionerTest {
return allNodes;
}
- private void assignIps(List<Node> nodes) {
+ private void assignIps(NodeList nodes) {
try (var lock = tester.nodeRepository().nodes().lockUnallocated()) {
for (int i = 0; i < nodes.size(); i++) {
- tester.nodeRepository().nodes().write(nodes.get(i).with(IP.Config.EMPTY.withPrimary(Set.of("127.0.0." + i))), lock);
+ tester.nodeRepository().nodes().write(nodes.asList().get(i).with(IP.Config.EMPTY.withPrimary(Set.of("127.0.0." + i))), lock);
}
}
}
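In LoadBalancerProvisionerTest the owner(...) filter takes over for the removed getNodes(application): list().owner(app) yields the application's nodes, and asList() is used where positional access or a List<Node> argument is still needed. A one-method sketch of the filter, with illustrative names:

    import com.yahoo.config.provision.ApplicationId;
    import com.yahoo.vespa.hosted.provision.NodeList;
    import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;

    class OwnerFilterSketch {

        // list().owner(app) replaces getNodes(app); chain state(...) or nodeType(...) to narrow further
        static NodeList nodesOf(ProvisioningTester tester, ApplicationId application) {
            return tester.nodeRepository().nodes().list().owner(application);
        }
    }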
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeTypeProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeTypeProvisioningTest.java
index 9b564232111..124f7db569a 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeTypeProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeTypeProvisioningTest.java
@@ -7,6 +7,7 @@ import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.HostSpec;
import com.yahoo.config.provision.NodeType;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.maintenance.RetiredExpirer;
import com.yahoo.vespa.hosted.provision.maintenance.TestMetric;
import com.yahoo.vespa.hosted.provision.node.Agent;
@@ -53,7 +54,7 @@ public class NodeTypeProvisioningTest {
List<HostSpec> hosts = deployProxies(application, tester);
assertEquals("Reserved all proxies", 11, hosts.size());
tester.activate(application, new HashSet<>(hosts));
- List<Node> nodes = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active);
+ NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
assertEquals("Activated all proxies", 11, nodes.size());
}
@@ -61,7 +62,7 @@ public class NodeTypeProvisioningTest {
List<HostSpec> hosts = deployProxies(application, tester);
assertEquals(11, hosts.size());
tester.activate(application, new HashSet<>(hosts));
- List<Node> nodes = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active);
+ NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
assertEquals(11, nodes.size());
}
@@ -70,20 +71,20 @@ public class NodeTypeProvisioningTest {
List<HostSpec> hosts = deployProxies(application, tester);
assertEquals(13, hosts.size());
tester.activate(application, new HashSet<>(hosts));
- List<Node> nodes = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active);
+ NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
assertEquals(13, nodes.size());
}
{ // Remove 3 proxies then redeploy
- List<Node> nodes = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active);
- tester.nodeRepository().nodes().fail(nodes.get(0).hostname(), Agent.system, "Failing to unit test");
- tester.nodeRepository().nodes().fail(nodes.get(1).hostname(), Agent.system, "Failing to unit test");
- tester.nodeRepository().nodes().fail(nodes.get(5).hostname(), Agent.system, "Failing to unit test");
+ NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
+ tester.nodeRepository().nodes().fail(nodes.asList().get(0).hostname(), Agent.system, "Failing to unit test");
+ tester.nodeRepository().nodes().fail(nodes.asList().get(1).hostname(), Agent.system, "Failing to unit test");
+ tester.nodeRepository().nodes().fail(nodes.asList().get(5).hostname(), Agent.system, "Failing to unit test");
List<HostSpec> hosts = deployProxies(application, tester);
assertEquals(10, hosts.size());
tester.activate(application, new HashSet<>(hosts));
- nodes = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active);
+ nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
assertEquals(10, nodes.size());
}
}
@@ -107,22 +108,22 @@ public class NodeTypeProvisioningTest {
List<HostSpec> hosts = deployProxies(application, tester);
assertEquals("Reserved all proxies", 11, hosts.size());
tester.activate(application, new HashSet<>(hosts));
- List<Node> nodes = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active);
+ NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
assertEquals("Activated all proxies", 11, nodes.size());
}
- Node nodeToRetire = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active).get(5);
+ Node nodeToRetire = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy).asList().get(5);
{ // Pick out a node and retire it
tester.nodeRepository().nodes().write(nodeToRetire.withWantToRetire(true, Agent.system, tester.clock().instant()), () -> {});
List<HostSpec> hosts = deployProxies(application, tester);
assertEquals(11, hosts.size());
tester.activate(application, new HashSet<>(hosts));
- List<Node> nodes = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active);
+ NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
assertEquals(11, nodes.size());
// Verify that wantToRetire has been propagated
- assertTrue(tester.nodeRepository().nodes().getNode(nodeToRetire.hostname())
+ assertTrue(tester.nodeRepository().nodes().node(nodeToRetire.hostname())
.flatMap(Node::allocation)
.map(allocation -> allocation.membership().retired())
.orElseThrow(RuntimeException::new));
@@ -132,11 +133,11 @@ public class NodeTypeProvisioningTest {
List<HostSpec> hosts = deployProxies(application, tester);
assertEquals(11, hosts.size());
tester.activate(application, new HashSet<>(hosts));
- List<Node> nodes = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active);
+ NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
assertEquals(11, nodes.size());
// Verify that the node is still marked as retired
- assertTrue(tester.nodeRepository().nodes().getNode(nodeToRetire.hostname())
+ assertTrue(tester.nodeRepository().nodes().node(nodeToRetire.hostname())
.flatMap(Node::allocation)
.map(allocation -> allocation.membership().retired())
.orElseThrow(RuntimeException::new));
@@ -149,11 +150,11 @@ public class NodeTypeProvisioningTest {
List<HostSpec> hosts = deployProxies(application, tester);
assertEquals(10, hosts.size());
tester.activate(application, new HashSet<>(hosts));
- List<Node> nodes = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active);
+ NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
assertEquals(10, nodes.size());
// Verify that the node is now inactive
- assertEquals(Node.State.dirty, tester.nodeRepository().nodes().getNode(nodeToRetire.hostname())
+ assertEquals(Node.State.dirty, tester.nodeRepository().nodes().node(nodeToRetire.hostname())
.orElseThrow(RuntimeException::new).state());
}
}
@@ -176,11 +177,11 @@ public class NodeTypeProvisioningTest {
List<HostSpec> hosts = deployProxies(application, tester);
assertEquals("Reserved all proxies", 11, hosts.size());
tester.activate(application, new HashSet<>(hosts));
- List<Node> nodes = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active);
+ NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
assertEquals("Activated all proxies", 11, nodes.size());
}
- List<Node> nodesToRetire = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active)
+ List<Node> nodesToRetire = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy).asList()
.subList(3, 3 + numNodesToRetire);
String currentyRetiringHostname;
{
@@ -190,7 +191,7 @@ public class NodeTypeProvisioningTest {
List<HostSpec> hosts = deployProxies(application, tester);
assertEquals(11, hosts.size());
tester.activate(application, new HashSet<>(hosts));
- List<Node> nodes = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active);
+ NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
assertEquals(11, nodes.size());
// Verify that wantToRetire has been propagated
@@ -208,7 +209,7 @@ public class NodeTypeProvisioningTest {
List<HostSpec> hosts = deployProxies(application, tester);
assertEquals(11, hosts.size());
tester.activate(application, new HashSet<>(hosts));
- List<Node> nodes = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active);
+ NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
assertEquals(11, nodes.size());
// Verify that wantToRetire has been propagated
@@ -228,11 +229,11 @@ public class NodeTypeProvisioningTest {
List<HostSpec> hosts = deployProxies(application, tester);
assertEquals(10, hosts.size());
tester.activate(application, new HashSet<>(hosts));
- List<Node> nodes = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active);
+ NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
assertEquals(10, nodes.size());
// Verify the node we previously set to retire has finished retiring
- assertEquals(Node.State.dirty, tester.nodeRepository().nodes().getNode(currentyRetiringHostname)
+ assertEquals(Node.State.dirty, tester.nodeRepository().nodes().node(currentyRetiringHostname)
.orElseThrow(RuntimeException::new).state());
// Verify that a node is currently retiring
@@ -257,7 +258,7 @@ public class NodeTypeProvisioningTest {
}
// After a long time, all currently active proxy nodes are not marked with wantToRetire or as retired
- long numRetiredActiveProxyNodes = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active).stream()
+ long numRetiredActiveProxyNodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy).stream()
.filter(node -> !node.status().wantToRetire())
.filter(node -> !node.allocation().get().membership().retired())
.count();
@@ -265,7 +266,7 @@ public class NodeTypeProvisioningTest {
// All the nodes that were marked with wantToRetire earlier are now dirty
assertEquals(nodesToRetire.stream().map(Node::hostname).collect(Collectors.toSet()),
- tester.nodeRepository().nodes().getNodes(Node.State.dirty).stream().map(Node::hostname).collect(Collectors.toSet()));
+ tester.nodeRepository().nodes().list(Node.State.dirty).stream().map(Node::hostname).collect(Collectors.toSet()));
}
private List<HostSpec> deployProxies(ApplicationId application, ProvisioningTester tester) {
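NodeTypeProvisioningTest repeatedly fetches active proxy nodes; after the change this reads list(Node.State.active).nodeType(NodeType.proxy), with asList().get(i) only where an index is genuinely needed. A sketch of that query plus the retirement count used near the end of the file, assuming the same classpath; class and method names are illustrative.

    import com.yahoo.config.provision.NodeType;
    import com.yahoo.vespa.hosted.provision.Node;
    import com.yahoo.vespa.hosted.provision.NodeList;
    import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;

    class ActiveProxiesSketch {

        // All proxy nodes currently in the active state, as a filterable NodeList
        static NodeList activeProxies(ProvisioningTester tester) {
            return tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
        }

        // Active proxies that are neither flagged wantToRetire nor retired in their allocation
        static long unretiredActiveProxies(ProvisioningTester tester) {
            return activeProxies(tester).stream()
                                        .filter(node -> !node.status().wantToRetire())
                                        .filter(node -> !node.allocation().get().membership().retired())
                                        .count();
        }
    }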
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
index 33f33836b8d..9af137aa6d8 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
@@ -32,7 +32,6 @@ import com.yahoo.vespa.service.duper.InfraApplication;
import org.junit.Test;
import java.time.Duration;
-import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
@@ -95,7 +94,8 @@ public class ProvisioningTest {
SystemState state5 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
HostSpec removed = tester.removeOne(state5.allHosts);
tester.activate(application1, state5.allHosts);
- assertEquals(removed.hostname(), tester.nodeRepository().nodes().getNodes(application1, Node.State.inactive).get(0).hostname());
+ assertEquals(removed.hostname(),
+ tester.nodeRepository().nodes().list(Node.State.inactive).owner(application1).first().get().hostname());
// remove some of the clusters
SystemState state6 = prepare(application1, 0, 2, 0, 3, defaultResources, tester);
@@ -107,14 +107,14 @@ public class ProvisioningTest {
NodeList previouslyActive = tester.getNodes(application1, Node.State.active);
NodeList previouslyInactive = tester.getNodes(application1, Node.State.inactive);
tester.remove(application1);
- assertEquals(tester.toHostNames(previouslyActive.not().container().asList()),
- tester.toHostNames(tester.nodeRepository().nodes().getNodes(application1, Node.State.inactive)));
- assertTrue(tester.nodeRepository().nodes().getNodes(Node.State.dirty).containsAll(previouslyActive.container().asList()));
+ assertEquals(tester.toHostNames(previouslyActive.not().container()),
+ tester.toHostNames(tester.nodeRepository().nodes().list(Node.State.inactive).owner(application1)));
+ assertTrue(tester.nodeRepository().nodes().list(Node.State.dirty).asList().containsAll(previouslyActive.container().asList()));
assertEquals(0, tester.getNodes(application1, Node.State.active).size());
assertTrue(tester.nodeRepository().applications().get(application1).isEmpty());
// other application is unaffected
- assertEquals(state1App2.hostNames(), tester.toHostNames(tester.nodeRepository().nodes().getNodes(application2, Node.State.active)));
+ assertEquals(state1App2.hostNames(), tester.toHostNames(tester.nodeRepository().nodes().list(Node.State.active).owner(application2)));
// fail a node from app2 and make sure it does not get inactive nodes from first
HostSpec failed = tester.removeOne(state1App2.allHosts);
@@ -127,7 +127,7 @@ public class ProvisioningTest {
tester.activate(application2, state2App2.allHosts);
// deploy first app again
- tester.nodeRepository().nodes().setReady(tester.nodeRepository().nodes().getNodes(Node.State.dirty), Agent.system, "recycled");
+ tester.nodeRepository().nodes().setReady(tester.nodeRepository().nodes().list(Node.State.dirty).asList(), Agent.system, "recycled");
SystemState state7 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
state7.assertEquals(state1);
tester.activate(application1, state7.allHosts);
@@ -160,7 +160,7 @@ public class ProvisioningTest {
HostSpec host1 = state1.container0.iterator().next();
assertFalse(host1.version().isPresent());
- Node node1 = tester.nodeRepository().nodes().getNode(host1.hostname()).get();
+ Node node1 = tester.nodeRepository().nodes().node(host1.hostname()).get();
tester.nodeRepository().nodes().write(node1.with(node1.status().withVespaVersion(Version.fromString("1.2.3"))), () -> {});
// redeploy
@@ -186,7 +186,7 @@ public class ProvisioningTest {
tester.activate(application1, state1.allHosts);
HostSpec host1 = state1.container0.iterator().next();
- Node node1 = tester.nodeRepository().nodes().getNode(host1.hostname()).get();
+ Node node1 = tester.nodeRepository().nodes().node(host1.hostname()).get();
DockerImage dockerImage = DockerImage.fromString(dockerImageRepo).withTag(Version.fromString("1.2.3"));
tester.nodeRepository().nodes().write(node1.with(node1.status().withContainerImage(dockerImage)), () -> {});
@@ -195,7 +195,7 @@ public class ProvisioningTest {
tester.activate(application1, state2.allHosts);
host1 = state2.container0.iterator().next();
- node1 = tester.nodeRepository().nodes().getNode(host1.hostname()).get();
+ node1 = tester.nodeRepository().nodes().node(host1.hostname()).get();
assertEquals(dockerImage, node1.status().containerImage().get());
}
@@ -222,7 +222,7 @@ public class ProvisioningTest {
SystemState state3 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
tester.activate(application1, state3.allHosts);
assertEquals("Superfluous container nodes are dirtyed",
- 3-2 + 4-2, tester.nodeRepository().nodes().getNodes(Node.State.dirty).size());
+ 3-2 + 4-2, tester.nodeRepository().nodes().list(Node.State.dirty).size());
assertEquals("Superfluous content nodes are retired",
4-3 + 5-3, tester.getNodes(application1, Node.State.active).retired().size());
@@ -245,7 +245,7 @@ public class ProvisioningTest {
SystemState state5 = prepare(application1, 2, 2, 3, 3, defaultResources, tester);
tester.activate(application1, state5.allHosts);
assertEquals("Superfluous container nodes are also dirtyed",
- 4-2 + 5-2 + 1 + 4-2, tester.nodeRepository().nodes().getNodes(Node.State.dirty).size());
+ 4-2 + 5-2 + 1 + 4-2, tester.nodeRepository().nodes().list(Node.State.dirty).size());
assertEquals("Superfluous content nodes are retired",
5-3 + 6-3 - 1, tester.getNodes(application1, Node.State.active).retired().size());
@@ -289,7 +289,7 @@ public class ProvisioningTest {
// redeploy with increased sizes and new flavor
SystemState state3 = prepare(application1, 3, 4, 4, 5, large, tester);
- assertEquals("New nodes are reserved", 16, tester.nodeRepository().nodes().getNodes(application1, Node.State.reserved).size());
+ assertEquals("New nodes are reserved", 16, tester.nodeRepository().nodes().list(Node.State.reserved).owner(application1).size());
tester.activate(application1, state3.allHosts);
assertEquals("small container nodes are retired because we are swapping the entire cluster",
2 + 2, tester.getNodes(application1, Node.State.active).retired().type(ClusterSpec.Type.container).resources(small).size());
@@ -316,8 +316,8 @@ public class ProvisioningTest {
SystemState state1 = prepare(application1, 2, 2, 4, 4, small, tester);
tester.activate(application1, state1.allHosts);
- tester.nodeRepository().nodes().getNodes(application1)
- .forEach(n -> assertEquals(large, tester.nodeRepository().nodes().getNode(n.parentHostname().get()).get().resources()));
+ tester.nodeRepository().nodes().list().owner(application1)
+ .forEach(n -> assertEquals(large, tester.nodeRepository().nodes().node(n.parentHostname().get()).get().resources()));
}
@Test
@@ -374,7 +374,7 @@ public class ProvisioningTest {
assertEquals(6, state.allHosts.size());
tester.activate(application, state.allHosts);
assertTrue(state.allHosts.stream().allMatch(host -> host.requestedResources().get().diskSpeed() == NodeResources.DiskSpeed.any));
- assertTrue(tester.nodeRepository().nodes().getNodes(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any));
+ assertTrue(tester.nodeRepository().nodes().list().owner(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any));
}
{
@@ -386,7 +386,7 @@ public class ProvisioningTest {
assertEquals(8, state.allHosts.size());
tester.activate(application, state.allHosts);
assertTrue(state.allHosts.stream().allMatch(host -> host.requestedResources().get().diskSpeed() == NodeResources.DiskSpeed.fast));
- assertTrue(tester.nodeRepository().nodes().getNodes(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.fast));
+ assertTrue(tester.nodeRepository().nodes().list().owner(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.fast));
}
{
@@ -397,7 +397,7 @@ public class ProvisioningTest {
assertEquals(8, state.allHosts.size());
tester.activate(application, state.allHosts);
assertTrue(state.allHosts.stream().allMatch(host -> host.requestedResources().get().diskSpeed() == NodeResources.DiskSpeed.any));
- assertTrue(tester.nodeRepository().nodes().getNodes(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any));
+ assertTrue(tester.nodeRepository().nodes().list().owner(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any));
}
}
@@ -692,25 +692,25 @@ public class ProvisioningTest {
// Allocate 5 nodes
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("4.5.6").build();
tester.activate(application, tester.prepare(application, cluster, capacity));
- assertEquals(5, NodeList.copyOf(tester.nodeRepository().nodes().getNodes(application, Node.State.active)).not().retired().size());
- assertEquals(0, NodeList.copyOf(tester.nodeRepository().nodes().getNodes(application, Node.State.active)).retired().size());
+ assertEquals(5, tester.nodeRepository().nodes().list(Node.State.active).owner(application).not().retired().size());
+ assertEquals(0, tester.nodeRepository().nodes().list(Node.State.active).owner(application).retired().size());
// Mark the nodes as want to retire
- tester.nodeRepository().nodes().getNodes(application, Node.State.active).forEach(node -> tester.patchNode(node, (n) -> n.withWantToRetire(true, Agent.system, tester.clock().instant())));
+ tester.nodeRepository().nodes().list(Node.State.active).owner(application).forEach(node -> tester.patchNode(node, (n) -> n.withWantToRetire(true, Agent.system, tester.clock().instant())));
// redeploy without allow failing
tester.activate(application, tester.prepare(application, cluster, capacityFORCED));
// Nodes are not retired since that is unsafe when we cannot fail
- assertEquals(5, NodeList.copyOf(tester.nodeRepository().nodes().getNodes(application, Node.State.active)).not().retired().size());
- assertEquals(0, NodeList.copyOf(tester.nodeRepository().nodes().getNodes(application, Node.State.active)).retired().size());
+ assertEquals(5, tester.nodeRepository().nodes().list(Node.State.active).owner(application).not().retired().size());
+ assertEquals(0, tester.nodeRepository().nodes().list(Node.State.active).owner(application).retired().size());
// ... but we still want to
- tester.nodeRepository().nodes().getNodes(application, Node.State.active).forEach(node -> assertTrue(node.status().wantToRetire()));
+ tester.nodeRepository().nodes().list(Node.State.active).owner(application).forEach(node -> assertTrue(node.status().wantToRetire()));
// redeploy with allowing failing
tester.activate(application, tester.prepare(application, cluster, capacity));
// ... old nodes are now retired
- assertEquals(5, NodeList.copyOf(tester.nodeRepository().nodes().getNodes(application, Node.State.active)).not().retired().size());
- assertEquals(5, NodeList.copyOf(tester.nodeRepository().nodes().getNodes(application, Node.State.active)).retired().size());
+ assertEquals(5, tester.nodeRepository().nodes().list(Node.State.active).owner(application).not().retired().size());
+ assertEquals(5, tester.nodeRepository().nodes().list(Node.State.active).owner(application).retired().size());
}
@Test
@@ -723,17 +723,17 @@ public class ProvisioningTest {
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("4.5.6").build();
tester.activate(application, tester.prepare(application, cluster, capacityCanFail));
- assertEquals(0, NodeList.copyOf(tester.nodeRepository().nodes().getNodes(application, Node.State.active)).retired().size());
+ assertEquals(0, tester.nodeRepository().nodes().list(Node.State.active).owner(application).retired().size());
- tester.patchNode(tester.nodeRepository().nodes().getNodes(application).stream().findAny().orElseThrow(), n -> n.withWantToRetire(true, Agent.system, tester.clock().instant()));
+ tester.patchNode(tester.nodeRepository().nodes().list().owner(application).stream().findAny().orElseThrow(), n -> n.withWantToRetire(true, Agent.system, tester.clock().instant()));
tester.activate(application, tester.prepare(application, cluster, capacityCanFail));
- assertEquals(1, NodeList.copyOf(tester.nodeRepository().nodes().getNodes(application, Node.State.active)).retired().size());
- assertEquals(6, tester.nodeRepository().nodes().getNodes(application, Node.State.active).size());
+ assertEquals(1, tester.nodeRepository().nodes().list(Node.State.active).owner(application).retired().size());
+ assertEquals(6, tester.nodeRepository().nodes().list(Node.State.active).owner(application).size());
Capacity capacityCannotFail = Capacity.from(new ClusterResources(5, 1, defaultResources), false, false);
tester.activate(application, tester.prepare(application, cluster, capacityCannotFail));
- assertEquals(1, NodeList.copyOf(tester.nodeRepository().nodes().getNodes(application, Node.State.active)).retired().size());
- assertEquals(6, tester.nodeRepository().nodes().getNodes(application, Node.State.active).size());
+ assertEquals(1, tester.nodeRepository().nodes().list(Node.State.active).owner(application).retired().size());
+ assertEquals(6, tester.nodeRepository().nodes().list(Node.State.active).owner(application).size());
}
@Test
@@ -784,7 +784,7 @@ public class ProvisioningTest {
// Re-deploy application with 1 node less, the retired node should be on the spare host
tester.deploy(application, spec, Capacity.from(new ClusterResources(5, 1, defaultResources)));
- assertTrue(tester.nodeRepository().nodes().getNode(randomNode.hostname()).get().allocation().get().membership().retired());
+ assertTrue(tester.nodeRepository().nodes().node(randomNode.hostname()).get().allocation().get().membership().retired());
}
@Test
@@ -901,7 +901,7 @@ public class ProvisioningTest {
try {
prepareAndActivate.apply(cfgApp);
} catch (ParentHostUnavailableException ignored) { }
- assertEquals(2, tester.nodeRepository().nodes().list(cfgApp.getApplicationId()).state(Node.State.reserved).size());
+ assertEquals(2, tester.nodeRepository().nodes().list().owner(cfgApp.getApplicationId()).state(Node.State.reserved).size());
prepareAndActivate.apply(cfgHostApp);
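ProvisioningTest drops all of the NodeList.copyOf wrapping: the retirement assertions now read directly as list(Node.State.active).owner(app).retired() and .not().retired(). The counting pattern as a standalone sketch, under the same classpath assumption:

    import com.yahoo.config.provision.ApplicationId;
    import com.yahoo.vespa.hosted.provision.Node;
    import com.yahoo.vespa.hosted.provision.NodeList;
    import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;

    class RetirementCountsSketch {

        // Active nodes of the application that are marked retired in their allocation
        static int retiredCount(ProvisioningTester tester, ApplicationId application) {
            NodeList active = tester.nodeRepository().nodes().list(Node.State.active).owner(application);
            return active.retired().size();
        }

        // Active nodes of the application that are not retired
        static int unretiredCount(ProvisioningTester tester, ApplicationId application) {
            NodeList active = tester.nodeRepository().nodes().list(Node.State.active).owner(application);
            return active.not().retired().size();
        }
    }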
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
index fde02c083dd..23f504a9c0f 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
@@ -143,7 +143,7 @@ public class ProvisioningTester {
public NodeRepositoryProvisioner provisioner() { return provisioner; }
public LoadBalancerServiceMock loadBalancerService() { return loadBalancerService; }
public CapacityPolicies capacityPolicies() { return capacityPolicies; }
- public NodeList getNodes(ApplicationId id, Node.State ... inState) { return NodeList.copyOf(nodeRepository.nodes().getNodes(id, inState)); }
+ public NodeList getNodes(ApplicationId id, Node.State ... inState) { return nodeRepository.nodes().list(inState).owner(id); }
public Node patchNode(Node node, UnaryOperator<Node> patcher) {
return patchNodes(List.of(node), patcher).get(0);
@@ -170,12 +170,12 @@ public class ProvisioningTester {
}
public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity capacity) {
- Set<String> reservedBefore = toHostNames(nodeRepository.nodes().getNodes(application, Node.State.reserved));
- Set<String> inactiveBefore = toHostNames(nodeRepository.nodes().getNodes(application, Node.State.inactive));
+ Set<String> reservedBefore = toHostNames(nodeRepository.nodes().list(Node.State.reserved).owner(application));
+ Set<String> inactiveBefore = toHostNames(nodeRepository.nodes().list(Node.State.inactive).owner(application));
List<HostSpec> hosts1 = provisioner.prepare(application, cluster, capacity, provisionLogger);
List<HostSpec> hosts2 = provisioner.prepare(application, cluster, capacity, provisionLogger);
assertEquals("Prepare is idempotent", hosts1, hosts2);
- Set<String> newlyActivated = toHostNames(nodeRepository.nodes().getNodes(application, Node.State.reserved));
+ Set<String> newlyActivated = toHostNames(nodeRepository.nodes().list(Node.State.reserved).owner(application));
newlyActivated.removeAll(reservedBefore);
newlyActivated.removeAll(inactiveBefore);
return hosts1;
@@ -193,7 +193,7 @@ public class ProvisioningTester {
nodeRepository.nodes().write(node, lock);
}
if (node.parentHostname().isEmpty()) continue;
- Node parent = nodeRepository.nodes().getNode(node.parentHostname().get()).get();
+ Node parent = nodeRepository.nodes().node(node.parentHostname().get()).get();
if (parent.state() == Node.State.active) continue;
NestedTransaction t = new NestedTransaction();
if (parent.ipConfig().primary().isEmpty())
@@ -213,7 +213,7 @@ public class ProvisioningTester {
provisioner.activate(hosts, new ActivationContext(0), new ApplicationTransaction(lock, transaction));
transaction.commit();
}
- assertEquals(toHostNames(hosts), toHostNames(nodeRepository.nodes().getNodes(application, Node.State.active)));
+ assertEquals(toHostNames(hosts), toHostNames(nodeRepository.nodes().list(Node.State.active).owner(application)));
return hosts;
}
@@ -250,7 +250,7 @@ public class ProvisioningTester {
return hosts.stream().map(HostSpec::hostname).collect(Collectors.toSet());
}
- public Set<String> toHostNames(List<Node> nodes) {
+ public Set<String> toHostNames(NodeList nodes) {
return nodes.stream().map(Node::hostname).collect(Collectors.toSet());
}
@@ -259,7 +259,7 @@ public class ProvisioningTester {
* number of matches to the given filters
*/
public void assertRestartCount(ApplicationId application, HostFilter... filters) {
- for (Node node : nodeRepository.nodes().getNodes(application, Node.State.active)) {
+ for (Node node : nodeRepository.nodes().list(Node.State.active).owner(application)) {
int expectedRestarts = 0;
for (HostFilter filter : filters)
if (NodeHostFilter.from(filter).matches(node))
@@ -316,9 +316,9 @@ public class ProvisioningTester {
}
public void fail(String hostname) {
- int beforeFailCount = nodeRepository.nodes().getNode(hostname, Node.State.active).get().status().failCount();
+ int beforeFailCount = nodeRepository.nodes().node(hostname, Node.State.active).get().status().failCount();
Node failedNode = nodeRepository.nodes().fail(hostname, Agent.system, "Failing to unit test");
- assertTrue(nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).contains(failedNode));
+ assertTrue(nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).asList().contains(failedNode));
assertEquals(beforeFailCount + 1, failedNode.status().failCount());
}
@@ -441,7 +441,7 @@ public class ProvisioningTester {
return nodes;
}
- public List<Node> makeConfigServers(int n, String flavor, Version configServersVersion) {
+ public NodeList makeConfigServers(int n, String flavor, Version configServersVersion) {
List<Node> nodes = new ArrayList<>(n);
MockNameResolver nameResolver = (MockNameResolver)nodeRepository().nameResolver();
@@ -464,7 +464,7 @@ public class ProvisioningTester {
application.getClusterSpecWithVersion(configServersVersion),
application.getCapacity());
activate(application.getApplicationId(), new HashSet<>(hosts));
- return nodeRepository.nodes().getNodes(application.getApplicationId(), Node.State.active);
+ return nodeRepository.nodes().list(Node.State.active).owner(application.getApplicationId());
}
public List<Node> makeReadyNodes(int n, String flavor, NodeType type, int ipAddressPoolSize) {
@@ -560,8 +560,8 @@ public class ProvisioningTester {
}
public void assertAllocatedOn(String explanation, String hostFlavor, ApplicationId app) {
- for (Node node : nodeRepository.nodes().getNodes(app)) {
- Node parent = nodeRepository.nodes().getNode(node.parentHostname().get()).get();
+ for (Node node : nodeRepository.nodes().list().owner(app)) {
+ Node parent = nodeRepository.nodes().node(node.parentHostname().get()).get();
assertEquals(node + ": " + explanation, hostFlavor, parent.flavor().name());
}
}
@@ -594,10 +594,10 @@ public class ProvisioningTester {
}
public int hostFlavorCount(String hostFlavor, ApplicationId app) {
- return (int)nodeRepository().nodes().getNodes(app).stream()
- .map(n -> nodeRepository().nodes().getNode(n.parentHostname().get()).get())
- .filter(p -> p.flavor().name().equals(hostFlavor))
- .count();
+ return (int)nodeRepository().nodes().list().owner(app).stream()
+ .map(n -> nodeRepository().nodes().node(n.parentHostname().get()).get())
+ .filter(p -> p.flavor().name().equals(hostFlavor))
+ .count();
}
public static final class Builder {