author    Jon Bratseth <bratseth@gmail.com>  2021-02-10 20:57:38 +0100
committer Jon Bratseth <bratseth@gmail.com>  2021-02-10 20:57:38 +0100
commit    8e1cae2a0725567b15d49b99317523853cb033e4 (patch)
tree      fb4bc6a5f1362922b180447538579f2369dae2ce /node-repository
parent    8834dbc6c85f1786f1a9e250ac7cc85bebe64491 (diff)
Remove unnecessary method
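The removed overload, Nodes.list(ApplicationId, Node.State...), filtered nodes by owning application while reading them from the database; call sites now list by state and filter with NodeList.owner. A minimal before/after sketch of the call-site migration, assuming a NodeRepository instance named nodeRepository and an ApplicationId named application (surrounding setup not shown):

    // Before: dedicated overload; owner filtering done while reading from CuratorDatabaseClient
    NodeList active = nodeRepository.nodes().list(application, Node.State.active);

    // After: list nodes in the wanted states, then filter by owner via the NodeList API
    NodeList active = nodeRepository.nodes().list(Node.State.active).owner(application);

    // With no state arguments, list() returns nodes in all states
    NodeList all = nodeRepository.nodes().list().owner(application);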
Diffstat (limited to 'node-repository')
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java |  2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java |  2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcher.java |  2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java |  4
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java | 11
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java | 10
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java |  2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java |  4
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java |  2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/ServiceMonitorStub.java |  2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java |  2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingIntegrationTest.java |  2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java |  4
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java | 12
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcherTest.java |  2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java |  2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java |  2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java | 10
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java | 22
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainerTest.java | 12
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java | 26
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java |  4
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java | 28
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java |  4
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java |  2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java |  2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java |  8
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java | 14
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java |  8
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java | 46
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java | 18
31 files changed, 126 insertions(+), 145 deletions(-)
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
index beec04b3b29..55495669802 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
@@ -193,7 +193,7 @@ public class NodeRepository extends AbstractComponent {
/** Removes this application: Active nodes are deactivated while all non-active nodes are set dirty. */
public void remove(ApplicationTransaction transaction) {
- NodeList applicationNodes = nodes().list(transaction.application());
+ NodeList applicationNodes = nodes().list().owner(transaction.application());
NodeList activeNodes = applicationNodes.state(State.active);
nodes().deactivate(activeNodes.asList(), transaction);
db.writeTo(State.dirty,
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
index 2f01f6b31ae..4ab6f259374 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
@@ -176,7 +176,7 @@ public class Autoscaler {
return false;
// A deployment is ongoing
- if (nodeRepository.nodes().list(nodes.first().get().allocation().get().owner(), Node.State.reserved).size() > 0)
+ if (nodeRepository.nodes().list(Node.State.reserved).owner(nodes.first().get().allocation().get().owner()).size() > 0)
return false;
return true;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcher.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcher.java
index b93c7930b5b..b2d8ddfd414 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcher.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcher.java
@@ -51,7 +51,7 @@ public class MetricsV2MetricsFetcher extends AbstractComponent implements Metric
@Override
public CompletableFuture<MetricsResponse> fetchMetrics(ApplicationId application) {
- NodeList applicationNodes = nodeRepository.nodes().list(application).state(Node.State.active);
+ NodeList applicationNodes = nodeRepository.nodes().list().owner(application).state(Node.State.active);
Optional<Node> metricsV2Container = applicationNodes.container()
.matching(node -> expectedUp(node))
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java
index 11292f9aa60..860076dd111 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java
@@ -116,7 +116,7 @@ class MaintenanceDeployment implements Closeable {
Deployer deployer,
NodeRepository nodeRepository) {
if (lock.isEmpty()) return Optional.empty();
- if (nodeRepository.nodes().list(application, Node.State.active).isEmpty()) return Optional.empty();
+ if (nodeRepository.nodes().list(Node.State.active).owner(application).isEmpty()) return Optional.empty();
return deployer.deployFromLocalActive(application);
}
@@ -168,7 +168,7 @@ class MaintenanceDeployment implements Closeable {
if ( ! deployment.prepare()) return false;
if (verifyTarget) {
expectedNewNode =
- nodeRepository.nodes().list(application, Node.State.reserved).stream()
+ nodeRepository.nodes().list(Node.State.reserved).owner(application).stream()
.filter(n -> !n.hostname().equals(node.hostname()))
.filter(n -> n.allocation().get().membership().cluster().id().equals(node.allocation().get().membership().cluster().id()))
.findAny();
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
index b7d7ad14b33..3e94201d87a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
@@ -83,15 +83,6 @@ public class Nodes {
return NodeList.copyOf(db.readNodes(inState));
}
- /**
- * Returns a list of nodes in this repository for an application in any of the given states
- *
- * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
- */
- public NodeList list(ApplicationId application, Node.State... inState) {
- return NodeList.copyOf(db.readNodes(application, inState));
- }
-
/** Returns a locked list of all nodes in this repository */
public LockedNodeList list(Mutex lock) {
return new LockedNodeList(list().asList(), lock);
@@ -401,7 +392,7 @@ public class Nodes {
// TODO: Work out a safe lock acquisition strategy for moves, e.g. migrate to lockNode.
try (Mutex lock = lock(node)) {
if (toState == Node.State.active) {
- for (Node currentActive : list(node.allocation().get().owner(), Node.State.active)) {
+ for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) {
if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
&& node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java
index 95445ad0a66..6150ee9f4a0 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java
@@ -278,16 +278,6 @@ public class CuratorDatabaseClient {
return nodes;
}
- /**
- * Returns all nodes allocated to the given application which are in one of the given states
- * If no states are given this returns all nodes.
- */
- public List<Node> readNodes(ApplicationId applicationId, Node.State ... states) {
- List<Node> nodes = readNodes(states);
- nodes.removeIf(node -> ! node.allocation().isPresent() || ! node.allocation().get().owner().equals(applicationId));
- return nodes;
- }
-
/**
* Returns a particular node, or empty if this node is not in any of the given states.
* If no states are given this returns the node if it is present in any state.
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
index a5057bd1134..79e1005eb47 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
@@ -150,7 +150,7 @@ public class NodeRepositoryProvisioner implements Provisioner {
private ClusterResources currentResources(ApplicationId applicationId,
ClusterSpec clusterSpec,
Capacity requested) {
- List<Node> nodes = nodeRepository.nodes().list(applicationId, Node.State.active)
+ List<Node> nodes = nodeRepository.nodes().list(Node.State.active).owner(applicationId)
.cluster(clusterSpec.id())
.not().retired()
.not().removable()
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
index 41a6b0d42b1..87b3742efb4 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
@@ -85,7 +85,7 @@ class Preparer {
*/
private List<Node> findNodesInRemovableGroups(ApplicationId application, ClusterSpec requestedCluster, int wantedGroups) {
List<Node> surplusNodes = new ArrayList<>(0);
- for (Node node : nodeRepository.nodes().list(application, Node.State.active)) {
+ for (Node node : nodeRepository.nodes().list(Node.State.active).owner(application)) {
ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
if ( ! nodeCluster.id().equals(requestedCluster.id())) continue;
if ( ! nodeCluster.type().equals(requestedCluster.type())) continue;
@@ -127,7 +127,7 @@ class Preparer {
*/
private int findHighestIndex(ApplicationId application, ClusterSpec cluster) {
int highestIndex = -1;
- for (Node node : nodeRepository.nodes().list(application, Node.State.allocatedStates().toArray(new Node.State[0]))) {
+ for (Node node : nodeRepository.nodes().list(Node.State.allocatedStates().toArray(new Node.State[0])).owner(application)) {
ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
if ( ! nodeCluster.id().equals(cluster.id())) continue;
if ( ! nodeCluster.type().equals(cluster.type())) continue;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java
index 56b46c106ee..c4c11222702 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java
@@ -435,7 +435,7 @@ public class NodesV2ApiHandler extends LoggingRequestHandler {
if (application.isEmpty())
return ErrorResponse.notFoundError("No application '" + id + "'");
Slime slime = ApplicationSerializer.toSlime(application.get(),
- nodeRepository.nodes().list(id, Node.State.active).asList(),
+ nodeRepository.nodes().list(Node.State.active).owner(id).asList(),
withPath("/nodes/v2/applications/" + id, uri));
return new SlimeJsonResponse(slime);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/ServiceMonitorStub.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/ServiceMonitorStub.java
index d2a2544c89c..23c20adb842 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/ServiceMonitorStub.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/ServiceMonitorStub.java
@@ -70,7 +70,7 @@ public class ServiceMonitorStub implements ServiceMonitor {
Map<ApplicationInstanceReference, ApplicationInstance> status = new HashMap<>();
for (Map.Entry<ApplicationId, MockDeployer.ApplicationContext> app : apps.entrySet()) {
Set<ServiceInstance> serviceInstances = new HashSet<>();
- for (Node node : nodeRepository.nodes().list(app.getValue().id(), Node.State.active)) {
+ for (Node node : nodeRepository.nodes().list(Node.State.active).owner(app.getValue().id())) {
serviceInstances.add(new ServiceInstance(new ConfigId("configid"),
new HostName(node.hostname()),
getHostStatus(node.hostname())));
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java
index b17c8fc35b6..60eb66c1779 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java
@@ -80,7 +80,7 @@ public class RealDataScenarioTest {
};
deploy(tester, app, specs, capacities);
- tester.nodeRepository().nodes().list(app).cluster(specs[1].id()).forEach(System.out::println);
+ tester.nodeRepository().nodes().list().owner(app).cluster(specs[1].id()).forEach(System.out::println);
}
private void deploy(ProvisioningTester tester, ApplicationId app, ClusterSpec[] specs, Capacity[] capacities) {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingIntegrationTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingIntegrationTest.java
index 5f7b4bc865c..3a74c3a3cf6 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingIntegrationTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingIntegrationTest.java
@@ -60,7 +60,7 @@ public class AutoscalingIntegrationTest {
tester.nodeRepository().applications().put(application, lock);
}
var scaledResources = autoscaler.suggest(application.clusters().get(cluster1.id()),
- tester.nodeRepository().nodes().list(application1));
+ tester.nodeRepository().nodes().list().owner(application1));
assertTrue(scaledResources.isPresent());
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
index dbb8c00e44d..dbab02302f8 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
@@ -116,7 +116,7 @@ public class AutoscalingTest {
// deploy with slow
tester.deploy(application1, cluster1, 5, 1, hostResources);
- tester.nodeRepository().nodes().list(application1).stream()
+ tester.nodeRepository().nodes().list().owner(application1).stream()
.allMatch(n -> n.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.slow);
tester.clock().advance(Duration.ofDays(2));
@@ -132,7 +132,7 @@ public class AutoscalingTest {
assertEquals("Disk speed from min/max is used",
NodeResources.DiskSpeed.any, scaledResources.nodeResources().diskSpeed());
tester.deploy(application1, cluster1, scaledResources);
- tester.nodeRepository().nodes().list(application1).stream()
+ tester.nodeRepository().nodes().list().owner(application1).stream()
.allMatch(n -> n.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any);
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java
index 0581f9f84b3..eb490079c98 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java
@@ -106,7 +106,7 @@ class AutoscalingTester {
public void deactivateRetired(ApplicationId application, ClusterSpec cluster, ClusterResources resources) {
try (Mutex lock = nodeRepository().nodes().lock(application)){
- for (Node node : nodeRepository().nodes().list(application, Node.State.active)) {
+ for (Node node : nodeRepository().nodes().list(Node.State.active).owner(application)) {
if (node.allocation().get().membership().retired())
nodeRepository().nodes().write(node.with(node.allocation().get().removable(true)), lock);
}
@@ -126,7 +126,7 @@ class AutoscalingTester {
*/
public void addCpuMeasurements(float value, float otherResourcesLoad,
int count, ApplicationId applicationId) {
- NodeList nodes = nodeRepository().nodes().list(applicationId, Node.State.active);
+ NodeList nodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId);
float oneExtraNodeFactor = (float)(nodes.size() - 1.0) / (nodes.size());
for (int i = 0; i < count; i++) {
clock().advance(Duration.ofMinutes(1));
@@ -157,7 +157,7 @@ class AutoscalingTester {
*/
public void addMemMeasurements(float value, float otherResourcesLoad,
int count, ApplicationId applicationId) {
- NodeList nodes = nodeRepository().nodes().list(applicationId, Node.State.active);
+ NodeList nodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId);
float oneExtraNodeFactor = (float)(nodes.size() - 1.0) / (nodes.size());
for (int i = 0; i < count; i++) {
clock().advance(Duration.ofMinutes(1));
@@ -182,7 +182,7 @@ class AutoscalingTester {
public void addMeasurements(float cpu, float memory, float disk, int generation, boolean inService, boolean stable,
int count, ApplicationId applicationId) {
- NodeList nodes = nodeRepository().nodes().list(applicationId, Node.State.active);
+ NodeList nodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId);
for (int i = 0; i < count; i++) {
clock().advance(Duration.ofMinutes(1));
for (Node node : nodes) {
@@ -205,7 +205,7 @@ class AutoscalingTester {
nodeRepository().applications().put(application, lock);
}
return autoscaler.autoscale(application.clusters().get(clusterId),
- nodeRepository().nodes().list(applicationId, Node.State.active));
+ nodeRepository().nodes().list(Node.State.active).owner(applicationId));
}
public Autoscaler.Advice suggest(ApplicationId applicationId, ClusterSpec.Id clusterId,
@@ -216,7 +216,7 @@ class AutoscalingTester {
nodeRepository().applications().put(application, lock);
}
return autoscaler.suggest(application.clusters().get(clusterId),
- nodeRepository().nodes().list(applicationId, Node.State.active));
+ nodeRepository().nodes().list(Node.State.active).owner(applicationId));
}
public ClusterResources assertResources(String message,
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcherTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcherTest.java
index 9ac9a182512..384e8dd8439 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcherTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcherTest.java
@@ -78,7 +78,7 @@ public class MetricsV2MetricsFetcherTest {
{
httpClient.cannedResponse = cannedResponseForApplication2;
try (Mutex lock = tester.nodeRepository().nodes().lock(application1)) {
- tester.nodeRepository().nodes().write(tester.nodeRepository().nodes().list(application2, Node.State.active)
+ tester.nodeRepository().nodes().write(tester.nodeRepository().nodes().list(Node.State.active).owner(application2)
.first().get().retire(tester.clock().instant()), lock);
}
List<Pair<String, MetricSnapshot>> values = new ArrayList<>(fetcher.fetchMetrics(application2).get().metrics());
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java
index d9ecd0c6653..887ce158e09 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java
@@ -72,7 +72,7 @@ public class AutoscalingMaintainerTester {
}
public void addMeasurements(float cpu, float mem, float disk, long generation, int count, ApplicationId applicationId) {
- NodeList nodes = nodeRepository().nodes().list(applicationId, Node.State.active);
+ NodeList nodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId);
for (int i = 0; i < count; i++) {
for (Node node : nodes)
metricsDb.add(List.of(new Pair<>(node.hostname(), new MetricSnapshot(clock().instant(),
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java
index 32b5f567341..189b32028a2 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java
@@ -132,7 +132,7 @@ public class LoadBalancerExpirerTest {
}
private void dirtyNodesOf(ApplicationId application, ClusterSpec.Id cluster) {
- tester.nodeRepository().nodes().deallocate(tester.nodeRepository().nodes().list(application).stream()
+ tester.nodeRepository().nodes().deallocate(tester.nodeRepository().nodes().list().owner(application).stream()
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().membership().cluster().id().equals(cluster))
.collect(Collectors.toList()),
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java
index 5ad01902125..9801233f396 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java
@@ -103,8 +103,8 @@ public class NodeFailTester {
tester.activate(app1, clusterApp1, capacity1);
tester.activate(app2, clusterApp2, capacity2);
- assertEquals(capacity1.minResources().nodes(), tester.nodeRepository.nodes().list(app1, Node.State.active).size());
- assertEquals(capacity2.minResources().nodes(), tester.nodeRepository.nodes().list(app2, Node.State.active).size());
+ assertEquals(capacity1.minResources().nodes(), tester.nodeRepository.nodes().list(Node.State.active).owner(app1).size());
+ assertEquals(capacity2.minResources().nodes(), tester.nodeRepository.nodes().list(Node.State.active).owner(app2).size());
Map<ApplicationId, MockDeployer.ApplicationContext> apps = Map.of(
app1, new MockDeployer.ApplicationContext(app1, clusterApp1, capacity1),
@@ -134,9 +134,9 @@ public class NodeFailTester {
tester.activate(app1, clusterApp1, capacity1);
tester.activate(app2, clusterApp2, capacity2);
assertEquals(Set.of(tester.nodeRepository.nodes().list().nodeType(NodeType.host).asList()),
- Set.of(tester.nodeRepository.nodes().list(tenantHostApp, Node.State.active).asList()));
- assertEquals(capacity1.minResources().nodes(), tester.nodeRepository.nodes().list(app1, Node.State.active).size());
- assertEquals(capacity2.minResources().nodes(), tester.nodeRepository.nodes().list(app2, Node.State.active).size());
+ Set.of(tester.nodeRepository.nodes().list(Node.State.active).owner(tenantHostApp).asList()));
+ assertEquals(capacity1.minResources().nodes(), tester.nodeRepository.nodes().list(Node.State.active).owner(app1).size());
+ assertEquals(capacity2.minResources().nodes(), tester.nodeRepository.nodes().list(Node.State.active).owner(app2).size());
Map<ApplicationId, MockDeployer.ApplicationContext> apps = Map.of(
tenantHostApp, new MockDeployer.ApplicationContext(tenantHostApp, clusterNodeAdminApp, allHosts),
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java
index 16afcb85020..ca1fa2831b8 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java
@@ -174,8 +174,8 @@ public class NodeFailerTest {
tester.suspend(NodeFailTester.app1);
// Set two nodes down (one for each application) and wait 65 minutes
- String host_from_suspended_app = tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active).asList().get(1).hostname();
- String host_from_normal_app = tester.nodeRepository.nodes().list(NodeFailTester.app2, Node.State.active).asList().get(3).hostname();
+ String host_from_suspended_app = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname();
+ String host_from_normal_app = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app2).asList().get(3).hostname();
tester.serviceMonitor.setHostDown(host_from_suspended_app);
tester.serviceMonitor.setHostDown(host_from_normal_app);
tester.runMaintainers();
@@ -192,15 +192,15 @@ public class NodeFailerTest {
public void zone_is_not_working_if_too_many_nodes_down() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
- tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active).asList().get(0).hostname());
+ tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(0).hostname());
tester.runMaintainers();
assertTrue(tester.nodeRepository.nodes().isWorking());
- tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active).asList().get(1).hostname());
+ tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname());
tester.runMaintainers();
assertTrue(tester.nodeRepository.nodes().isWorking());
- tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active).asList().get(2).hostname());
+ tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(2).hostname());
tester.runMaintainers();
assertFalse(tester.nodeRepository.nodes().isWorking());
@@ -236,8 +236,8 @@ public class NodeFailerTest {
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyFail1.hostname()).get().state());
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyFail2.hostname()).get().state());
- String downHost1 = tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active).asList().get(1).hostname();
- String downHost2 = tester.nodeRepository.nodes().list(NodeFailTester.app2, Node.State.active).asList().get(3).hostname();
+ String downHost1 = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname();
+ String downHost2 = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app2).asList().get(3).hostname();
tester.serviceMonitor.setHostDown(downHost1);
tester.serviceMonitor.setHostDown(downHost2);
// nothing happens the first 45 minutes
@@ -281,7 +281,7 @@ public class NodeFailerTest {
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
// the last host goes down
- Node lastNode = tester.highestIndex(tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active));
+ Node lastNode = tester.highestIndex(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1));
tester.serviceMonitor.setHostDown(lastNode.hostname());
// it is not failed because there are no ready nodes to replace it
for (int minutes = 0; minutes < 75; minutes +=5 ) {
@@ -305,7 +305,7 @@ public class NodeFailerTest {
assertEquals( 5, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertTrue("The index of the last failed node is not reused",
- tester.highestIndex(tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active)).allocation().get().membership().index()
+ tester.highestIndex(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1)).allocation().get().membership().index()
>
lastNode.allocation().get().membership().index());
}
@@ -313,7 +313,7 @@ public class NodeFailerTest {
@Test
public void re_activate_grace_period_test() {
NodeFailTester tester = NodeFailTester.withTwoApplications();
- String downNode = tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active).asList().get(1).hostname();
+ String downNode = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname();
tester.serviceMonitor.setHostDown(downNode);
tester.allNodesMakeAConfigRequestExcept();
@@ -350,7 +350,7 @@ public class NodeFailerTest {
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test")).vespaVersion("6.42").build();
tester.activate(NodeFailTester.app1, cluster, capacity);
- String downHost = tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active).first().get().hostname();
+ String downHost = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).first().get().hostname();
tester.serviceMonitor.setHostDown(downHost);
// nothing happens the first 45 minutes
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainerTest.java
index b90f98c9ad8..db6aebacddc 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainerTest.java
@@ -51,19 +51,19 @@ public class OperatorChangeApplicationMaintainerTest {
maintainer.maintain();
assertEquals("No changes -> no redeployments", 3, fixture.deployer.redeployments);
- nodeRepository.nodes().fail(nodeRepository.nodes().list(fixture.app1).asList().get(3).hostname(), Agent.system, "Failing to unit test");
+ nodeRepository.nodes().fail(nodeRepository.nodes().list().owner(fixture.app1).asList().get(3).hostname(), Agent.system, "Failing to unit test");
clock.advance(Duration.ofMinutes(2));
maintainer.maintain();
assertEquals("System change -> no redeployments", 3, fixture.deployer.redeployments);
clock.advance(Duration.ofSeconds(1));
- nodeRepository.nodes().fail(nodeRepository.nodes().list(fixture.app2).asList().get(4).hostname(), Agent.operator, "Manual node failing");
+ nodeRepository.nodes().fail(nodeRepository.nodes().list().owner(fixture.app2).asList().get(4).hostname(), Agent.operator, "Manual node failing");
clock.advance(Duration.ofMinutes(2));
maintainer.maintain();
assertEquals("Operator change -> redeployment", 4, fixture.deployer.redeployments);
clock.advance(Duration.ofSeconds(1));
- nodeRepository.nodes().fail(nodeRepository.nodes().list(fixture.app3).asList().get(1).hostname(), Agent.operator, "Manual node failing");
+ nodeRepository.nodes().fail(nodeRepository.nodes().list().owner(fixture.app3).asList().get(1).hostname(), Agent.operator, "Manual node failing");
clock.advance(Duration.ofMinutes(2));
maintainer.maintain();
assertEquals("Operator change -> redeployment", 5, fixture.deployer.redeployments);
@@ -104,9 +104,9 @@ public class OperatorChangeApplicationMaintainerTest {
deployer.deployFromLocalActive(app1, false).get().activate();
deployer.deployFromLocalActive(app2, false).get().activate();
deployer.deployFromLocalActive(app3, false).get().activate();
- assertEquals(wantedNodesApp1, nodeRepository.nodes().list(app1, Node.State.active).size());
- assertEquals(wantedNodesApp2, nodeRepository.nodes().list(app2, Node.State.active).size());
- assertEquals(wantedNodesApp3, nodeRepository.nodes().list(app3, Node.State.active).size());
+ assertEquals(wantedNodesApp1, nodeRepository.nodes().list(Node.State.active).owner(app1).size());
+ assertEquals(wantedNodesApp2, nodeRepository.nodes().list(Node.State.active).owner(app2).size());
+ assertEquals(wantedNodesApp3, nodeRepository.nodes().list(Node.State.active).owner(app3).size());
}
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java
index 67b10a351cb..e280a0211e4 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java
@@ -64,21 +64,21 @@ public class PeriodicApplicationMaintainerTest {
fixture.setBootstrapping(false);
// Fail and park some nodes
- nodeRepository.nodes().fail(nodeRepository.nodes().list(fixture.app1).asList().get(3).hostname(), Agent.system, "Failing to unit test");
- nodeRepository.nodes().fail(nodeRepository.nodes().list(fixture.app2).asList().get(0).hostname(), Agent.system, "Failing to unit test");
- nodeRepository.nodes().park(nodeRepository.nodes().list(fixture.app2).asList().get(4).hostname(), true, Agent.system, "Parking to unit test");
+ nodeRepository.nodes().fail(nodeRepository.nodes().list().owner(fixture.app1).asList().get(3).hostname(), Agent.system, "Failing to unit test");
+ nodeRepository.nodes().fail(nodeRepository.nodes().list().owner(fixture.app2).asList().get(0).hostname(), Agent.system, "Failing to unit test");
+ nodeRepository.nodes().park(nodeRepository.nodes().list().owner(fixture.app2).asList().get(4).hostname(), true, Agent.system, "Parking to unit test");
int failedInApp1 = 1;
int failedOrParkedInApp2 = 2;
- assertEquals(fixture.wantedNodesApp1 - failedInApp1, nodeRepository.nodes().list(fixture.app1, Node.State.active).size());
- assertEquals(fixture.wantedNodesApp2 - failedOrParkedInApp2, nodeRepository.nodes().list(fixture.app2, Node.State.active).size());
+ assertEquals(fixture.wantedNodesApp1 - failedInApp1, nodeRepository.nodes().list(Node.State.active).owner(fixture.app1).size());
+ assertEquals(fixture.wantedNodesApp2 - failedOrParkedInApp2, nodeRepository.nodes().list(Node.State.active).owner(fixture.app2).size());
assertEquals(failedInApp1 + failedOrParkedInApp2, nodeRepository.nodes().list(Node.State.failed, Node.State.parked).nodeType(NodeType.tenant).size());
assertEquals(3, nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
assertEquals(2, nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).size());
// Cause maintenance deployment which will allocate replacement nodes
fixture.runApplicationMaintainer();
- assertEquals(fixture.wantedNodesApp1, nodeRepository.nodes().list(fixture.app1, Node.State.active).size());
- assertEquals(fixture.wantedNodesApp2, nodeRepository.nodes().list(fixture.app2, Node.State.active).size());
+ assertEquals(fixture.wantedNodesApp1, nodeRepository.nodes().list(Node.State.active).owner(fixture.app1).size());
+ assertEquals(fixture.wantedNodesApp2, nodeRepository.nodes().list(Node.State.active).owner(fixture.app2).size());
assertEquals(0, nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
// Reactivate the previously failed nodes
@@ -88,8 +88,8 @@ public class PeriodicApplicationMaintainerTest {
int reactivatedInApp1 = 1;
int reactivatedInApp2 = 2;
assertEquals(0, nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
- assertEquals(fixture.wantedNodesApp1 + reactivatedInApp1, nodeRepository.nodes().list(fixture.app1, Node.State.active).size());
- assertEquals(fixture.wantedNodesApp2 + reactivatedInApp2, nodeRepository.nodes().list(fixture.app2, Node.State.active).size());
+ assertEquals(fixture.wantedNodesApp1 + reactivatedInApp1, nodeRepository.nodes().list(Node.State.active).owner(fixture.app1).size());
+ assertEquals(fixture.wantedNodesApp2 + reactivatedInApp2, nodeRepository.nodes().list(Node.State.active).owner(fixture.app2).size());
assertEquals("The reactivated nodes are now active but not part of the application",
0, fixture.getNodes(Node.State.active).retired().size());
@@ -112,13 +112,13 @@ public class PeriodicApplicationMaintainerTest {
// Remove one application without letting the application maintainer know about it
fixture.remove(fixture.app2);
- assertEquals(fixture.wantedNodesApp2, nodeRepository.nodes().list(fixture.app2, Node.State.inactive).size());
+ assertEquals(fixture.wantedNodesApp2, nodeRepository.nodes().list(Node.State.inactive).owner(fixture.app2).size());
// Nodes belonging to app2 are inactive after maintenance
fixture.maintainer.setOverriddenNodesNeedingMaintenance(frozenActiveNodes);
fixture.runApplicationMaintainer();
assertEquals("Inactive nodes were incorrectly activated after maintenance", fixture.wantedNodesApp2,
- nodeRepository.nodes().list(fixture.app2, Node.State.inactive).size());
+ nodeRepository.nodes().list(Node.State.inactive).owner(fixture.app2).size());
}
@Test(timeout = 60_000)
@@ -232,8 +232,8 @@ public class PeriodicApplicationMaintainerTest {
void activate() {
deployer.deployFromLocalActive(app1, false).get().activate();
deployer.deployFromLocalActive(app2, false).get().activate();
- assertEquals(wantedNodesApp1, nodeRepository.nodes().list(app1, Node.State.active).size());
- assertEquals(wantedNodesApp2, nodeRepository.nodes().list(app2, Node.State.active).size());
+ assertEquals(wantedNodesApp1, nodeRepository.nodes().list(Node.State.active).owner(app1).size());
+ assertEquals(wantedNodesApp2, nodeRepository.nodes().list(Node.State.active).owner(app2).size());
}
void remove(ApplicationId application) {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java
index b2ac6788566..9fddaab8b3b 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java
@@ -177,7 +177,7 @@ public class RebalancerTest {
}
List<Node> getNodes(ApplicationId applicationId, Node.State nodeState) {
- return tester.nodeRepository().nodes().list(applicationId, nodeState).asList();
+ return tester.nodeRepository().nodes().list(nodeState).owner(applicationId).asList();
}
boolean isNodeRetired(Node node) {
@@ -188,7 +188,7 @@ public class RebalancerTest {
NodeList getNodes(Node.State nodeState) { return tester.nodeRepository().nodes().list(nodeState); }
- Node getNode(ApplicationId applicationId) { return tester.nodeRepository().nodes().list(applicationId).first().get(); }
+ Node getNode(ApplicationId applicationId) { return tester.nodeRepository().nodes().list().owner(applicationId).first().get(); }
ManualClock clock() { return tester.clock(); }
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java
index 129e4e3a775..c1c6e5b6154 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java
@@ -71,8 +71,8 @@ public class RetiredExpirerTest {
activate(applicationId, cluster, wantedNodes=7, 1);
activate(applicationId, cluster, wantedNodes=2, 1);
activate(applicationId, cluster, wantedNodes=3, 1);
- assertEquals(7, nodeRepository.nodes().list(applicationId, Node.State.active).size());
- assertEquals(0, nodeRepository.nodes().list(applicationId, Node.State.inactive).size());
+ assertEquals(7, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
+ assertEquals(0, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
// Cause inactivation of retired nodes
clock.advance(Duration.ofHours(30)); // Retire period spent
@@ -83,12 +83,12 @@ public class RetiredExpirerTest {
cluster,
Capacity.from(new ClusterResources(wantedNodes, 1, nodeResources)))));
createRetiredExpirer(deployer).run();
- assertEquals(3, nodeRepository.nodes().list(applicationId, Node.State.active).size());
- assertEquals(4, nodeRepository.nodes().list(applicationId, Node.State.inactive).size());
+ assertEquals(3, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
+ assertEquals(4, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
assertEquals(1, deployer.redeployments);
// inactivated nodes are not retired
- for (Node node : nodeRepository.nodes().list(applicationId, Node.State.inactive))
+ for (Node node : nodeRepository.nodes().list(Node.State.inactive).owner(applicationId))
assertFalse(node.allocation().get().membership().retired());
}
@@ -106,8 +106,8 @@ public class RetiredExpirerTest {
activate(applicationId, cluster, wantedNodes=7, 1);
activate(applicationId, cluster, wantedNodes=2, 1);
activate(applicationId, cluster, wantedNodes=3, 1);
- assertEquals(7, nodeRepository.nodes().list(applicationId, Node.State.active).size());
- assertEquals(0, nodeRepository.nodes().list(applicationId, Node.State.inactive).size());
+ assertEquals(7, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
+ assertEquals(0, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
// Cause inactivation of retired nodes
MockDeployer deployer =
@@ -128,27 +128,27 @@ public class RetiredExpirerTest {
RetiredExpirer retiredExpirer = createRetiredExpirer(deployer);
retiredExpirer.run();
- assertEquals(5, nodeRepository.nodes().list(applicationId, Node.State.active).size());
- assertEquals(2, nodeRepository.nodes().list(applicationId, Node.State.inactive).size());
+ assertEquals(5, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
+ assertEquals(2, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
assertEquals(1, deployer.redeployments);
verify(orchestrator, times(4)).acquirePermissionToRemove(any());
// Running it again has no effect
retiredExpirer.run();
- assertEquals(5, nodeRepository.nodes().list(applicationId, Node.State.active).size());
- assertEquals(2, nodeRepository.nodes().list(applicationId, Node.State.inactive).size());
+ assertEquals(5, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
+ assertEquals(2, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
assertEquals(1, deployer.redeployments);
verify(orchestrator, times(6)).acquirePermissionToRemove(any());
clock.advance(RETIRED_EXPIRATION.plusMinutes(1));
retiredExpirer.run();
- assertEquals(3, nodeRepository.nodes().list(applicationId, Node.State.active).size());
- assertEquals(4, nodeRepository.nodes().list(applicationId, Node.State.inactive).size());
+ assertEquals(3, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
+ assertEquals(4, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
assertEquals(2, deployer.redeployments);
verify(orchestrator, times(6)).acquirePermissionToRemove(any());
// inactivated nodes are not retired
- for (Node node : nodeRepository.nodes().list(applicationId, Node.State.inactive))
+ for (Node node : nodeRepository.nodes().list(Node.State.inactive).owner(applicationId))
assertFalse(node.allocation().get().membership().retired());
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
index 0d2de73635e..6581008268d 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
@@ -115,14 +115,14 @@ public class ScalingSuggestionsMaintainerTest {
}
private boolean shouldSuggest(ApplicationId app, ClusterSpec cluster, ProvisioningTester tester) {
- var currentResources = tester.nodeRepository().nodes().list(app).cluster(cluster.id()).not().retired().toResources();
+ var currentResources = tester.nodeRepository().nodes().list().owner(app).cluster(cluster.id()).not().retired().toResources();
return tester.nodeRepository().applications().get(app).get().cluster(cluster.id()).get()
.shouldSuggestResources(currentResources);
}
public void addMeasurements(float cpu, float memory, float disk, int generation, int count, ApplicationId applicationId,
NodeRepository nodeRepository, MetricsDb db) {
- NodeList nodes = nodeRepository.nodes().list(applicationId, Node.State.active);
+ NodeList nodes = nodeRepository.nodes().list(Node.State.active).owner(applicationId);
for (int i = 0; i < count; i++) {
for (Node node : nodes)
db.add(List.of(new Pair<>(node.hostname(), new MetricSnapshot(nodeRepository.clock().instant(),
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java
index bd342df22f0..8033663c6cf 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java
@@ -115,7 +115,7 @@ public class AclProvisioningTest {
tester.deploy(zoneApplication, Capacity.fromRequiredNodeType(NodeType.proxy));
// Get trusted nodes for first proxy node
- NodeList proxyNodes = tester.nodeRepository().nodes().list(zoneApplication);
+ NodeList proxyNodes = tester.nodeRepository().nodes().list().owner(zoneApplication);
Node node = proxyNodes.first().get();
NodeAcl nodeAcl = node.acl(tester.nodeRepository().nodes().list(), tester.nodeRepository().loadBalancers());
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java
index 55ca2d7e55a..12a8b476d5e 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java
@@ -366,7 +366,7 @@ public class DockerProvisioningTest {
tester.activate(app1, cluster1, Capacity.from(new ClusterResources(2, 1, r)));
var tx = new ApplicationTransaction(new ProvisionLock(app1, tester.nodeRepository().nodes().lock(app1)), new NestedTransaction());
- tester.nodeRepository().nodes().deactivate(tester.nodeRepository().nodes().list(app1, Node.State.active).retired().asList(), tx);
+ tester.nodeRepository().nodes().deactivate(tester.nodeRepository().nodes().list(Node.State.active).owner(app1).retired().asList(), tx);
tx.nested().commit();
assertEquals(2, tester.getNodes(app1, Node.State.active).size());
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java
index 94ce5b8c5fb..cf7083ccc4f 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java
@@ -315,7 +315,7 @@ public class DynamicDockerAllocationTest {
List<HostSpec> hosts = tester.prepare(application, clusterSpec("myContent.t1.a1"), 2, 1, new NodeResources(1, 4, 100, 1));
tester.activate(application, hosts);
- NodeList activeNodes = tester.nodeRepository().nodes().list(application);
+ NodeList activeNodes = tester.nodeRepository().nodes().list().owner(application);
assertEquals(ImmutableSet.of("127.0.127.13", "::13"), activeNodes.asList().get(0).ipConfig().primary());
assertEquals(ImmutableSet.of("127.0.127.2", "::2"), activeNodes.asList().get(1).ipConfig().primary());
}
@@ -437,16 +437,16 @@ public class DynamicDockerAllocationTest {
// Redeploy does not change allocation as a host with switch information is no better or worse than hosts
// without switch information
- NodeList allocatedNodes = tester.nodeRepository().nodes().list(app1);
+ NodeList allocatedNodes = tester.nodeRepository().nodes().list().owner(app1);
tester.activate(app1, tester.prepare(app1, cluster, Capacity.from(new ClusterResources(2, 1, resources))));
- assertEquals("Allocation unchanged", allocatedNodes, tester.nodeRepository().nodes().list(app1));
+ assertEquals("Allocation unchanged", allocatedNodes, tester.nodeRepository().nodes().list().owner(app1));
// Initial hosts are attached to the same switch
tester.patchNodes(hosts0, (host) -> host.withSwitchHostname(switch0));
// Redeploy does not change allocation
tester.activate(app1, tester.prepare(app1, cluster, Capacity.from(new ClusterResources(2, 1, resources))));
- assertEquals("Allocation unchanged", allocatedNodes, tester.nodeRepository().nodes().list(app1));
+ assertEquals("Allocation unchanged", allocatedNodes, tester.nodeRepository().nodes().list().owner(app1));
// One regular host and one slow-disk host are provisioned on the same switch
String switch1 = "switch1";
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java
index 5f8a0c99b9f..7d8fec95d3a 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java
@@ -167,19 +167,19 @@ public class DynamicDockerProvisionTest {
mockHostProvisioner(hostProvisioner, "large", 3, null); // Provision shared hosts
prepareAndActivate(application1, clusterSpec("mycluster"), 4, 1, resources);
- Set<Node> initialNodes = tester.nodeRepository().nodes().list(application1).stream().collect(Collectors.toSet());
+ Set<Node> initialNodes = tester.nodeRepository().nodes().list().owner(application1).stream().collect(Collectors.toSet());
assertEquals(4, initialNodes.size());
// Redeploy same application with exclusive=true
mockHostProvisioner(hostProvisioner, "large", 3, application1);
prepareAndActivate(application1, clusterSpec("mycluster", true), 4, 1, resources);
- assertEquals(8, tester.nodeRepository().nodes().list(application1).size());
- assertEquals(initialNodes, tester.nodeRepository().nodes().list(application1).retired().stream().collect(Collectors.toSet()));
+ assertEquals(8, tester.nodeRepository().nodes().list().owner(application1).size());
+ assertEquals(initialNodes, tester.nodeRepository().nodes().list().owner(application1).retired().stream().collect(Collectors.toSet()));
// Redeploy without exclusive again is no-op
prepareAndActivate(application1, clusterSpec("mycluster"), 4, 1, resources);
- assertEquals(8, tester.nodeRepository().nodes().list(application1).size());
- assertEquals(initialNodes, tester.nodeRepository().nodes().list(application1).retired().stream().collect(Collectors.toSet()));
+ assertEquals(8, tester.nodeRepository().nodes().list().owner(application1).size());
+ assertEquals(initialNodes, tester.nodeRepository().nodes().list().owner(application1).retired().stream().collect(Collectors.toSet()));
}
@Test
@@ -188,7 +188,7 @@ public class DynamicDockerProvisionTest {
ApplicationId app = ProvisioningTester.applicationId();
Function<Node, Node> retireNode = node -> tester.patchNode(node, (n) -> n.withWantToRetire(true, Agent.system, Instant.now()));
- Function<Integer, Node> getNodeInGroup = group -> tester.nodeRepository().nodes().list(app).stream()
+ Function<Integer, Node> getNodeInGroup = group -> tester.nodeRepository().nodes().list().owner(app).stream()
.filter(node -> node.allocation().get().membership().cluster().group().get().index() == group)
.findAny().orElseThrow();
@@ -209,7 +209,7 @@ public class DynamicDockerProvisionTest {
tester.prepare(app, clusterSpec("content"), 8, 2, resources);
// Verify that nodes have unique indices from 0..9
- var indices = tester.nodeRepository().nodes().list(app).stream()
+ var indices = tester.nodeRepository().nodes().list().owner(app).stream()
.map(node -> node.allocation().get().membership().index())
.collect(Collectors.toSet());
assertTrue(indices.containsAll(IntStream.range(0, 10).boxed().collect(Collectors.toList())));
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
index a5e7704cce7..db6cc1d5fa6 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
@@ -137,7 +137,7 @@ public class LoadBalancerProvisionerTest {
// Entire application is removed: Nodes and load balancer are deactivated
tester.remove(app1);
dirtyNodesOf(app1);
- assertTrue("No nodes are allocated to " + app1, tester.nodeRepository().nodes().list(app1, Node.State.reserved, Node.State.active).isEmpty());
+ assertTrue("No nodes are allocated to " + app1, tester.nodeRepository().nodes().list(Node.State.reserved, Node.State.active).owner(app1).isEmpty());
assertEquals(2, lbApp1.get().size());
assertTrue("Deactivated load balancers", lbApp1.get().stream().allMatch(lb -> lb.state() == LoadBalancer.State.inactive));
assertTrue("Load balancers for " + app2 + " remain active", lbApp2.get().stream().allMatch(lb -> lb.state() == LoadBalancer.State.active));
@@ -168,7 +168,7 @@ public class LoadBalancerProvisionerTest {
var nodes = tester.prepare(app1, clusterRequest(ClusterSpec.Type.container, ClusterSpec.Id.from("qrs")), 2 , 1, resources);
Supplier<LoadBalancer> lb = () -> tester.nodeRepository().loadBalancers().list(app1).asList().get(0);
assertTrue("Load balancer provisioned with empty reals", tester.loadBalancerService().instances().get(lb.get().id()).reals().isEmpty());
- assignIps(tester.nodeRepository().nodes().list(app1));
+ assignIps(tester.nodeRepository().nodes().list().owner(app1));
tester.activate(app1, nodes);
assertFalse("Load balancer is reconfigured with reals", tester.loadBalancerService().instances().get(lb.get().id()).reals().isEmpty());
@@ -181,7 +181,7 @@ public class LoadBalancerProvisionerTest {
// Application is redeployed
nodes = tester.prepare(app1, clusterRequest(ClusterSpec.Type.container, ClusterSpec.Id.from("qrs")), 2 , 1, resources);
assertTrue("Load balancer is reconfigured with empty reals", tester.loadBalancerService().instances().get(lb.get().id()).reals().isEmpty());
- assignIps(tester.nodeRepository().nodes().list(app1));
+ assignIps(tester.nodeRepository().nodes().list().owner(app1));
tester.activate(app1, nodes);
assertFalse("Load balancer is reconfigured with reals", tester.loadBalancerService().instances().get(lb.get().id()).reals().isEmpty());
}
@@ -270,7 +270,7 @@ public class LoadBalancerProvisionerTest {
}

private void dirtyNodesOf(ApplicationId application) {
- tester.nodeRepository().nodes().deallocate(tester.nodeRepository().nodes().list(application).asList(), Agent.system, this.getClass().getSimpleName());
+ tester.nodeRepository().nodes().deallocate(tester.nodeRepository().nodes().list().owner(application).asList(), Agent.system, this.getClass().getSimpleName());
}

private Set<HostSpec> prepare(ApplicationId application, ClusterSpec... specs) {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
index 2b0541ce0c2..9af137aa6d8 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
@@ -95,7 +95,7 @@ public class ProvisioningTest {
HostSpec removed = tester.removeOne(state5.allHosts);
tester.activate(application1, state5.allHosts);
assertEquals(removed.hostname(),
- tester.nodeRepository().nodes().list(application1, Node.State.inactive).first().get().hostname());
+ tester.nodeRepository().nodes().list(Node.State.inactive).owner(application1).first().get().hostname());

// remove some of the clusters
SystemState state6 = prepare(application1, 0, 2, 0, 3, defaultResources, tester);
@@ -108,13 +108,13 @@ public class ProvisioningTest {
NodeList previouslyInactive = tester.getNodes(application1, Node.State.inactive);
tester.remove(application1);
assertEquals(tester.toHostNames(previouslyActive.not().container()),
- tester.toHostNames(tester.nodeRepository().nodes().list(application1, Node.State.inactive)));
+ tester.toHostNames(tester.nodeRepository().nodes().list(Node.State.inactive).owner(application1)));
assertTrue(tester.nodeRepository().nodes().list(Node.State.dirty).asList().containsAll(previouslyActive.container().asList()));
assertEquals(0, tester.getNodes(application1, Node.State.active).size());
assertTrue(tester.nodeRepository().applications().get(application1).isEmpty());

// other application is unaffected
- assertEquals(state1App2.hostNames(), tester.toHostNames(tester.nodeRepository().nodes().list(application2, Node.State.active)));
+ assertEquals(state1App2.hostNames(), tester.toHostNames(tester.nodeRepository().nodes().list(Node.State.active).owner(application2)));

// fail a node from app2 and make sure it does not get inactive nodes from first
HostSpec failed = tester.removeOne(state1App2.allHosts);
@@ -289,7 +289,7 @@ public class ProvisioningTest {

// redeploy with increased sizes and new flavor
SystemState state3 = prepare(application1, 3, 4, 4, 5, large, tester);
- assertEquals("New nodes are reserved", 16, tester.nodeRepository().nodes().list(application1, Node.State.reserved).size());
+ assertEquals("New nodes are reserved", 16, tester.nodeRepository().nodes().list(Node.State.reserved).owner(application1).size());
tester.activate(application1, state3.allHosts);
assertEquals("small container nodes are retired because we are swapping the entire cluster",
2 + 2, tester.getNodes(application1, Node.State.active).retired().type(ClusterSpec.Type.container).resources(small).size());
@@ -316,7 +316,7 @@ public class ProvisioningTest {
SystemState state1 = prepare(application1, 2, 2, 4, 4, small, tester);
tester.activate(application1, state1.allHosts);

- tester.nodeRepository().nodes().list(application1)
+ tester.nodeRepository().nodes().list().owner(application1)
.forEach(n -> assertEquals(large, tester.nodeRepository().nodes().node(n.parentHostname().get()).get().resources()));
}
@@ -374,7 +374,7 @@ public class ProvisioningTest {
assertEquals(6, state.allHosts.size());
tester.activate(application, state.allHosts);
assertTrue(state.allHosts.stream().allMatch(host -> host.requestedResources().get().diskSpeed() == NodeResources.DiskSpeed.any));
- assertTrue(tester.nodeRepository().nodes().list(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any));
+ assertTrue(tester.nodeRepository().nodes().list().owner(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any));
}

{
@@ -386,7 +386,7 @@ public class ProvisioningTest {
assertEquals(8, state.allHosts.size());
tester.activate(application, state.allHosts);
assertTrue(state.allHosts.stream().allMatch(host -> host.requestedResources().get().diskSpeed() == NodeResources.DiskSpeed.fast));
- assertTrue(tester.nodeRepository().nodes().list(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.fast));
+ assertTrue(tester.nodeRepository().nodes().list().owner(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.fast));
}

{
@@ -397,7 +397,7 @@ public class ProvisioningTest {
assertEquals(8, state.allHosts.size());
tester.activate(application, state.allHosts);
assertTrue(state.allHosts.stream().allMatch(host -> host.requestedResources().get().diskSpeed() == NodeResources.DiskSpeed.any));
- assertTrue(tester.nodeRepository().nodes().list(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any));
+ assertTrue(tester.nodeRepository().nodes().list().owner(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any));
}
}
@@ -692,25 +692,25 @@ public class ProvisioningTest {
// Allocate 5 nodes
ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("4.5.6").build();
tester.activate(application, tester.prepare(application, cluster, capacity));
- assertEquals(5, tester.nodeRepository().nodes().list(application, Node.State.active).not().retired().size());
- assertEquals(0, tester.nodeRepository().nodes().list(application, Node.State.active).retired().size());
+ assertEquals(5, tester.nodeRepository().nodes().list(Node.State.active).owner(application).not().retired().size());
+ assertEquals(0, tester.nodeRepository().nodes().list(Node.State.active).owner(application).retired().size());

// Mark the nodes as want to retire
- tester.nodeRepository().nodes().list(application, Node.State.active).forEach(node -> tester.patchNode(node, (n) -> n.withWantToRetire(true, Agent.system, tester.clock().instant())));
+ tester.nodeRepository().nodes().list(Node.State.active).owner(application).forEach(node -> tester.patchNode(node, (n) -> n.withWantToRetire(true, Agent.system, tester.clock().instant())));

// redeploy without allow failing
tester.activate(application, tester.prepare(application, cluster, capacityFORCED));
// Nodes are not retired since that is unsafe when we cannot fail
- assertEquals(5, tester.nodeRepository().nodes().list(application, Node.State.active).not().retired().size());
- assertEquals(0, tester.nodeRepository().nodes().list(application, Node.State.active).retired().size());
+ assertEquals(5, tester.nodeRepository().nodes().list(Node.State.active).owner(application).not().retired().size());
+ assertEquals(0, tester.nodeRepository().nodes().list(Node.State.active).owner(application).retired().size());
// ... but we still want to
- tester.nodeRepository().nodes().list(application, Node.State.active).forEach(node -> assertTrue(node.status().wantToRetire()));
+ tester.nodeRepository().nodes().list(Node.State.active).owner(application).forEach(node -> assertTrue(node.status().wantToRetire()));

// redeploy with allowing failing
tester.activate(application, tester.prepare(application, cluster, capacity));
// ... old nodes are now retired
- assertEquals(5, tester.nodeRepository().nodes().list(application, Node.State.active).not().retired().size());
- assertEquals(5, tester.nodeRepository().nodes().list(application, Node.State.active).retired().size());
+ assertEquals(5, tester.nodeRepository().nodes().list(Node.State.active).owner(application).not().retired().size());
+ assertEquals(5, tester.nodeRepository().nodes().list(Node.State.active).owner(application).retired().size());
}

@Test
@@ -723,17 +723,17 @@ public class ProvisioningTest {

ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("4.5.6").build();
tester.activate(application, tester.prepare(application, cluster, capacityCanFail));
- assertEquals(0, tester.nodeRepository().nodes().list(application, Node.State.active).retired().size());
+ assertEquals(0, tester.nodeRepository().nodes().list(Node.State.active).owner(application).retired().size());

- tester.patchNode(tester.nodeRepository().nodes().list(application).stream().findAny().orElseThrow(), n -> n.withWantToRetire(true, Agent.system, tester.clock().instant()));
+ tester.patchNode(tester.nodeRepository().nodes().list().owner(application).stream().findAny().orElseThrow(), n -> n.withWantToRetire(true, Agent.system, tester.clock().instant()));
tester.activate(application, tester.prepare(application, cluster, capacityCanFail));
- assertEquals(1, tester.nodeRepository().nodes().list(application, Node.State.active).retired().size());
- assertEquals(6, tester.nodeRepository().nodes().list(application, Node.State.active).size());
+ assertEquals(1, tester.nodeRepository().nodes().list(Node.State.active).owner(application).retired().size());
+ assertEquals(6, tester.nodeRepository().nodes().list(Node.State.active).owner(application).size());

Capacity capacityCannotFail = Capacity.from(new ClusterResources(5, 1, defaultResources), false, false);
tester.activate(application, tester.prepare(application, cluster, capacityCannotFail));
- assertEquals(1, tester.nodeRepository().nodes().list(application, Node.State.active).retired().size());
- assertEquals(6, tester.nodeRepository().nodes().list(application, Node.State.active).size());
+ assertEquals(1, tester.nodeRepository().nodes().list(Node.State.active).owner(application).retired().size());
+ assertEquals(6, tester.nodeRepository().nodes().list(Node.State.active).owner(application).size());
}

@Test
@@ -901,7 +901,7 @@ public class ProvisioningTest {
try {
prepareAndActivate.apply(cfgApp);
} catch (ParentHostUnavailableException ignored) { }
- assertEquals(2, tester.nodeRepository().nodes().list(cfgApp.getApplicationId()).state(Node.State.reserved).size());
+ assertEquals(2, tester.nodeRepository().nodes().list().owner(cfgApp.getApplicationId()).state(Node.State.reserved).size());

prepareAndActivate.apply(cfgHostApp);
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
index 86f7ed45ce5..23f504a9c0f 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
@@ -143,7 +143,7 @@ public class ProvisioningTester {
public NodeRepositoryProvisioner provisioner() { return provisioner; }
public LoadBalancerServiceMock loadBalancerService() { return loadBalancerService; }
public CapacityPolicies capacityPolicies() { return capacityPolicies; }
- public NodeList getNodes(ApplicationId id, Node.State ... inState) { return nodeRepository.nodes().list(id, inState); }
+ public NodeList getNodes(ApplicationId id, Node.State ... inState) { return nodeRepository.nodes().list(inState).owner(id); }

public Node patchNode(Node node, UnaryOperator<Node> patcher) {
return patchNodes(List.of(node), patcher).get(0);
@@ -170,12 +170,12 @@ public class ProvisioningTester {
}

public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity capacity) {
- Set<String> reservedBefore = toHostNames(nodeRepository.nodes().list(application, Node.State.reserved));
- Set<String> inactiveBefore = toHostNames(nodeRepository.nodes().list(application, Node.State.inactive));
+ Set<String> reservedBefore = toHostNames(nodeRepository.nodes().list(Node.State.reserved).owner(application));
+ Set<String> inactiveBefore = toHostNames(nodeRepository.nodes().list(Node.State.inactive).owner(application));
List<HostSpec> hosts1 = provisioner.prepare(application, cluster, capacity, provisionLogger);
List<HostSpec> hosts2 = provisioner.prepare(application, cluster, capacity, provisionLogger);
assertEquals("Prepare is idempotent", hosts1, hosts2);
- Set<String> newlyActivated = toHostNames(nodeRepository.nodes().list(application, Node.State.reserved));
+ Set<String> newlyActivated = toHostNames(nodeRepository.nodes().list(Node.State.reserved).owner(application));
newlyActivated.removeAll(reservedBefore);
newlyActivated.removeAll(inactiveBefore);
return hosts1;
@@ -213,7 +213,7 @@ public class ProvisioningTester {
provisioner.activate(hosts, new ActivationContext(0), new ApplicationTransaction(lock, transaction));
transaction.commit();
}
- assertEquals(toHostNames(hosts), toHostNames(nodeRepository.nodes().list(application, Node.State.active)));
+ assertEquals(toHostNames(hosts), toHostNames(nodeRepository.nodes().list(Node.State.active).owner(application)));
return hosts;
}
@@ -259,7 +259,7 @@ public class ProvisioningTester {
* number of matches to the given filters
*/
public void assertRestartCount(ApplicationId application, HostFilter... filters) {
- for (Node node : nodeRepository.nodes().list(application, Node.State.active)) {
+ for (Node node : nodeRepository.nodes().list(Node.State.active).owner(application)) {
int expectedRestarts = 0;
for (HostFilter filter : filters)
if (NodeHostFilter.from(filter).matches(node))
@@ -464,7 +464,7 @@ public class ProvisioningTester {
application.getClusterSpecWithVersion(configServersVersion),
application.getCapacity());
activate(application.getApplicationId(), new HashSet<>(hosts));
- return nodeRepository.nodes().list(application.getApplicationId(), Node.State.active);
+ return nodeRepository.nodes().list(Node.State.active).owner(application.getApplicationId());
}

public List<Node> makeReadyNodes(int n, String flavor, NodeType type, int ipAddressPoolSize) {
@@ -560,7 +560,7 @@ public class ProvisioningTester {
}

public void assertAllocatedOn(String explanation, String hostFlavor, ApplicationId app) {
- for (Node node : nodeRepository.nodes().list(app)) {
+ for (Node node : nodeRepository.nodes().list().owner(app)) {
Node parent = nodeRepository.nodes().node(node.parentHostname().get()).get();
assertEquals(node + ": " + explanation, hostFlavor, parent.flavor().name());
}
@@ -594,7 +594,7 @@ public class ProvisioningTester {
}

public int hostFlavorCount(String hostFlavor, ApplicationId app) {
- return (int)nodeRepository().nodes().list(app).stream()
+ return (int)nodeRepository().nodes().list().owner(app).stream()
.map(n -> nodeRepository().nodes().node(n.parentHostname().get()).get())
.filter(p -> p.flavor().name().equals(hostFlavor))
.count();