From 7a9fe3fa0024c5dd995c1f48584bc7fab5284299 Mon Sep 17 00:00:00 2001 From: Jon Bratseth Date: Tue, 9 Feb 2021 19:04:25 +0100 Subject: Remove getNodes(...): Always use list(...) --- .../com/yahoo/vespa/hosted/provision/NodeList.java | 10 ++++++++ .../hosted/provision/maintenance/Expirer.java | 2 +- .../provision/maintenance/FailedExpirer.java | 2 +- .../hosted/provision/maintenance/NodeFailer.java | 8 +++--- .../provision/maintenance/NodeHealthTracker.java | 2 +- .../hosted/provision/maintenance/NodeRebooter.java | 2 +- .../maintenance/PeriodicApplicationMaintainer.java | 5 ++-- .../provision/maintenance/RetiredExpirer.java | 3 ++- .../yahoo/vespa/hosted/provision/node/Nodes.java | 24 ++++++++--------- .../persistence/CuratorDatabaseClient.java | 2 ++ .../provisioning/LoadBalancerProvisioner.java | 3 +-- .../hosted/provision/restapi/NodesResponse.java | 2 +- .../vespa/hosted/provision/NodeRepositoryTest.java | 28 ++++++++++---------- .../DynamicProvisioningMaintainerTest.java | 19 +++++++------- .../provision/maintenance/FailedExpirerTest.java | 2 +- .../maintenance/InactiveAndFailedExpirerTest.java | 28 ++++++++++---------- .../provision/maintenance/NodeFailTester.java | 2 +- .../provision/maintenance/NodeFailerTest.java | 30 +++++++++++----------- .../provision/maintenance/NodeRebooterTest.java | 4 +-- .../PeriodicApplicationMaintainerTest.java | 8 +++--- .../provision/maintenance/RebalancerTest.java | 5 ++-- .../vespa/hosted/provision/os/OsVersionsTest.java | 2 +- .../provisioning/DockerProvisioningTest.java | 4 +-- .../provisioning/DynamicDockerAllocationTest.java | 5 ++-- .../provisioning/NodeTypeProvisioningTest.java | 2 +- .../provision/provisioning/ProvisioningTest.java | 8 +++--- 26 files changed, 113 insertions(+), 99 deletions(-) (limited to 'node-repository') diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeList.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeList.java index 19c1fa090c9..84aafa77c27 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeList.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeList.java @@ -271,4 +271,14 @@ public class NodeList extends AbstractFilteringList { return asList().toString(); } + @Override + public int hashCode() { return asList().hashCode(); } + + @Override + public boolean equals(Object other) { + if (other == this) return true; + if ( ! 
(other instanceof NodeList)) return false;
+        return this.asList().equals(((NodeList) other).asList());
+    }
+
 }
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Expirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Expirer.java
index 8ccb8980a71..5b9cd6a69e1 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Expirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Expirer.java
@@ -41,7 +41,7 @@ public abstract class Expirer extends NodeRepositoryMaintainer {
 
     @Override
     protected boolean maintain() {
-        List<Node> expired = nodeRepository().nodes().getNodes(fromState).stream()
+        List<Node> expired = nodeRepository().nodes().list(fromState).stream()
                 .filter(this::isExpired)
                 .collect(Collectors.toList());
 
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java
index 08edee0be8b..7317942c045 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java
@@ -68,7 +68,7 @@ public class FailedExpirer extends NodeRepositoryMaintainer {
 
     @Override
     protected boolean maintain() {
-        List<Node> remainingNodes = nodeRepository.nodes().getNodes(Node.State.failed).stream()
+        List<Node> remainingNodes = nodeRepository.nodes().list(Node.State.failed).stream()
                 .filter(node -> node.type() == NodeType.tenant || node.type() == NodeType.host)
                 .collect(Collectors.toList());
 
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java
index 7c3e3eb4553..0591bd11eba 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java
@@ -129,7 +129,7 @@ public class NodeFailer extends NodeRepositoryMaintainer {
                 clock().instant().minus(downTimeLimit).minus(nodeRequestInterval);
 
         Map<Node, String> nodesByFailureReason = new HashMap<>();
-        for (Node node : nodeRepository().nodes().getNodes(Node.State.ready)) {
+        for (Node node : nodeRepository().nodes().list(Node.State.ready)) {
             if (expectConfigRequests(node) && ! hasNodeRequestedConfigAfter(node, oldestAcceptableRequestTime)) {
                 nodesByFailureReason.put(node, "Not receiving config requests from node");
             } else {
@@ -148,7 +148,7 @@
     }
 
     private Map<Node, String> getActiveNodesByFailureReason() {
-        List<Node> activeNodes = nodeRepository().nodes().getNodes(Node.State.active);
+        NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
         Instant graceTimeEnd = clock().instant().minus(downTimeLimit);
         Map<Node, String> nodesByFailureReason = new HashMap<>();
         for (Node node : activeNodes) {
@@ -224,7 +224,7 @@
     }
 
     /** Is the node and all active children suspended? */
-    private boolean hostSuspended(Node node, List<Node> activeNodes) {
+    private boolean hostSuspended(Node node, NodeList activeNodes) {
         if (!nodeSuspended(node)) return false;
         if (node.parentHostname().isPresent()) return true; // optimization
         return activeNodes.stream()
@@ -303,7 +303,7 @@
     private boolean throttle(Node node) {
         if (throttlePolicy == ThrottlePolicy.disabled) return false;
         Instant startOfThrottleWindow = clock().instant().minus(throttlePolicy.throttleWindow);
-        List<Node> nodes = nodeRepository().nodes().getNodes();
+        NodeList nodes = nodeRepository().nodes().list();
         NodeList recentlyFailedNodes = nodes.stream()
                 .filter(n -> n.state() == Node.State.failed)
                 .filter(n -> n.history().hasEventAfter(History.Event.Type.failed, startOfThrottleWindow))
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeHealthTracker.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeHealthTracker.java
index 92131a1cd74..e9dc961ce39 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeHealthTracker.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeHealthTracker.java
@@ -58,7 +58,7 @@ public class NodeHealthTracker extends NodeRepositoryMaintainer {
         // Update node last request events through ZooKeeper to collect request to all config servers.
         // We do this here ("lazily") to avoid writing to zk for each config request.
         try (Mutex lock = nodeRepository().nodes().lockUnallocated()) {
-            for (Node node : nodeRepository().nodes().getNodes(Node.State.ready)) {
+            for (Node node : nodeRepository().nodes().list(Node.State.ready)) {
                 Optional<Instant> lastLocalRequest = hostLivenessTracker.lastRequestFrom(node.hostname());
                 if (lastLocalRequest.isEmpty()) continue;
 
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooter.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooter.java
index e2cafbb9406..6ee657beadd 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooter.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooter.java
@@ -39,7 +39,7 @@ public class NodeRebooter extends NodeRepositoryMaintainer {
     @Override
     protected boolean maintain() {
         // Reboot candidates: Nodes in long-term states, where we know we can safely orchestrate a reboot
-        List<Node> nodesToReboot = nodeRepository().nodes().getNodes(Node.State.active, Node.State.ready).stream()
+        List<Node> nodesToReboot = nodeRepository().nodes().list(Node.State.active, Node.State.ready).stream()
                 .filter(node -> node.type().isHost())
                 .filter(this::shouldReboot)
                 .collect(Collectors.toList());
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainer.java
index 8253b3def0a..e0f6c9d78bb 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainer.java
@@ -9,6 +9,7 @@ import com.yahoo.vespa.flags.FetchVector;
 import com.yahoo.vespa.flags.FlagSource;
 import com.yahoo.vespa.flags.PermanentFlags;
 import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
 import com.yahoo.vespa.hosted.provision.NodeRepository;
 
 import java.time.Duration;
@@ -75,8 +76,8 @@ public class PeriodicApplicationMaintainer extends ApplicationMaintainer {
         return ! skipMaintenanceDeployment.value();
     }
 
-    protected List<Node> nodesNeedingMaintenance() {
-        return nodeRepository().nodes().getNodes(Node.State.active);
+    protected NodeList nodesNeedingMaintenance() {
+        return nodeRepository().nodes().list(Node.State.active);
     }
 
 }
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirer.java
index 337d25ca732..3064ac2d16b 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirer.java
@@ -7,6 +7,7 @@ import com.yahoo.config.provision.Deployer;
 import com.yahoo.jdisc.Metric;
 import com.yahoo.vespa.applicationmodel.HostName;
 import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
 import com.yahoo.vespa.hosted.provision.NodeRepository;
 import com.yahoo.vespa.hosted.provision.node.History;
 import com.yahoo.vespa.orchestrator.OrchestrationException;
@@ -45,7 +46,7 @@ public class RetiredExpirer extends NodeRepositoryMaintainer {
 
     @Override
     protected boolean maintain() {
-        List<Node> activeNodes = nodeRepository().nodes().getNodes(Node.State.active);
+        NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
 
         Map<ApplicationId, List<Node>> retiredNodesByApplication = activeNodes.stream()
                 .filter(node -> node.allocation().isPresent())
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
index d61c6f38306..b235d6b0ff0 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
@@ -74,15 +74,6 @@ public class Nodes {
         return db.readNode(hostname, inState);
     }
 
-    /**
-     * Returns all nodes in any of the given states.
-     *
-     * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
-     * @return the node, or empty if it was not found in any of the given states
-     */
-    public List<Node> getNodes(Node.State... inState) {
-        return new ArrayList<>(db.readNodes(inState));
-    }
     /**
      * Finds and returns the nodes of the given type in any of the given states.
      *
      * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
     * @return the node, or empty if it was not found in any of the given states
      */
     public List<Node> getNodes(NodeType type, Node.State... inState) {
         return db.readNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
     }
 
-    /** Returns a filterable list of nodes in this repository in any of the given states */
+    /**
+     * Returns a list of nodes in this repository in any of the given states
+     *
+     * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
+     */
     public NodeList list(Node.State... inState) {
-        return NodeList.copyOf(getNodes(inState));
+        return NodeList.copyOf(db.readNodes(inState));
     }
 
+    /**
+     * Returns a list of nodes in this repository for an application in any of the given states
+     *
+     * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
+     */
     public NodeList list(ApplicationId application, Node.State... inState) {
         return NodeList.copyOf(getNodes(application, inState));
     }
 
@@ -110,7 +110,7 @@
 
     /** Returns a locked list of all nodes in this repository */
     public LockedNodeList list(Mutex lock) {
-        return new LockedNodeList(getNodes(), lock);
+        return new LockedNodeList(list().asList(), lock);
     }
 
     public List<Node> getNodes(ApplicationId id, Node.State... inState) { return db.readNodes(id, inState); }
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java
index 696853b2992..95445ad0a66 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java
@@ -261,6 +261,8 @@ public class CuratorDatabaseClient {
     /**
      * Returns all nodes which are in one of the given states.
      * If no states are given this returns all nodes.
+     *
+     * @return the nodes in a mutable list owned by the caller
      */
     public List<Node> readNodes(Node.State ... states) {
         List<Node> nodes = new ArrayList<>();
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
index 156d1023bbc..b1bba656dc8 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
@@ -210,8 +210,7 @@ public class LoadBalancerProvisioner {
 
     /** Returns the load balanced clusters of given application and their nodes */
     private Map<ClusterSpec.Id, List<Node>> loadBalancedClustersOf(ApplicationId application) {
-        NodeList nodes = NodeList.copyOf(nodeRepository.nodes().getNodes(Node.State.reserved, Node.State.active))
-                                 .owner(application);
+        NodeList nodes = nodeRepository.nodes().list(Node.State.reserved, Node.State.active).owner(application);
         if (nodes.stream().anyMatch(node -> node.type() == NodeType.config)) {
             nodes = nodes.nodeType(NodeType.config).type(ClusterSpec.Type.admin);
         } else if (nodes.stream().anyMatch(node -> node.type() == NodeType.controller)) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java
index e71902f908b..5ef71a6c1b1 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java
@@ -116,7 +116,7 @@ class NodesResponse extends HttpResponse {
     /** Outputs all the nodes to a node array */
     private void nodesToSlime(Cursor parentObject) {
         Cursor nodeArray = parentObject.setArray("nodes");
-        toSlime(nodeRepository.nodes().getNodes(), nodeArray);
+        toSlime(nodeRepository.nodes().list().asList(), nodeArray);
     }
 
     private void toSlime(List<Node> nodes, Cursor array) {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java
index 0617884d227..78835912606 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java
+++ 
b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java @@ -32,19 +32,19 @@ public class NodeRepositoryTest { @Test public void add_and_remove() { NodeRepositoryTester tester = new NodeRepositoryTester(); - assertEquals(0, tester.nodeRepository().nodes().getNodes().size()); + assertEquals(0, tester.nodeRepository().nodes().list().size()); tester.addHost("id1", "host1", "default", NodeType.host); tester.addHost("id2", "host2", "default", NodeType.host); tester.addHost("id3", "host3", "default", NodeType.host); - assertEquals(3, tester.nodeRepository().nodes().getNodes().size()); + assertEquals(3, tester.nodeRepository().nodes().list().size()); tester.nodeRepository().nodes().park("host2", true, Agent.system, "Parking to unit test"); tester.nodeRepository().nodes().removeRecursively("host2"); - assertEquals(3, tester.nodeRepository().nodes().getNodes().size()); - assertEquals(1, tester.nodeRepository().nodes().getNodes(Node.State.deprovisioned).size()); + assertEquals(3, tester.nodeRepository().nodes().list().size()); + assertEquals(1, tester.nodeRepository().nodes().list(Node.State.deprovisioned).size()); } @Test @@ -120,7 +120,7 @@ public class NodeRepositoryTest { tester.addNode("node12", "node12", "host1", "docker", NodeType.tenant); tester.addNode("node20", "node20", "host2", "docker", NodeType.tenant); tester.setNodeState("node11", Node.State.active); - assertEquals(6, tester.nodeRepository().nodes().getNodes().size()); + assertEquals(6, tester.nodeRepository().nodes().list().size()); try { tester.nodeRepository().nodes().removeRecursively("host1"); @@ -128,18 +128,18 @@ public class NodeRepositoryTest { } catch (IllegalArgumentException ignored) { // Expected } - assertEquals(6, tester.nodeRepository().nodes().getNodes().size()); + assertEquals(6, tester.nodeRepository().nodes().list().size()); // Should be OK to delete host2 as both host2 and its only child, node20, are in state provisioned tester.nodeRepository().nodes().removeRecursively("host2"); - assertEquals(5, tester.nodeRepository().nodes().getNodes().size()); + assertEquals(5, tester.nodeRepository().nodes().list().size()); assertEquals(Node.State.deprovisioned, tester.nodeRepository().nodes().getNode("host2").get().state()); // Now node10 is in provisioned, set node11 to failed and node12 to ready, and it should be OK to delete host1 tester.nodeRepository().nodes().fail("node11", Agent.system, getClass().getSimpleName()); tester.nodeRepository().nodes().setReady("node12", Agent.system, getClass().getSimpleName()); tester.nodeRepository().nodes().removeRecursively("node12"); // Remove one of the children first instead - assertEquals(4, tester.nodeRepository().nodes().getNodes().size()); + assertEquals(4, tester.nodeRepository().nodes().list().size()); tester.nodeRepository().nodes().removeRecursively("host1"); assertEquals(Node.State.deprovisioned, tester.nodeRepository().nodes().getNode("host1").get().state()); assertEquals(IP.Config.EMPTY.primary(), tester.nodeRepository().nodes().getNode("host1").get().ipConfig().primary()); @@ -155,13 +155,13 @@ public class NodeRepositoryTest { tester.addNode("id2", cfg1, cfghost1, "docker", NodeType.config); tester.setNodeState(cfghost1, Node.State.active); tester.setNodeState(cfg1, Node.State.active); - assertEquals(2, tester.nodeRepository().nodes().getNodes().size()); + assertEquals(2, tester.nodeRepository().nodes().list().size()); try { tester.nodeRepository().nodes().removeRecursively(cfghost1); fail("Should not be able to delete host node, one of 
the children is in state active"); } catch (IllegalArgumentException ignored) { } - assertEquals(2, tester.nodeRepository().nodes().getNodes().size()); + assertEquals(2, tester.nodeRepository().nodes().list().size()); // Fail host and container tester.nodeRepository().nodes().failRecursively(cfghost1, Agent.system, getClass().getSimpleName()); @@ -225,7 +225,7 @@ public class NodeRepositoryTest { tester.setNodeState("node12", Node.State.active); tester.setNodeState("node20", Node.State.failed); - assertEquals(6, tester.nodeRepository().nodes().getNodes().size()); + assertEquals(6, tester.nodeRepository().nodes().list().size()); // Should be OK to dirty host2 as it is in provisioned and its only child is in failed tester.nodeRepository().nodes().deallocateRecursively("host2", Agent.system, NodeRepositoryTest.class.getSimpleName()); @@ -267,8 +267,8 @@ public class NodeRepositoryTest { tester.setNodeState("node1", Node.State.failed); tester.nodeRepository().nodes().breakfixRecursively("host1", Agent.system, reason); - assertEquals(1, tester.nodeRepository().nodes().getNodes().size()); - Node node = tester.nodeRepository().nodes().getNodes().get(0); + assertEquals(1, tester.nodeRepository().nodes().list().size()); + Node node = tester.nodeRepository().nodes().list().first().get(); assertEquals("host1", node.hostname()); assertEquals(Node.State.breakfixed, node.state()); } @@ -279,7 +279,7 @@ public class NodeRepositoryTest { private static Set filterNodes(NodeRepositoryTester tester, Predicate filter) { return tester.nodeRepository().nodes() - .getNodes().stream() + .list().stream() .filter(filter) .map(Node::hostname) .collect(Collectors.toSet()); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java index 2f735742ed5..5cc25e83415 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java @@ -20,6 +20,7 @@ import com.yahoo.vespa.flags.PermanentFlags; import com.yahoo.vespa.flags.custom.ClusterCapacity; import com.yahoo.vespa.flags.custom.SharedHost; import com.yahoo.vespa.hosted.provision.Node; +import com.yahoo.vespa.hosted.provision.NodeList; import com.yahoo.vespa.hosted.provision.NodeRepository; import com.yahoo.vespa.hosted.provision.node.Address; import com.yahoo.vespa.hosted.provision.node.Agent; @@ -88,7 +89,7 @@ public class DynamicProvisioningMaintainerTest { tester.maintainer.maintain(); assertEquals(Set.of("host4", "host4-1"), - tester.nodeRepository.nodes().getNodes(Node.State.failed).stream().map(Node::hostname).collect(Collectors.toSet())); + tester.nodeRepository.nodes().list(Node.State.failed).stream().map(Node::hostname).collect(Collectors.toSet())); } @Test @@ -123,7 +124,7 @@ public class DynamicProvisioningMaintainerTest { ClusterCapacity.class); assertEquals(0, tester.hostProvisioner.provisionedHosts.size()); - assertEquals(11, tester.nodeRepository.nodes().getNodes().size()); + assertEquals(11, tester.nodeRepository.nodes().list().size()); assertTrue(tester.nodeRepository.nodes().getNode("host2").isPresent()); assertTrue(tester.nodeRepository.nodes().getNode("host2-1").isPresent()); assertTrue(tester.nodeRepository.nodes().getNode("host3").isPresent()); @@ -134,7 +135,7 @@ public class 
DynamicProvisioningMaintainerTest { assertEquals(2, tester.hostProvisioner.provisionedHosts.size()); assertEquals(2, tester.provisionedHostsMatching(new NodeResources(48, 128, 1000, 10))); - List nodesAfter = tester.nodeRepository.nodes().getNodes(); + NodeList nodesAfter = tester.nodeRepository.nodes().list(); assertEquals(11, nodesAfter.size()); // 2 removed, 2 added assertTrue("Failed host 'host2' is deprovisioned", tester.nodeRepository.nodes().getNode("host2").isEmpty()); assertTrue("Node on deprovisioned host removed", tester.nodeRepository.nodes().getNode("host2-1").isEmpty()); @@ -154,7 +155,7 @@ public class DynamicProvisioningMaintainerTest { ClusterCapacity.class); assertEquals(0, tester.hostProvisioner.provisionedHosts.size()); - assertEquals(11, tester.nodeRepository.nodes().getNodes().size()); + assertEquals(11, tester.nodeRepository.nodes().list().size()); assertTrue(tester.nodeRepository.nodes().getNode("host2").isPresent()); assertTrue(tester.nodeRepository.nodes().getNode("host2-1").isPresent()); assertTrue(tester.nodeRepository.nodes().getNode("host3").isPresent()); @@ -194,7 +195,7 @@ public class DynamicProvisioningMaintainerTest { assertEquals(2, tester.hostProvisioner.provisionedHosts.size()); assertEquals(2, tester.provisionedHostsMatching(new NodeResources(48, 128, 1000, 10))); - assertEquals(10, tester.nodeRepository.nodes().getNodes().size()); // 3 removed, 2 added + assertEquals(10, tester.nodeRepository.nodes().list().size()); // 3 removed, 2 added assertTrue("preprovision capacity is prefered on shared hosts", tester.nodeRepository.nodes().getNode("host3").isEmpty()); assertTrue(tester.nodeRepository.nodes().getNode("hostname100").isPresent()); assertTrue(tester.nodeRepository.nodes().getNode("hostname101").isPresent()); @@ -210,7 +211,7 @@ public class DynamicProvisioningMaintainerTest { assertEquals("one provisioned host has been deprovisioned, so there are 2 -> 1 provisioned hosts", 1, tester.hostProvisioner.provisionedHosts.size()); assertEquals(1, tester.provisionedHostsMatching(new NodeResources(48, 128, 1000, 10))); - assertEquals(9, tester.nodeRepository.nodes().getNodes().size()); // 4 removed, 2 added + assertEquals(9, tester.nodeRepository.nodes().list().size()); // 4 removed, 2 added if (tester.nodeRepository.nodes().getNode("hostname100").isPresent()) { assertTrue("hostname101 is superfluous and should have been deprovisioned", tester.nodeRepository.nodes().getNode("hostname101").isEmpty()); @@ -224,7 +225,7 @@ public class DynamicProvisioningMaintainerTest { private void verifyFirstMaintain(DynamicProvisioningTester tester) { assertEquals(1, tester.hostProvisioner.provisionedHosts.size()); assertEquals(1, tester.provisionedHostsMatching(new NodeResources(48, 128, 1000, 10))); - assertEquals(10, tester.nodeRepository.nodes().getNodes().size()); // 2 removed, 1 added + assertEquals(10, tester.nodeRepository.nodes().list().size()); // 2 removed, 1 added assertTrue("Failed host 'host2' is deprovisioned", tester.nodeRepository.nodes().getNode("host2").isEmpty()); assertTrue("Node on deprovisioned host removed", tester.nodeRepository.nodes().getNode("host2-1").isEmpty()); assertTrue("One 1-30-20-3 node fits on host3", tester.nodeRepository.nodes().getNode("host3").isPresent()); @@ -482,9 +483,9 @@ public class DynamicProvisioningMaintainerTest { } private void assertNodesUnchanged() { - List nodes = nodeRepository.nodes().getNodes(); + NodeList nodes = nodeRepository.nodes().list(); maintainer.maintain(); - assertEquals("Nodes are unchanged after 
maintenance run", nodes, nodeRepository.nodes().getNodes()); + assertEquals("Nodes are unchanged after maintenance run", nodes, nodeRepository.nodes().list()); } } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java index d02d08f7736..6a10b930871 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java @@ -341,7 +341,7 @@ public class FailedExpirerTest { public void assertNodesIn(Node.State state, String... hostnames) { assertEquals(Stream.of(hostnames).collect(Collectors.toSet()), nodeRepository.nodes() - .getNodes(state).stream() + .list(state).stream() .map(Node::hostname) .collect(Collectors.toSet())); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java index 4bee276af6d..0f24826f1be 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java @@ -15,6 +15,7 @@ import com.yahoo.config.provision.RegionName; import com.yahoo.config.provision.TenantName; import com.yahoo.config.provision.Zone; import com.yahoo.vespa.hosted.provision.Node; +import com.yahoo.vespa.hosted.provision.NodeList; import com.yahoo.vespa.hosted.provision.node.Agent; import com.yahoo.vespa.hosted.provision.node.History; import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester; @@ -65,14 +66,14 @@ public class InactiveAndFailedExpirerTest { // Inactive times out tester.advanceTime(Duration.ofMinutes(14)); new InactiveExpirer(tester.nodeRepository(), Duration.ofMinutes(10), new TestMetric()).run(); - assertEquals(0, tester.nodeRepository().nodes().getNodes(Node.State.inactive).size()); - List dirty = tester.nodeRepository().nodes().getNodes(Node.State.dirty); + assertEquals(0, tester.nodeRepository().nodes().list(Node.State.inactive).size()); + NodeList dirty = tester.nodeRepository().nodes().list(Node.State.dirty); assertEquals(2, dirty.size()); - assertFalse(dirty.get(0).allocation().isPresent()); - assertFalse(dirty.get(1).allocation().isPresent()); + assertFalse(dirty.asList().get(0).allocation().isPresent()); + assertFalse(dirty.asList().get(1).allocation().isPresent()); // One node is set back to ready - Node ready = tester.nodeRepository().nodes().setReady(Collections.singletonList(dirty.get(0)), Agent.system, getClass().getSimpleName()).get(0); + Node ready = tester.nodeRepository().nodes().setReady(Collections.singletonList(dirty.asList().get(0)), Agent.system, getClass().getSimpleName()).get(0); assertEquals("Allocated history is removed on readying", Arrays.asList(History.Event.Type.provisioned, History.Event.Type.readied), ready.history().events().stream().map(History.Event::type).collect(Collectors.toList())); @@ -108,11 +109,11 @@ public class InactiveAndFailedExpirerTest { // Inactive times out and node is moved to dirty tester.advanceTime(Duration.ofMinutes(14)); new InactiveExpirer(tester.nodeRepository(), Duration.ofMinutes(10), new TestMetric()).run(); - List dirty = 
tester.nodeRepository().nodes().getNodes(Node.State.dirty); + NodeList dirty = tester.nodeRepository().nodes().list(Node.State.dirty); assertEquals(2, dirty.size()); // Reboot generation is increased - assertEquals(wantedRebootGeneration + 1, dirty.get(0).status().reboot().wanted()); + assertEquals(wantedRebootGeneration + 1, dirty.first().get().status().reboot().wanted()); } @Test @@ -154,12 +155,12 @@ public class InactiveAndFailedExpirerTest { doThrow(new RuntimeException()).when(orchestrator).acquirePermissionToRemove(any()); new RetiredExpirer(tester.nodeRepository(), tester.orchestrator(), deployer, new TestMetric(), Duration.ofDays(30), Duration.ofMinutes(10)).run(); - assertEquals(1, tester.nodeRepository().nodes().getNodes(Node.State.inactive).size()); + assertEquals(1, tester.nodeRepository().nodes().list(Node.State.inactive).size()); // Inactive times out and one node is moved to parked tester.advanceTime(Duration.ofMinutes(11)); // Trigger InactiveExpirer new InactiveExpirer(tester.nodeRepository(), Duration.ofMinutes(10), new TestMetric()).run(); - assertEquals(1, tester.nodeRepository().nodes().getNodes(Node.State.parked).size()); + assertEquals(1, tester.nodeRepository().nodes().list(Node.State.parked).size()); } @Test @@ -181,11 +182,10 @@ public class InactiveAndFailedExpirerTest { // See that nodes are moved to dirty immediately. new InactiveExpirer(tester.nodeRepository(), Duration.ofMinutes(10), new TestMetric()).run(); - assertEquals(0, tester.nodeRepository().nodes().getNodes(Node.State.inactive).size()); - List dirty = tester.nodeRepository().nodes().getNodes(Node.State.dirty); + assertEquals(0, tester.nodeRepository().nodes().list(Node.State.inactive).size()); + NodeList dirty = tester.nodeRepository().nodes().list(Node.State.dirty); assertEquals(1, dirty.size()); - assertFalse(dirty.get(0).allocation().isPresent()); - + assertFalse(dirty.first().get().allocation().isPresent()); } @Test @@ -206,7 +206,7 @@ public class InactiveAndFailedExpirerTest { tester.patchNodes(inactiveNodes, (node) -> node.withWantToRetire(true, true, Agent.system, tester.clock().instant())); tester.advanceTime(Duration.ofMinutes(11)); new InactiveExpirer(tester.nodeRepository(), Duration.ofMinutes(10), new TestMetric()).run(); - assertEquals(2, tester.nodeRepository().nodes().getNodes(Node.State.parked).size()); + assertEquals(2, tester.nodeRepository().nodes().list(Node.State.parked).size()); } } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java index dba3ca6a92e..d21f7dfd459 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java @@ -212,7 +212,7 @@ public class NodeFailTester { } public void allNodesMakeAConfigRequestExcept(List deadNodes) { - for (Node node : nodeRepository.nodes().getNodes()) { + for (Node node : nodeRepository.nodes().list()) { if ( ! 
deadNodes.contains(node)) hostLivenessTracker.receivedRequestFrom(node.hostname()); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java index 50b99afbca5..401b4093798 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java @@ -51,7 +51,7 @@ public class NodeFailerTest { String hostWithFailureReports = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2); // Set failure report to the parent and all its children. - tester.nodeRepository.nodes().getNodes().stream() + tester.nodeRepository.nodes().list().stream() .filter(node -> node.hostname().equals(hostWithFailureReports)) .forEach(node -> { Node updatedNode = node.with(node.reports().withReport(badTotalMemorySizeReport)); @@ -101,7 +101,7 @@ public class NodeFailerTest { tester.suspend(hostWithHwFailure); tester.runMaintainers(); assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(hostWithHwFailure).get().state()); - assertEquals(4, tester.nodeRepository.nodes().getNodes(Node.State.failed).size()); + assertEquals(4, tester.nodeRepository.nodes().list(Node.State.failed).size()); } @Test @@ -121,7 +121,7 @@ public class NodeFailerTest { // Set failure report to the parent and all its children. Report badTotalMemorySizeReport = Report.basicReport("badTotalMemorySize", HARD_FAIL, Instant.now(), "too low"); - tester.nodeRepository.nodes().getNodes().stream() + tester.nodeRepository.nodes().list().stream() .filter(node -> node.hostname().equals(hostWithFailureReports)) .forEach(node -> { Node updatedNode = node.with(node.reports().withReport(badTotalMemorySizeReport)); @@ -482,7 +482,7 @@ public class NodeFailerTest { tester.runMaintainers(); tester.clock.advance(Duration.ofMinutes(interval)); tester.allNodesMakeAConfigRequestExcept(); - assertEquals(3 + 1, tester.nodeRepository.nodes().getNodes(Node.State.failed).size()); + assertEquals(3 + 1, tester.nodeRepository.nodes().list(Node.State.failed).size()); } tester.clock.advance(Duration.ofMinutes(30)); @@ -589,7 +589,7 @@ public class NodeFailerTest { tester.nodeRepository.nodes().write(readyNode.with(new Reports().withReport(badTotalMemorySizeReport)), () -> {}); tester.runMaintainers(); - assertEquals(1, tester.nodeRepository.nodes().getNodes(Node.State.failed).size()); + assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).size()); } @Test @@ -610,7 +610,7 @@ public class NodeFailerTest { // 2 nodes are failed (the minimum amount that are always allowed to fail) tester.runMaintainers(); - assertEquals(2, tester.nodeRepository.nodes().getNodes(Node.State.failed).size()); + assertEquals(2, tester.nodeRepository.nodes().list(Node.State.failed).size()); assertEquals("Throttling is indicated by the metric", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric)); assertEquals("Throttled node failures", 2, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric)); @@ -620,7 +620,7 @@ public class NodeFailerTest { tester.allNodesMakeAConfigRequestExcept(deadNodes); } tester.runMaintainers(); - assertEquals(2, tester.nodeRepository.nodes().getNodes(Node.State.failed).size()); + assertEquals(2, tester.nodeRepository.nodes().list(Node.State.failed).size()); assertEquals("Throttling is indicated by the metric", 1, 
tester.metric.values.get(NodeFailer.throttlingActiveMetric)); assertEquals("Throttled node failures", 2, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric)); @@ -630,7 +630,7 @@ public class NodeFailerTest { tester.allNodesMakeAConfigRequestExcept(deadNodes); } tester.runMaintainers(); - assertEquals(4, tester.nodeRepository.nodes().getNodes(Node.State.failed).size()); + assertEquals(4, tester.nodeRepository.nodes().list(Node.State.failed).size()); // 24 more hours pass, nothing happens for (int minutes = 0, interval = 30; minutes < 24 * 60; minutes += interval) { @@ -652,7 +652,7 @@ public class NodeFailerTest { assertEquals(4 + /* already failed */ 2 + /* hosts */ (2 * 3) /* containers per host */, - tester.nodeRepository.nodes().getNodes(Node.State.failed).size()); + tester.nodeRepository.nodes().list(Node.State.failed).size()); assertEquals("Throttling is indicated by the metric", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric)); assertEquals("Throttled host failures", 1, tester.metric.values.get(NodeFailer.throttledHostFailuresMetric)); @@ -662,14 +662,14 @@ public class NodeFailerTest { tester.allNodesMakeAConfigRequestExcept(deadNodes); } tester.runMaintainers(); - assertEquals(12, tester.nodeRepository.nodes().getNodes(Node.State.failed).size()); + assertEquals(12, tester.nodeRepository.nodes().list(Node.State.failed).size()); assertEquals("Throttling is indicated by the metric", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric)); assertEquals("Throttled host failures", 1, tester.metric.values.get(NodeFailer.throttledHostFailuresMetric)); // The final host and its containers are failed out tester.clock.advance(Duration.ofMinutes(30)); tester.runMaintainers(); - assertEquals(16, tester.nodeRepository.nodes().getNodes(Node.State.failed).size()); + assertEquals(16, tester.nodeRepository.nodes().list(Node.State.failed).size()); assertEquals("Throttling is not indicated by the metric, as no throttled attempt is made", 0, tester.metric.values.get(NodeFailer.throttlingActiveMetric)); assertEquals("No throttled node failures", 0, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric)); @@ -677,7 +677,7 @@ public class NodeFailerTest { tester.clock.advance(Duration.ofHours(25)); tester.allNodesMakeAConfigRequestExcept(deadNodes); tester.runMaintainers(); - assertEquals(16, tester.nodeRepository.nodes().getNodes(Node.State.failed).size()); + assertEquals(16, tester.nodeRepository.nodes().list(Node.State.failed).size()); assertEquals("Throttling is not indicated by the metric", 0, tester.metric.values.get(NodeFailer.throttlingActiveMetric)); assertEquals("No throttled node failures", 0, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric)); } @@ -695,7 +695,7 @@ public class NodeFailerTest { } tester.runMaintainers(); // 2% are allowed to fail - assertEquals(10, tester.nodeRepository.nodes().getNodes(Node.State.failed).size()); + assertEquals(10, tester.nodeRepository.nodes().list(Node.State.failed).size()); assertEquals("Throttling is indicated by the metric.", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric)); assertEquals("Throttled node failures", 5, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric)); @@ -705,7 +705,7 @@ public class NodeFailerTest { tester.allNodesMakeAConfigRequestExcept(deadNodes); } tester.runMaintainers(); - assertEquals(10, tester.nodeRepository.nodes().getNodes(Node.State.failed).size()); + assertEquals(10, tester.nodeRepository.nodes().list(Node.State.failed).size()); 
assertEquals("Throttling is indicated by the metric.", 1, tester.metric.values.get(NodeFailer.throttlingActiveMetric)); assertEquals("Throttled node failures", 5, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric)); @@ -715,7 +715,7 @@ public class NodeFailerTest { tester.allNodesMakeAConfigRequestExcept(deadNodes); } tester.runMaintainers(); - assertEquals(15, tester.nodeRepository.nodes().getNodes(Node.State.failed).size()); + assertEquals(15, tester.nodeRepository.nodes().list(Node.State.failed).size()); assertEquals("Throttling is not indicated by the metric, as no throttled attempt is made.", 0, tester.metric.values.get(NodeFailer.throttlingActiveMetric)); assertEquals("No throttled node failures", 0, tester.metric.values.get(NodeFailer.throttledNodeFailuresMetric)); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooterTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooterTest.java index b9f3985172b..cf883389225 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooterTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooterTest.java @@ -113,7 +113,7 @@ public class NodeRebooterTest { /** Set current reboot generation to the wanted reboot generation whenever it is larger (i.e record a reboot) */ private void simulateReboot(NodeRepository nodeRepository) { - for (Node node : nodeRepository.nodes().getNodes(Node.State.ready, Node.State.active)) { + for (Node node : nodeRepository.nodes().list(Node.State.ready, Node.State.active)) { if (node.status().reboot().wanted() > node.status().reboot().current()) nodeRepository.nodes().write(node.withCurrentRebootGeneration(node.status().reboot().wanted(), nodeRepository.clock().instant()), () -> {}); @@ -129,7 +129,7 @@ public class NodeRebooterTest { private void simulateOsUpgrade(NodeRepository nodeRepository) { var wantedOsVersion = nodeRepository.osVersions().targetFor(NodeType.host); if (wantedOsVersion.isEmpty()) return; - for (Node node : nodeRepository.nodes().getNodes(Node.State.ready, Node.State.active)) { + for (Node node : nodeRepository.nodes().list(Node.State.ready, Node.State.active)) { if (wantedOsVersion.get().isAfter(node.status().osVersion().current().orElse(Version.emptyVersion))) nodeRepository.nodes().write(node.withCurrentOsVersion(wantedOsVersion.get(), nodeRepository.clock().instant()), () -> {}); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java index 1f1e6a79317..ab00d098130 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java @@ -108,7 +108,7 @@ public class PeriodicApplicationMaintainerTest { fixture.activate(); // Freeze active nodes to simulate an application being deleted during a maintenance run - List frozenActiveNodes = nodeRepository.nodes().getNodes(Node.State.active); + NodeList frozenActiveNodes = nodeRepository.nodes().list(Node.State.active); // Remove one application without letting the application maintainer know about it fixture.remove(fixture.app2); @@ -261,9 +261,9 @@ public class PeriodicApplicationMaintainerTest { private 
static class TestablePeriodicApplicationMaintainer extends PeriodicApplicationMaintainer {
 
-        private List<Node> overriddenNodesNeedingMaintenance;
+        private NodeList overriddenNodesNeedingMaintenance;
 
-        void setOverriddenNodesNeedingMaintenance(List<Node> overriddenNodesNeedingMaintenance) {
+        void setOverriddenNodesNeedingMaintenance(NodeList overriddenNodesNeedingMaintenance) {
             this.overriddenNodesNeedingMaintenance = overriddenNodesNeedingMaintenance;
         }
 
@@ -273,7 +273,7 @@ public class PeriodicApplicationMaintainerTest {
         }
 
         @Override
-        protected List<Node> nodesNeedingMaintenance() {
+        protected NodeList nodesNeedingMaintenance() {
             return overriddenNodesNeedingMaintenance != null
                     ? overriddenNodesNeedingMaintenance
                     : super.nodesNeedingMaintenance();
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java
index bc2676c0acf..4f08fa9ab9e 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java
@@ -17,6 +17,7 @@ import com.yahoo.config.provisioning.FlavorsConfig;
 import com.yahoo.test.ManualClock;
 import com.yahoo.transaction.NestedTransaction;
 import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
 import com.yahoo.vespa.hosted.provision.NodeRepository;
 import com.yahoo.vespa.hosted.provision.node.Agent;
 import com.yahoo.vespa.hosted.provision.provisioning.FlavorConfigBuilder;
@@ -92,7 +93,7 @@ public class RebalancerTest {
         tester.maintain();
         assertTrue("Want to retire is reset", tester.getNodes(Node.State.active).stream().noneMatch(node -> node.status().wantToRetire()));
         assertEquals("Reserved node was moved to dirty", 1, tester.getNodes(Node.State.dirty).size());
-        String reservedHostname = tester.getNodes(Node.State.dirty).get(0).hostname();
+        String reservedHostname = tester.getNodes(Node.State.dirty).first().get().hostname();
         tester.nodeRepository().nodes().setReady(reservedHostname, Agent.system, "Cleanup");
         tester.nodeRepository().nodes().removeRecursively(reservedHostname);
 
@@ -185,7 +186,7 @@ public class RebalancerTest {
 
         Optional<Node> getNode(String hostname) { return tester.nodeRepository().nodes().getNode(hostname); }
 
-        List<Node> getNodes(Node.State nodeState) { return tester.nodeRepository().nodes().getNodes(nodeState); }
+        NodeList getNodes(Node.State nodeState) { return tester.nodeRepository().nodes().list(nodeState); }
 
         Node getNode(ApplicationId applicationId) { return tester.nodeRepository().nodes().getNodes(applicationId).get(0); }
 
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
index 7005a64127a..e6e685899a5 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
@@ -107,7 +107,7 @@ public class OsVersionsTest {
             tester.nodeRepository().nodes().fail(host.hostname(), Agent.system, OsVersions.class.getSimpleName());
             tester.nodeRepository().nodes().removeRecursively(host.hostname());
         }
-        assertEquals(10, tester.nodeRepository().nodes().getNodes(Node.State.deprovisioned).size());
+        assertEquals(10, tester.nodeRepository().nodes().list(Node.State.deprovisioned).size());
 
         // Set target
         var version1 = 
Version.fromString("7.1"); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java index 02ee41a5226..48a2d47c173 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java @@ -413,8 +413,8 @@ public class DockerProvisioningTest { } else { assertEquals(0, tester.getNodes(app1, Node.State.inactive).size()); - assertEquals(2, tester.nodeRepository().nodes().getNodes(Node.State.dirty).size()); - tester.nodeRepository().nodes().setReady(tester.nodeRepository().nodes().getNodes(Node.State.dirty), Agent.system, "test"); + assertEquals(2, tester.nodeRepository().nodes().list(Node.State.dirty).size()); + tester.nodeRepository().nodes().setReady(tester.nodeRepository().nodes().list(Node.State.dirty).asList(), Agent.system, "test"); tester.activate(app1, cluster1, Capacity.from(new ClusterResources(4, 1, r))); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java index 0c8e19e0793..a0f5fd21f2d 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java @@ -522,11 +522,10 @@ public class DynamicDockerAllocationTest { } private List findSpareCapacity(ProvisioningTester tester) { - List nodes = tester.nodeRepository().nodes().getNodes(State.values()); - NodeList nl = NodeList.copyOf(nodes); + NodeList nodes = tester.nodeRepository().nodes().list(State.values()); return nodes.stream() .filter(n -> n.type() == NodeType.host) - .filter(n -> nl.childrenOf(n).size() == 0) // Nodes without children + .filter(n -> nodes.childrenOf(n).size() == 0) // Nodes without children .collect(Collectors.toList()); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeTypeProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeTypeProvisioningTest.java index 9b564232111..acd3311651f 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeTypeProvisioningTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeTypeProvisioningTest.java @@ -265,7 +265,7 @@ public class NodeTypeProvisioningTest { // All the nodes that were marked with wantToRetire earlier are now dirty assertEquals(nodesToRetire.stream().map(Node::hostname).collect(Collectors.toSet()), - tester.nodeRepository().nodes().getNodes(Node.State.dirty).stream().map(Node::hostname).collect(Collectors.toSet())); + tester.nodeRepository().nodes().list(Node.State.dirty).stream().map(Node::hostname).collect(Collectors.toSet())); } private List deployProxies(ApplicationId application, ProvisioningTester tester) { diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java index 33f33836b8d..cf9b6dbd861 100644 --- 
a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java @@ -109,7 +109,7 @@ public class ProvisioningTest { tester.remove(application1); assertEquals(tester.toHostNames(previouslyActive.not().container().asList()), tester.toHostNames(tester.nodeRepository().nodes().getNodes(application1, Node.State.inactive))); - assertTrue(tester.nodeRepository().nodes().getNodes(Node.State.dirty).containsAll(previouslyActive.container().asList())); + assertTrue(tester.nodeRepository().nodes().list(Node.State.dirty).asList().containsAll(previouslyActive.container().asList())); assertEquals(0, tester.getNodes(application1, Node.State.active).size()); assertTrue(tester.nodeRepository().applications().get(application1).isEmpty()); @@ -127,7 +127,7 @@ public class ProvisioningTest { tester.activate(application2, state2App2.allHosts); // deploy first app again - tester.nodeRepository().nodes().setReady(tester.nodeRepository().nodes().getNodes(Node.State.dirty), Agent.system, "recycled"); + tester.nodeRepository().nodes().setReady(tester.nodeRepository().nodes().list(Node.State.dirty).asList(), Agent.system, "recycled"); SystemState state7 = prepare(application1, 2, 2, 3, 3, defaultResources, tester); state7.assertEquals(state1); tester.activate(application1, state7.allHosts); @@ -222,7 +222,7 @@ public class ProvisioningTest { SystemState state3 = prepare(application1, 2, 2, 3, 3, defaultResources, tester); tester.activate(application1, state3.allHosts); assertEquals("Superfluous container nodes are dirtyed", - 3-2 + 4-2, tester.nodeRepository().nodes().getNodes(Node.State.dirty).size()); + 3-2 + 4-2, tester.nodeRepository().nodes().list(Node.State.dirty).size()); assertEquals("Superfluous content nodes are retired", 4-3 + 5-3, tester.getNodes(application1, Node.State.active).retired().size()); @@ -245,7 +245,7 @@ public class ProvisioningTest { SystemState state5 = prepare(application1, 2, 2, 3, 3, defaultResources, tester); tester.activate(application1, state5.allHosts); assertEquals("Superfluous container nodes are also dirtyed", - 4-2 + 5-2 + 1 + 4-2, tester.nodeRepository().nodes().getNodes(Node.State.dirty).size()); + 4-2 + 5-2 + 1 + 4-2, tester.nodeRepository().nodes().list(Node.State.dirty).size()); assertEquals("Superfluous content nodes are retired", 5-3 + 6-3 - 1, tester.getNodes(application1, Node.State.active).retired().size()); -- cgit v1.2.3 From 55966811bea4d23a7bba84e2145dabfb2bcd68e1 Mon Sep 17 00:00:00 2001 From: Jon Bratseth Date: Tue, 9 Feb 2021 22:11:33 +0100 Subject: Remove getNodes(...): Always use list(...) 
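This completes the removal started in the previous commit. As a sketch of the
mechanical call-site rewrite applied throughout (illustrative only, not a line
from this patch; it uses the NodeList and asList() shown above):

    // before: getNodes(...) returned a mutable List<Node> copy
    List<Node> active = nodeRepository().nodes().getNodes(Node.State.active);

    // after: list(...) returns a filterable NodeList;
    // call asList() where a List<Node> is still required
    NodeList active = nodeRepository().nodes().list(Node.State.active);
    List<Node> copy = active.asList();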
--- .../hosted/provision/autoscale/Autoscaler.java | 2 +- .../maintenance/MaintenanceDeployment.java | 4 +- .../yahoo/vespa/hosted/provision/node/Nodes.java | 10 +---- .../provisioning/NodeRepositoryProvisioner.java | 2 +- .../hosted/provision/provisioning/Preparer.java | 4 +- .../provision/restapi/NodesV2ApiHandler.java | 2 +- .../provision/testutils/ServiceMonitorStub.java | 2 +- .../provision/autoscale/AutoscalingTest.java | 4 +- .../provision/autoscale/AutoscalingTester.java | 9 +++-- .../autoscale/MetricsV2MetricsFetcherTest.java | 4 +- .../maintenance/AutoscalingMaintainerTester.java | 3 +- .../maintenance/LoadBalancerExpirerTest.java | 2 +- .../provision/maintenance/NodeFailTester.java | 13 +++--- .../provision/maintenance/NodeFailerTest.java | 22 +++++----- .../OperatorChangeApplicationMaintainerTest.java | 12 +++--- .../PeriodicApplicationMaintainerTest.java | 26 ++++++------ .../provision/maintenance/RebalancerTest.java | 4 +- .../provision/maintenance/RetiredExpirerTest.java | 28 ++++++------- .../ScalingSuggestionsMaintainerTest.java | 3 +- .../provisioning/AclProvisioningTest.java | 25 ++++++------ .../provisioning/DynamicDockerAllocationTest.java | 12 +++--- .../provisioning/DynamicDockerProvisionTest.java | 4 +- .../provisioning/LoadBalancerProvisionerTest.java | 13 +++--- .../provision/provisioning/ProvisioningTest.java | 47 +++++++++++----------- .../provision/provisioning/ProvisioningTester.java | 28 ++++++------- 25 files changed, 143 insertions(+), 142 deletions(-) (limited to 'node-repository') diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java index 81fa7ed2d4b..2f01f6b31ae 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java @@ -176,7 +176,7 @@ public class Autoscaler { return false; // A deployment is ongoing - if (nodeRepository.nodes().getNodes(nodes.first().get().allocation().get().owner(), Node.State.reserved).size() > 0) + if (nodeRepository.nodes().list(nodes.first().get().allocation().get().owner(), Node.State.reserved).size() > 0) return false; return true; diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java index e8f216c793a..47712847754 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java @@ -116,7 +116,7 @@ class MaintenanceDeployment implements Closeable { Deployer deployer, NodeRepository nodeRepository) { if (lock.isEmpty()) return Optional.empty(); - if (nodeRepository.nodes().getNodes(application, Node.State.active).isEmpty()) return Optional.empty(); + if (nodeRepository.nodes().list(application, Node.State.active).isEmpty()) return Optional.empty(); return deployer.deployFromLocalActive(application); } @@ -168,7 +168,7 @@ class MaintenanceDeployment implements Closeable { if ( ! 
         if ( ! deployment.prepare()) return false;
         if (verifyTarget) {
             expectedNewNode =
-                    nodeRepository.nodes().getNodes(application, Node.State.reserved).stream()
+                    nodeRepository.nodes().list(application, Node.State.reserved).stream()
                                   .filter(n -> !n.hostname().equals(node.hostname()))
                                   .filter(n -> n.allocation().get().membership().cluster().id().equals(node.allocation().get().membership().cluster().id()))
                                   .findAny();
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
index b235d6b0ff0..904bf08676d 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
@@ -100,12 +100,7 @@ public class Nodes {
      * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
      */
     public NodeList list(ApplicationId application, Node.State... inState) {
-        return NodeList.copyOf(getNodes(application, inState));
-    }
-
-    /** Returns a filterable list of all nodes of an application */
-    public NodeList list(ApplicationId application) {
-        return NodeList.copyOf(getNodes(application));
+        return NodeList.copyOf(db.readNodes(application, inState));
     }
 
     /** Returns a locked list of all nodes in this repository */
@@ -113,7 +108,6 @@ public class Nodes {
         return new LockedNodeList(list().asList(), lock);
     }
 
-    public List<Node> getNodes(ApplicationId id, Node.State... inState) { return db.readNodes(id, inState); }
     public List<Node> getInactive() { return db.readNodes(Node.State.inactive); }
     public List<Node> getFailed() { return db.readNodes(Node.State.failed); }
 
@@ -421,7 +415,7 @@ public class Nodes {
         // TODO: Work out a safe lock acquisition strategy for moves, e.g. migrate to lockNode.
         try (Mutex lock = lock(node)) {
             if (toState == Node.State.active) {
-                for (Node currentActive : getNodes(node.allocation().get().owner(), Node.State.active)) {
+                for (Node currentActive : list(node.allocation().get().owner(), Node.State.active)) {
                     if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                         && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                         illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
index 825ea82e95c..a5057bd1134 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
@@ -150,7 +150,7 @@ public class NodeRepositoryProvisioner implements Provisioner {
     private ClusterResources currentResources(ApplicationId applicationId,
                                               ClusterSpec clusterSpec,
                                               Capacity requested) {
-        List<Node> nodes = NodeList.copyOf(nodeRepository.nodes().getNodes(applicationId, Node.State.active))
+        List<Node> nodes = nodeRepository.nodes().list(applicationId, Node.State.active)
                                    .cluster(clusterSpec.id())
                                    .not().retired()
                                    .not().removable()
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
index 18ab9b70491..41a6b0d42b1 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
@@ -85,7 +85,7 @@ class Preparer {
      */
     private List<Node> findNodesInRemovableGroups(ApplicationId application, ClusterSpec requestedCluster, int wantedGroups) {
         List<Node> surplusNodes = new ArrayList<>(0);
-        for (Node node : nodeRepository.nodes().getNodes(application, Node.State.active)) {
+        for (Node node : nodeRepository.nodes().list(application, Node.State.active)) {
             ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
             if ( ! nodeCluster.id().equals(requestedCluster.id())) continue;
             if ( ! nodeCluster.type().equals(requestedCluster.type())) continue;
@@ -127,7 +127,7 @@ class Preparer {
      */
     private int findHighestIndex(ApplicationId application, ClusterSpec cluster) {
         int highestIndex = -1;
-        for (Node node : nodeRepository.nodes().getNodes(application, Node.State.allocatedStates().toArray(new Node.State[0]))) {
+        for (Node node : nodeRepository.nodes().list(application, Node.State.allocatedStates().toArray(new Node.State[0]))) {
             ClusterSpec nodeCluster = node.allocation().get().membership().cluster();
             if ( ! nodeCluster.id().equals(cluster.id())) continue;
             if ( ! nodeCluster.type().equals(cluster.type())) continue;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java
index a6a58b6a9dd..b872e2cd9cb 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java
@@ -435,7 +435,7 @@ public class NodesV2ApiHandler extends LoggingRequestHandler {
         if (application.isEmpty())
             return ErrorResponse.notFoundError("No application '" + id + "'");
         Slime slime = ApplicationSerializer.toSlime(application.get(),
-                                                    nodeRepository.nodes().getNodes(id, Node.State.active),
+                                                    nodeRepository.nodes().list(id, Node.State.active).asList(),
                                                     withPath("/nodes/v2/applications/" + id, uri));
         return new SlimeJsonResponse(slime);
     }
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/ServiceMonitorStub.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/ServiceMonitorStub.java
index ce30baa3862..d2a2544c89c 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/ServiceMonitorStub.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/ServiceMonitorStub.java
@@ -70,7 +70,7 @@ public class ServiceMonitorStub implements ServiceMonitor {
         Map status = new HashMap<>();
         for (Map.Entry app : apps.entrySet()) {
             Set serviceInstances = new HashSet<>();
-            for (Node node : nodeRepository.nodes().getNodes(app.getValue().id(), Node.State.active)) {
+            for (Node node : nodeRepository.nodes().list(app.getValue().id(), Node.State.active)) {
                 serviceInstances.add(new ServiceInstance(new ConfigId("configid"),
                                                          new HostName(node.hostname()),
                                                          getHostStatus(node.hostname())));
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
index a5f3d5f2828..dbb8c00e44d 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
@@ -116,7 +116,7 @@ public class AutoscalingTest {
 
         // deploy with slow
         tester.deploy(application1, cluster1, 5, 1, hostResources);
-        tester.nodeRepository().nodes().getNodes(application1).stream()
+        tester.nodeRepository().nodes().list(application1).stream()
               .allMatch(n -> n.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.slow);
 
         tester.clock().advance(Duration.ofDays(2));
@@ -132,7 +132,7 @@ public class AutoscalingTest {
         assertEquals("Disk speed from min/max is used",
                      NodeResources.DiskSpeed.any, scaledResources.nodeResources().diskSpeed());
         tester.deploy(application1, cluster1, scaledResources);
-        tester.nodeRepository().nodes().getNodes(application1).stream()
+        tester.nodeRepository().nodes().list(application1).stream()
               .allMatch(n -> n.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any);
     }
 
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java
index e3148ad5de0..8a7f439304d 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java
@@ -17,6 +17,7 @@ import com.yahoo.config.provision.Zone;
 import com.yahoo.test.ManualClock;
 import com.yahoo.transaction.Mutex;
 import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
 import com.yahoo.vespa.hosted.provision.NodeRepository;
 import com.yahoo.vespa.hosted.provision.Nodelike;
 import com.yahoo.vespa.hosted.provision.applications.Application;
@@ -105,7 +106,7 @@ class AutoscalingTester {
 
     public void deactivateRetired(ApplicationId application, ClusterSpec cluster, ClusterResources resources) {
         try (Mutex lock = nodeRepository().nodes().lock(application)){
-            for (Node node : nodeRepository().nodes().getNodes(application, Node.State.active)) {
+            for (Node node : nodeRepository().nodes().list(application, Node.State.active)) {
                 if (node.allocation().get().membership().retired())
                     nodeRepository().nodes().write(node.with(node.allocation().get().removable(true)), lock);
             }
@@ -125,7 +126,7 @@ class AutoscalingTester {
      */
     public void addCpuMeasurements(float value, float otherResourcesLoad,
                                    int count, ApplicationId applicationId) {
-        List<Node> nodes = nodeRepository().nodes().getNodes(applicationId, Node.State.active);
+        NodeList nodes = nodeRepository().nodes().list(applicationId, Node.State.active);
         float oneExtraNodeFactor = (float)(nodes.size() - 1.0) / (nodes.size());
         for (int i = 0; i < count; i++) {
             clock().advance(Duration.ofMinutes(1));
@@ -156,7 +157,7 @@ class AutoscalingTester {
      */
     public void addMemMeasurements(float value, float otherResourcesLoad,
                                    int count, ApplicationId applicationId) {
-        List<Node> nodes = nodeRepository().nodes().getNodes(applicationId, Node.State.active);
+        NodeList nodes = nodeRepository().nodes().list(applicationId, Node.State.active);
         float oneExtraNodeFactor = (float)(nodes.size() - 1.0) / (nodes.size());
         for (int i = 0; i < count; i++) {
             clock().advance(Duration.ofMinutes(1));
@@ -181,7 +182,7 @@ class AutoscalingTester {
     public void addMeasurements(float cpu, float memory, float disk, int generation,
                                 boolean inService, boolean stable,
                                 int count, ApplicationId applicationId) {
-        List<Node> nodes = nodeRepository().nodes().getNodes(applicationId, Node.State.active);
+        NodeList nodes = nodeRepository().nodes().list(applicationId, Node.State.active);
         for (int i = 0; i < count; i++) {
             clock().advance(Duration.ofMinutes(1));
             for (Node node : nodes) {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcherTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcherTest.java
index 1ea4abab17b..9ac9a182512 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcherTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcherTest.java
@@ -78,8 +78,8 @@ public class MetricsV2MetricsFetcherTest {
         {
             httpClient.cannedResponse = cannedResponseForApplication2;
             try (Mutex lock = tester.nodeRepository().nodes().lock(application1)) {
-                tester.nodeRepository().nodes().write(tester.nodeRepository().nodes().getNodes(application2, Node.State.active)
-                        .get(0).retire(tester.clock().instant()), lock);
+                tester.nodeRepository().nodes().write(tester.nodeRepository().nodes().list(application2, Node.State.active)
+                        .first().get().retire(tester.clock().instant()), lock);
             }
             List<Pair<String, MetricSnapshot>> values = new ArrayList<>(fetcher.fetchMetrics(application2).get().metrics());
             assertFalse(values.get(0).getSecond().stable());
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java
index d6e6a7548c2..d9ecd0c6653 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java
@@ -13,6 +13,7 @@ import com.yahoo.config.provision.Zone;
 import com.yahoo.config.provisioning.FlavorsConfig;
 import com.yahoo.test.ManualClock;
 import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
 import com.yahoo.vespa.hosted.provision.NodeRepository;
 import com.yahoo.vespa.hosted.provision.applications.Cluster;
 import com.yahoo.vespa.hosted.provision.applications.ScalingEvent;
@@ -71,7 +72,7 @@ public class AutoscalingMaintainerTester {
     }
 
     public void addMeasurements(float cpu, float mem, float disk, long generation, int count, ApplicationId applicationId) {
-        List<Node> nodes = nodeRepository().nodes().getNodes(applicationId, Node.State.active);
+        NodeList nodes = nodeRepository().nodes().list(applicationId, Node.State.active);
         for (int i = 0; i < count; i++) {
             for (Node node : nodes)
                 metricsDb.add(List.of(new Pair<>(node.hostname(), new MetricSnapshot(clock().instant(),
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java
index 832f8c0c318..32b5f567341 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java
@@ -132,7 +132,7 @@ public class LoadBalancerExpirerTest {
     }
 
     private void dirtyNodesOf(ApplicationId application, ClusterSpec.Id cluster) {
-        tester.nodeRepository().nodes().deallocate(tester.nodeRepository().nodes().getNodes(application).stream()
+        tester.nodeRepository().nodes().deallocate(tester.nodeRepository().nodes().list(application).stream()
                                                          .filter(node -> node.allocation().isPresent())
                                                          .filter(node -> node.allocation().get().membership().cluster().id().equals(cluster))
                                                          .collect(Collectors.toList()),
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java
index d21f7dfd459..402eabe457e 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java
@@ -18,6 +18,7 @@ import com.yahoo.vespa.applicationmodel.HostName;
 import com.yahoo.vespa.curator.Curator;
 import com.yahoo.vespa.curator.transaction.CuratorTransaction;
 import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
 import com.yahoo.vespa.hosted.provision.NodeRepository;
 import com.yahoo.vespa.hosted.provision.node.Agent;
 import com.yahoo.vespa.hosted.provision.node.IP;
@@ -102,8 +103,8 @@ public class NodeFailTester {
         tester.activate(app1, clusterApp1, capacity1);
         tester.activate(app2, clusterApp2, capacity2);
-        assertEquals(capacity1.minResources().nodes(), tester.nodeRepository.nodes().getNodes(app1, Node.State.active).size());
-        assertEquals(capacity2.minResources().nodes(), tester.nodeRepository.nodes().getNodes(app2, Node.State.active).size());
+        assertEquals(capacity1.minResources().nodes(), tester.nodeRepository.nodes().list(app1, Node.State.active).size());
+        assertEquals(capacity2.minResources().nodes(), tester.nodeRepository.nodes().list(app2, Node.State.active).size());
 
         Map apps = Map.of(
                 app1, new MockDeployer.ApplicationContext(app1, clusterApp1, capacity1),
                 app2, new MockDeployer.ApplicationContext(app2, clusterApp2, capacity2));
@@ -133,9 +134,9 @@ public class NodeFailTester {
         tester.activate(app1, clusterApp1, capacity1);
         tester.activate(app2, clusterApp2, capacity2);
         assertEquals(Set.of(tester.nodeRepository.nodes().getNodes(NodeType.host)),
-                     Set.of(tester.nodeRepository.nodes().getNodes(tenantHostApp, Node.State.active)));
-        assertEquals(capacity1.minResources().nodes(), tester.nodeRepository.nodes().getNodes(app1, Node.State.active).size());
-        assertEquals(capacity2.minResources().nodes(), tester.nodeRepository.nodes().getNodes(app2, Node.State.active).size());
+                     Set.of(tester.nodeRepository.nodes().list(tenantHostApp, Node.State.active).asList()));
+        assertEquals(capacity1.minResources().nodes(), tester.nodeRepository.nodes().list(app1, Node.State.active).size());
+        assertEquals(capacity2.minResources().nodes(), tester.nodeRepository.nodes().list(app2, Node.State.active).size());
 
         Map apps = Map.of(
                 tenantHostApp, new MockDeployer.ApplicationContext(tenantHostApp, clusterNodeAdminApp, allHosts),
@@ -288,7 +289,7 @@ public class NodeFailTester {
     }
 
     /** Returns the node with the highest membership index from the given set of allocated nodes */
-    public Node highestIndex(List<Node> nodes) {
+    public Node highestIndex(NodeList nodes) {
         Node highestIndex = null;
         for (Node node : nodes) {
             if (highestIndex == null ||
                 node.allocation().get().membership().index() >
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java
index 401b4093798..5f82800d31c 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java
@@ -173,8 +173,8 @@ public class NodeFailerTest {
         tester.suspend(NodeFailTester.app1);
 
         // Set two nodes down (one for each application) and wait 65 minutes
-        String host_from_suspended_app = tester.nodeRepository.nodes().getNodes(NodeFailTester.app1, Node.State.active).get(1).hostname();
-        String host_from_normal_app = tester.nodeRepository.nodes().getNodes(NodeFailTester.app2, Node.State.active).get(3).hostname();
+        String host_from_suspended_app = tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active).asList().get(1).hostname();
+        String host_from_normal_app = tester.nodeRepository.nodes().list(NodeFailTester.app2, Node.State.active).asList().get(3).hostname();
         tester.serviceMonitor.setHostDown(host_from_suspended_app);
         tester.serviceMonitor.setHostDown(host_from_normal_app);
         tester.runMaintainers();
@@ -191,15 +191,15 @@ public class NodeFailerTest {
     public void zone_is_not_working_if_too_many_nodes_down() {
         NodeFailTester tester = NodeFailTester.withTwoApplications();
 
-        tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().getNodes(NodeFailTester.app1, Node.State.active).get(0).hostname());
+        tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active).asList().get(0).hostname());
         tester.runMaintainers();
         assertTrue(tester.nodeRepository.nodes().isWorking());
 
-        tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().getNodes(NodeFailTester.app1, Node.State.active).get(1).hostname());
+        tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active).asList().get(1).hostname());
         tester.runMaintainers();
         assertTrue(tester.nodeRepository.nodes().isWorking());
 
-        tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().getNodes(NodeFailTester.app1, Node.State.active).get(2).hostname());
+        tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active).asList().get(2).hostname());
         tester.runMaintainers();
         assertFalse(tester.nodeRepository.nodes().isWorking());
 
@@ -235,8 +235,8 @@ public class NodeFailerTest {
         assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(readyFail1.hostname()).get().state());
         assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(readyFail2.hostname()).get().state());
 
-        String downHost1 = tester.nodeRepository.nodes().getNodes(NodeFailTester.app1, Node.State.active).get(1).hostname();
-        String downHost2 = tester.nodeRepository.nodes().getNodes(NodeFailTester.app2, Node.State.active).get(3).hostname();
+        String downHost1 = tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active).asList().get(1).hostname();
+        String downHost2 = tester.nodeRepository.nodes().list(NodeFailTester.app2, Node.State.active).asList().get(3).hostname();
         tester.serviceMonitor.setHostDown(downHost1);
         tester.serviceMonitor.setHostDown(downHost2);
         // nothing happens the first 45 minutes
@@ -280,7 +280,7 @@ public class NodeFailerTest {
         assertEquals( 0, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size());
 
         // the last host goes down
-        Node lastNode = tester.highestIndex(tester.nodeRepository.nodes().getNodes(NodeFailTester.app1, Node.State.active));
+        Node lastNode = tester.highestIndex(tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active));
         tester.serviceMonitor.setHostDown(lastNode.hostname());
         // it is not failed because there are no ready nodes to replace it
         for (int minutes = 0; minutes < 75; minutes +=5 ) {
@@ -304,7 +304,7 @@ public class NodeFailerTest {
         assertEquals( 5, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size());
         assertEquals( 0, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size());
         assertTrue("The index of the last failed node is not reused",
-                   tester.highestIndex(tester.nodeRepository.nodes().getNodes(NodeFailTester.app1, Node.State.active)).allocation().get().membership().index()
+                   tester.highestIndex(tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active)).allocation().get().membership().index()
                    > lastNode.allocation().get().membership().index());
     }
 
    @Test
    public void re_activate_grace_period_test() {
        NodeFailTester tester = NodeFailTester.withTwoApplications();
-        String downNode = tester.nodeRepository.nodes().getNodes(NodeFailTester.app1, Node.State.active).get(1).hostname();
+        String downNode = tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active).asList().get(1).hostname();
 
        tester.serviceMonitor.setHostDown(downNode);
        tester.allNodesMakeAConfigRequestExcept();
@@ -349,7 +349,7 @@ public class NodeFailerTest {
         ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test")).vespaVersion("6.42").build();
         tester.activate(NodeFailTester.app1, cluster, capacity);
 
-        String downHost = tester.nodeRepository.nodes().getNodes(NodeFailTester.app1, Node.State.active).get(0).hostname();
+        String downHost = tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active).first().get().hostname();
         tester.serviceMonitor.setHostDown(downHost);
 
         // nothing happens the first 45 minutes
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainerTest.java
index f331f3bcb4a..b90f98c9ad8 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainerTest.java
@@ -51,19 +51,19 @@ public class OperatorChangeApplicationMaintainerTest {
         maintainer.maintain();
         assertEquals("No changes -> no redeployments", 3, fixture.deployer.redeployments);
 
-        nodeRepository.nodes().fail(nodeRepository.nodes().getNodes(fixture.app1).get(3).hostname(), Agent.system, "Failing to unit test");
+        nodeRepository.nodes().fail(nodeRepository.nodes().list(fixture.app1).asList().get(3).hostname(), Agent.system, "Failing to unit test");
         clock.advance(Duration.ofMinutes(2));
         maintainer.maintain();
         assertEquals("System change -> no redeployments", 3, fixture.deployer.redeployments);
 
         clock.advance(Duration.ofSeconds(1));
-        nodeRepository.nodes().fail(nodeRepository.nodes().getNodes(fixture.app2).get(4).hostname(), Agent.operator, "Manual node failing");
+        nodeRepository.nodes().fail(nodeRepository.nodes().list(fixture.app2).asList().get(4).hostname(), Agent.operator, "Manual node failing");
         clock.advance(Duration.ofMinutes(2));
         maintainer.maintain();
         assertEquals("Operator change -> redeployment", 4, fixture.deployer.redeployments);
 
         clock.advance(Duration.ofSeconds(1));
-        nodeRepository.nodes().fail(nodeRepository.nodes().getNodes(fixture.app3).get(1).hostname(), Agent.operator, "Manual node failing");
+        nodeRepository.nodes().fail(nodeRepository.nodes().list(fixture.app3).asList().get(1).hostname(), Agent.operator, "Manual node failing");
         clock.advance(Duration.ofMinutes(2));
         maintainer.maintain();
         assertEquals("Operator change -> redeployment", 5, fixture.deployer.redeployments);
@@ -104,9 +104,9 @@ public class OperatorChangeApplicationMaintainerTest {
             deployer.deployFromLocalActive(app1, false).get().activate();
             deployer.deployFromLocalActive(app2, false).get().activate();
             deployer.deployFromLocalActive(app3, false).get().activate();
-            assertEquals(wantedNodesApp1, nodeRepository.nodes().getNodes(app1, Node.State.active).size());
-            assertEquals(wantedNodesApp2, nodeRepository.nodes().getNodes(app2, Node.State.active).size());
-            assertEquals(wantedNodesApp3, nodeRepository.nodes().getNodes(app3, Node.State.active).size());
+            assertEquals(wantedNodesApp1, nodeRepository.nodes().list(app1, Node.State.active).size());
+            assertEquals(wantedNodesApp2, nodeRepository.nodes().list(app2, Node.State.active).size());
+            assertEquals(wantedNodesApp3, nodeRepository.nodes().list(app3, Node.State.active).size());
         }
     }
 
diff --git
 a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java
index ab00d098130..6ba716799d1 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java
@@ -64,21 +64,21 @@ public class PeriodicApplicationMaintainerTest {
         fixture.setBootstrapping(false);
 
         // Fail and park some nodes
-        nodeRepository.nodes().fail(nodeRepository.nodes().getNodes(fixture.app1).get(3).hostname(), Agent.system, "Failing to unit test");
-        nodeRepository.nodes().fail(nodeRepository.nodes().getNodes(fixture.app2).get(0).hostname(), Agent.system, "Failing to unit test");
-        nodeRepository.nodes().park(nodeRepository.nodes().getNodes(fixture.app2).get(4).hostname(), true, Agent.system, "Parking to unit test");
+        nodeRepository.nodes().fail(nodeRepository.nodes().list(fixture.app1).asList().get(3).hostname(), Agent.system, "Failing to unit test");
+        nodeRepository.nodes().fail(nodeRepository.nodes().list(fixture.app2).asList().get(0).hostname(), Agent.system, "Failing to unit test");
+        nodeRepository.nodes().park(nodeRepository.nodes().list(fixture.app2).asList().get(4).hostname(), true, Agent.system, "Parking to unit test");
         int failedInApp1 = 1;
         int failedOrParkedInApp2 = 2;
-        assertEquals(fixture.wantedNodesApp1 - failedInApp1, nodeRepository.nodes().getNodes(fixture.app1, Node.State.active).size());
-        assertEquals(fixture.wantedNodesApp2 - failedOrParkedInApp2, nodeRepository.nodes().getNodes(fixture.app2, Node.State.active).size());
+        assertEquals(fixture.wantedNodesApp1 - failedInApp1, nodeRepository.nodes().list(fixture.app1, Node.State.active).size());
+        assertEquals(fixture.wantedNodesApp2 - failedOrParkedInApp2, nodeRepository.nodes().list(fixture.app2, Node.State.active).size());
         assertEquals(failedInApp1 + failedOrParkedInApp2, nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed, Node.State.parked).size());
         assertEquals(3, nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size());
         assertEquals(2, nodeRepository.nodes().getNodes(NodeType.host, Node.State.ready).size());
 
         // Cause maintenance deployment which will allocate replacement nodes
         fixture.runApplicationMaintainer();
-        assertEquals(fixture.wantedNodesApp1, nodeRepository.nodes().getNodes(fixture.app1, Node.State.active).size());
-        assertEquals(fixture.wantedNodesApp2, nodeRepository.nodes().getNodes(fixture.app2, Node.State.active).size());
+        assertEquals(fixture.wantedNodesApp1, nodeRepository.nodes().list(fixture.app1, Node.State.active).size());
+        assertEquals(fixture.wantedNodesApp2, nodeRepository.nodes().list(fixture.app2, Node.State.active).size());
         assertEquals(0, nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size());
 
         // Reactivate the previously failed nodes
@@ -88,8 +88,8 @@ public class PeriodicApplicationMaintainerTest {
         int reactivatedInApp1 = 1;
         int reactivatedInApp2 = 2;
         assertEquals(0, nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size());
-        assertEquals(fixture.wantedNodesApp1 + reactivatedInApp1, nodeRepository.nodes().getNodes(fixture.app1, Node.State.active).size());
-        assertEquals(fixture.wantedNodesApp2 + reactivatedInApp2, nodeRepository.nodes().getNodes(fixture.app2, Node.State.active).size());
+        assertEquals(fixture.wantedNodesApp1 + reactivatedInApp1, nodeRepository.nodes().list(fixture.app1, Node.State.active).size());
+        assertEquals(fixture.wantedNodesApp2 + reactivatedInApp2, nodeRepository.nodes().list(fixture.app2, Node.State.active).size());
         assertEquals("The reactivated nodes are now active but not part of the application",
                      0, fixture.getNodes(Node.State.active).retired().size());
 
@@ -112,13 +112,13 @@ public class PeriodicApplicationMaintainerTest {
 
         // Remove one application without letting the application maintainer know about it
         fixture.remove(fixture.app2);
-        assertEquals(fixture.wantedNodesApp2, nodeRepository.nodes().getNodes(fixture.app2, Node.State.inactive).size());
+        assertEquals(fixture.wantedNodesApp2, nodeRepository.nodes().list(fixture.app2, Node.State.inactive).size());
 
         // Nodes belonging to app2 are inactive after maintenance
         fixture.maintainer.setOverriddenNodesNeedingMaintenance(frozenActiveNodes);
         fixture.runApplicationMaintainer();
         assertEquals("Inactive nodes were incorrectly activated after maintenance", fixture.wantedNodesApp2,
-                     nodeRepository.nodes().getNodes(fixture.app2, Node.State.inactive).size());
+                     nodeRepository.nodes().list(fixture.app2, Node.State.inactive).size());
     }
 
     @Test(timeout = 60_000)
@@ -232,8 +232,8 @@ public class PeriodicApplicationMaintainerTest {
         void activate() {
             deployer.deployFromLocalActive(app1, false).get().activate();
             deployer.deployFromLocalActive(app2, false).get().activate();
-            assertEquals(wantedNodesApp1, nodeRepository.nodes().getNodes(app1, Node.State.active).size());
-            assertEquals(wantedNodesApp2, nodeRepository.nodes().getNodes(app2, Node.State.active).size());
+            assertEquals(wantedNodesApp1, nodeRepository.nodes().list(app1, Node.State.active).size());
+            assertEquals(wantedNodesApp2, nodeRepository.nodes().list(app2, Node.State.active).size());
         }
 
         void remove(ApplicationId application) {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java
index 4f08fa9ab9e..a1aa097e4cc 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java
@@ -177,7 +177,7 @@ public class RebalancerTest {
         }
 
         List<Node> getNodes(ApplicationId applicationId, Node.State nodeState) {
-            return tester.nodeRepository().nodes().getNodes(applicationId, nodeState);
+            return tester.nodeRepository().nodes().list(applicationId, nodeState).asList();
         }
 
         boolean isNodeRetired(Node node) {
@@ -188,7 +188,7 @@ public class RebalancerTest {
         NodeList getNodes(Node.State nodeState) { return tester.nodeRepository().nodes().list(nodeState); }
 
-        Node getNode(ApplicationId applicationId) { return tester.nodeRepository().nodes().getNodes(applicationId).get(0); }
+        Node getNode(ApplicationId applicationId) { return tester.nodeRepository().nodes().list(applicationId).first().get(); }
 
         ManualClock clock() { return tester.clock(); }
 
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java
index fa492c3a3e9..129e4e3a775 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java
+++
 b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java
@@ -71,8 +71,8 @@ public class RetiredExpirerTest {
         activate(applicationId, cluster, wantedNodes=7, 1);
         activate(applicationId, cluster, wantedNodes=2, 1);
         activate(applicationId, cluster, wantedNodes=3, 1);
-        assertEquals(7, nodeRepository.nodes().getNodes(applicationId, Node.State.active).size());
-        assertEquals(0, nodeRepository.nodes().getNodes(applicationId, Node.State.inactive).size());
+        assertEquals(7, nodeRepository.nodes().list(applicationId, Node.State.active).size());
+        assertEquals(0, nodeRepository.nodes().list(applicationId, Node.State.inactive).size());
 
         // Cause inactivation of retired nodes
         clock.advance(Duration.ofHours(30)); // Retire period spent
@@ -83,12 +83,12 @@ public class RetiredExpirerTest {
                                                               cluster,
                                                               Capacity.from(new ClusterResources(wantedNodes, 1, nodeResources)))));
         createRetiredExpirer(deployer).run();
-        assertEquals(3, nodeRepository.nodes().getNodes(applicationId, Node.State.active).size());
-        assertEquals(4, nodeRepository.nodes().getNodes(applicationId, Node.State.inactive).size());
+        assertEquals(3, nodeRepository.nodes().list(applicationId, Node.State.active).size());
+        assertEquals(4, nodeRepository.nodes().list(applicationId, Node.State.inactive).size());
         assertEquals(1, deployer.redeployments);
 
         // inactivated nodes are not retired
-        for (Node node : nodeRepository.nodes().getNodes(applicationId, Node.State.inactive))
+        for (Node node : nodeRepository.nodes().list(applicationId, Node.State.inactive))
             assertFalse(node.allocation().get().membership().retired());
     }
 
@@ -106,8 +106,8 @@ public class RetiredExpirerTest {
         activate(applicationId, cluster, wantedNodes=7, 1);
         activate(applicationId, cluster, wantedNodes=2, 1);
         activate(applicationId, cluster, wantedNodes=3, 1);
-        assertEquals(7, nodeRepository.nodes().getNodes(applicationId, Node.State.active).size());
-        assertEquals(0, nodeRepository.nodes().getNodes(applicationId, Node.State.inactive).size());
+        assertEquals(7, nodeRepository.nodes().list(applicationId, Node.State.active).size());
+        assertEquals(0, nodeRepository.nodes().list(applicationId, Node.State.inactive).size());
 
         // Cause inactivation of retired nodes
         MockDeployer deployer =
@@ -128,27 +128,27 @@ public class RetiredExpirerTest {
         RetiredExpirer retiredExpirer = createRetiredExpirer(deployer);
         retiredExpirer.run();
-        assertEquals(5, nodeRepository.nodes().getNodes(applicationId, Node.State.active).size());
-        assertEquals(2, nodeRepository.nodes().getNodes(applicationId, Node.State.inactive).size());
+        assertEquals(5, nodeRepository.nodes().list(applicationId, Node.State.active).size());
+        assertEquals(2, nodeRepository.nodes().list(applicationId, Node.State.inactive).size());
         assertEquals(1, deployer.redeployments);
         verify(orchestrator, times(4)).acquirePermissionToRemove(any());
 
         // Running it again has no effect
         retiredExpirer.run();
-        assertEquals(5, nodeRepository.nodes().getNodes(applicationId, Node.State.active).size());
-        assertEquals(2, nodeRepository.nodes().getNodes(applicationId, Node.State.inactive).size());
+        assertEquals(5, nodeRepository.nodes().list(applicationId, Node.State.active).size());
+        assertEquals(2, nodeRepository.nodes().list(applicationId, Node.State.inactive).size());
         assertEquals(1, deployer.redeployments);
         verify(orchestrator, times(6)).acquirePermissionToRemove(any());
 
         clock.advance(RETIRED_EXPIRATION.plusMinutes(1));
         retiredExpirer.run();
-        assertEquals(3, nodeRepository.nodes().getNodes(applicationId, Node.State.active).size());
-        assertEquals(4, nodeRepository.nodes().getNodes(applicationId, Node.State.inactive).size());
+        assertEquals(3, nodeRepository.nodes().list(applicationId, Node.State.active).size());
+        assertEquals(4, nodeRepository.nodes().list(applicationId, Node.State.inactive).size());
         assertEquals(2, deployer.redeployments);
         verify(orchestrator, times(6)).acquirePermissionToRemove(any());
 
         // inactivated nodes are not retired
-        for (Node node : nodeRepository.nodes().getNodes(applicationId, Node.State.inactive))
+        for (Node node : nodeRepository.nodes().list(applicationId, Node.State.inactive))
             assertFalse(node.allocation().get().membership().retired());
     }
 
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
index 4d334147212..0d2de73635e 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
@@ -14,6 +14,7 @@ import com.yahoo.config.provision.RegionName;
 import com.yahoo.config.provision.Zone;
 import com.yahoo.config.provisioning.FlavorsConfig;
 import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
 import com.yahoo.vespa.hosted.provision.NodeRepository;
 import com.yahoo.vespa.hosted.provision.applications.Cluster;
 import com.yahoo.vespa.hosted.provision.autoscale.MetricSnapshot;
@@ -121,7 +122,7 @@ public class ScalingSuggestionsMaintainerTest {
     public void addMeasurements(float cpu, float memory, float disk, int generation, int count,
                                 ApplicationId applicationId,
                                 NodeRepository nodeRepository, MetricsDb db) {
-        List<Node> nodes = nodeRepository.nodes().getNodes(applicationId, Node.State.active);
+        NodeList nodes = nodeRepository.nodes().list(applicationId, Node.State.active);
         for (int i = 0; i < count; i++) {
             for (Node node : nodes)
                 db.add(List.of(new Pair<>(node.hostname(), new MetricSnapshot(nodeRepository.clock().instant(),
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java
index 23a8af045af..b697cf1dc4b 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java
@@ -8,6 +8,7 @@ import com.yahoo.config.provision.ClusterResources;
 import com.yahoo.config.provision.NodeResources;
 import com.yahoo.config.provision.NodeType;
 import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
 import com.yahoo.vespa.hosted.provision.node.NodeAcl;
 import org.junit.Test;
 
@@ -33,7 +34,7 @@ public class AclProvisioningTest {
 
     @Test
     public void trusted_nodes_for_allocated_node() {
-        List<Node> configServers = tester.makeConfigServers(3, "d-1-4-10", Version.fromString("6.123.456"));
+        NodeList configServers = tester.makeConfigServers(3, "d-1-4-10", Version.fromString("6.123.456"));
 
         // Populate repo
         tester.makeReadyNodes(10, new NodeResources(1, 4, 10, 1));
@@ -55,14 +56,14 @@ public class AclProvisioningTest {
         Supplier<NodeAcl> nodeAcls = () -> node.acl(tester.nodeRepository().nodes().list(), tester.nodeRepository().loadBalancers());
 
         // Trusted nodes are active nodes in same application, proxy nodes and config servers
-        assertAcls(List.of(activeNodes, proxyNodes, configServers, host),
+        assertAcls(List.of(activeNodes, proxyNodes, configServers.asList(), host),
                    Set.of("10.2.3.0/24", "10.4.5.0/24"),
                    List.of(nodeAcls.get()));
     }
 
     @Test
     public void trusted_nodes_for_unallocated_node() {
-        List<Node> configServers = tester.makeConfigServers(3, "default", Version.fromString("6.123.456"));
+        NodeList configServers = tester.makeConfigServers(3, "default", Version.fromString("6.123.456"));
 
         // Populate repo
         tester.makeReadyNodes(10, nodeResources);
@@ -77,12 +78,12 @@ public class AclProvisioningTest {
         List<Node> tenantNodes = tester.nodeRepository().nodes().getNodes(NodeType.tenant);
 
         // Trusted nodes are all proxy-, config-, and, tenant-nodes
-        assertAcls(List.of(proxyNodes, configServers, tenantNodes), List.of(nodeAcl));
+        assertAcls(List.of(proxyNodes, configServers.asList(), tenantNodes), List.of(nodeAcl));
     }
 
     @Test
     public void trusted_nodes_for_config_server() {
-        List<Node> configServers = tester.makeConfigServers(3, "default", Version.fromString("6.123.456"));
+        NodeList configServers = tester.makeConfigServers(3, "default", Version.fromString("6.123.456"));
 
         // Populate repo
         tester.makeReadyNodes(10, nodeResources);
@@ -98,12 +99,12 @@ public class AclProvisioningTest {
         NodeAcl nodeAcl = node.acl(tester.nodeRepository().nodes().list(), tester.nodeRepository().loadBalancers());
 
         // Trusted nodes is all tenant nodes, all proxy nodes, all config servers and load balancer subnets
-        assertAcls(List.of(tenantNodes, proxyNodes, configServers), Set.of("10.2.3.0/24", "10.4.5.0/24"), List.of(nodeAcl));
+        assertAcls(List.of(tenantNodes, proxyNodes, configServers.asList()), Set.of("10.2.3.0/24", "10.4.5.0/24"), List.of(nodeAcl));
     }
 
     @Test
     public void trusted_nodes_for_proxy() {
-        List<Node> configServers = tester.makeConfigServers(3, "default", Version.fromString("6.123.456"));
+        NodeList configServers = tester.makeConfigServers(3, "default", Version.fromString("6.123.456"));
 
         // Populate repo
         tester.makeReadyNodes(10, "default");
@@ -114,17 +115,17 @@ public class AclProvisioningTest {
         tester.deploy(zoneApplication, Capacity.fromRequiredNodeType(NodeType.proxy));
 
         // Get trusted nodes for first proxy node
-        List<Node> proxyNodes = tester.nodeRepository().nodes().getNodes(zoneApplication);
-        Node node = proxyNodes.get(0);
+        NodeList proxyNodes = tester.nodeRepository().nodes().list(zoneApplication);
+        Node node = proxyNodes.first().get();
         NodeAcl nodeAcl = node.acl(tester.nodeRepository().nodes().list(), tester.nodeRepository().loadBalancers());
 
         // Trusted nodes is all config servers and all proxy nodes
-        assertAcls(List.of(proxyNodes, configServers), List.of(nodeAcl));
+        assertAcls(List.of(proxyNodes.asList(), configServers.asList()), List.of(nodeAcl));
     }
 
     @Test
     public void trusted_nodes_for_children_of_docker_host() {
-        List<Node> configServers = tester.makeConfigServers(3, "default", Version.fromString("6.123.456"));
+        NodeList configServers = tester.makeConfigServers(3, "default", Version.fromString("6.123.456"));
 
         // Populate repo
         List<Node> dockerHostNodes = tester.makeReadyNodes(2, "default", NodeType.host);
@@ -143,7 +144,7 @@ public class AclProvisioningTest {
                 .findFirst()
                 .orElseThrow(() -> new RuntimeException("Expected to find ACL for node " + dockerNode.hostname()));
         assertEquals(dockerHostNodeUnderTest.hostname(), dockerNode.parentHostname().get());
-        assertAcls(List.of(configServers, dockerNodes, List.of(dockerHostNodeUnderTest)), nodeAcl);
+        assertAcls(List.of(configServers.asList(), dockerNodes, List.of(dockerHostNodeUnderTest)), nodeAcl);
     }
 }
 
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java
index a0f5fd21f2d..301341e1e61 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java
@@ -315,9 +315,9 @@ public class DynamicDockerAllocationTest {
         List<HostSpec> hosts = tester.prepare(application, clusterSpec("myContent.t1.a1"), 2, 1, new NodeResources(1, 4, 100, 1));
         tester.activate(application, hosts);
 
-        List<Node> activeNodes = tester.nodeRepository().nodes().getNodes(application);
-        assertEquals(ImmutableSet.of("127.0.127.13", "::13"), activeNodes.get(0).ipConfig().primary());
-        assertEquals(ImmutableSet.of("127.0.127.2", "::2"), activeNodes.get(1).ipConfig().primary());
+        NodeList activeNodes = tester.nodeRepository().nodes().list(application);
+        assertEquals(ImmutableSet.of("127.0.127.13", "::13"), activeNodes.asList().get(0).ipConfig().primary());
+        assertEquals(ImmutableSet.of("127.0.127.2", "::2"), activeNodes.asList().get(1).ipConfig().primary());
     }
 
     @Test
@@ -437,16 +437,16 @@ public class DynamicDockerAllocationTest {
 
         // Redeploy does not change allocation as a host with switch information is no better or worse than hosts
         // without switch information
-        List<Node> allocatedNodes = tester.nodeRepository().nodes().getNodes(app1);
+        NodeList allocatedNodes = tester.nodeRepository().nodes().list(app1);
         tester.activate(app1, tester.prepare(app1, cluster, Capacity.from(new ClusterResources(2, 1, resources))));
-        assertEquals("Allocation unchanged", allocatedNodes, tester.nodeRepository().nodes().getNodes(app1));
+        assertEquals("Allocation unchanged", allocatedNodes, tester.nodeRepository().nodes().list(app1));
 
         // Initial hosts are attached to the same switch
         tester.patchNodes(hosts0, (host) -> host.withSwitchHostname(switch0));
 
         // Redeploy does not change allocation
         tester.activate(app1, tester.prepare(app1, cluster, Capacity.from(new ClusterResources(2, 1, resources))));
-        assertEquals("Allocation unchanged", allocatedNodes, tester.nodeRepository().nodes().getNodes(app1));
+        assertEquals("Allocation unchanged", allocatedNodes, tester.nodeRepository().nodes().list(app1));
 
         // One regular host and one slow-disk host are provisioned on the same switch
         String switch1 = "switch1";
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java
index 1d9c04999a1..3d75760edc2 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java
@@ -188,7 +188,7 @@ public class DynamicDockerProvisionTest {
         ApplicationId app = ProvisioningTester.applicationId();
         Function<Node, Node> retireNode = node -> tester.patchNode(node, (n) -> n.withWantToRetire(true, Agent.system, Instant.now()));
-        Function<Integer, Node> getNodeInGroup = group -> tester.nodeRepository().nodes().getNodes(app).stream()
+        Function<Integer, Node> getNodeInGroup = group -> tester.nodeRepository().nodes().list(app).stream()
                 .filter(node -> node.allocation().get().membership().cluster().group().get().index() == group)
                 .findAny().orElseThrow();
 
@@ -209,7 +209,7 @@ public class DynamicDockerProvisionTest {
         tester.prepare(app, clusterSpec("content"), 8, 2, resources);
 
         // Verify that nodes have unique indices from 0..9
-        var indices = tester.nodeRepository().nodes().getNodes(app).stream()
+        var indices = tester.nodeRepository().nodes().list(app).stream()
                             .map(node -> node.allocation().get().membership().index())
                             .collect(Collectors.toSet());
         assertTrue(indices.containsAll(IntStream.range(0, 10).boxed().collect(Collectors.toList())));
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
index fece475852a..93242b1cff2 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
@@ -12,6 +12,7 @@ import com.yahoo.config.provision.NodeResources;
 import com.yahoo.config.provision.NodeType;
 import com.yahoo.vespa.flags.InMemoryFlagSource;
 import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
 import com.yahoo.vespa.hosted.provision.lb.LoadBalancer;
 import com.yahoo.vespa.hosted.provision.lb.LoadBalancerInstance;
 import com.yahoo.vespa.hosted.provision.lb.Real;
@@ -136,7 +137,7 @@ public class LoadBalancerProvisionerTest {
         // Entire application is removed: Nodes and load balancer are deactivated
         tester.remove(app1);
         dirtyNodesOf(app1);
-        assertTrue("No nodes are allocated to " + app1, tester.nodeRepository().nodes().getNodes(app1, Node.State.reserved, Node.State.active).isEmpty());
+        assertTrue("No nodes are allocated to " + app1, tester.nodeRepository().nodes().list(app1, Node.State.reserved, Node.State.active).isEmpty());
         assertEquals(2, lbApp1.get().size());
         assertTrue("Deactivated load balancers", lbApp1.get().stream().allMatch(lb -> lb.state() == LoadBalancer.State.inactive));
         assertTrue("Load balancers for " + app2 + " remain active", lbApp2.get().stream().allMatch(lb -> lb.state() == LoadBalancer.State.active));
@@ -167,7 +168,7 @@ public class LoadBalancerProvisionerTest {
         var nodes = tester.prepare(app1, clusterRequest(ClusterSpec.Type.container, ClusterSpec.Id.from("qrs")), 2 , 1, resources);
         Supplier<LoadBalancer> lb = () -> tester.nodeRepository().loadBalancers().list(app1).asList().get(0);
         assertTrue("Load balancer provisioned with empty reals", tester.loadBalancerService().instances().get(lb.get().id()).reals().isEmpty());
-        assignIps(tester.nodeRepository().nodes().getNodes(app1));
+        assignIps(tester.nodeRepository().nodes().list(app1));
         tester.activate(app1, nodes);
         assertFalse("Load balancer is reconfigured with reals", tester.loadBalancerService().instances().get(lb.get().id()).reals().isEmpty());
 
@@ -180,7 +181,7 @@ public class LoadBalancerProvisionerTest {
         // Application is redeployed
         nodes = tester.prepare(app1, clusterRequest(ClusterSpec.Type.container, ClusterSpec.Id.from("qrs")), 2 , 1, resources);
         assertTrue("Load balancer is reconfigured with empty reals", tester.loadBalancerService().instances().get(lb.get().id()).reals().isEmpty());
-        assignIps(tester.nodeRepository().nodes().getNodes(app1));
+        assignIps(tester.nodeRepository().nodes().list(app1));
         tester.activate(app1, nodes);
reals", tester.loadBalancerService().instances().get(lb.get().id()).reals().isEmpty()); } @@ -269,7 +270,7 @@ public class LoadBalancerProvisionerTest { } private void dirtyNodesOf(ApplicationId application) { - tester.nodeRepository().nodes().deallocate(tester.nodeRepository().nodes().getNodes(application), Agent.system, this.getClass().getSimpleName()); + tester.nodeRepository().nodes().deallocate(tester.nodeRepository().nodes().list(application).asList(), Agent.system, this.getClass().getSimpleName()); } private Set prepare(ApplicationId application, ClusterSpec... specs) { @@ -285,10 +286,10 @@ public class LoadBalancerProvisionerTest { return allNodes; } - private void assignIps(List nodes) { + private void assignIps(NodeList nodes) { try (var lock = tester.nodeRepository().nodes().lockUnallocated()) { for (int i = 0; i < nodes.size(); i++) { - tester.nodeRepository().nodes().write(nodes.get(i).with(IP.Config.EMPTY.withPrimary(Set.of("127.0.0." + i))), lock); + tester.nodeRepository().nodes().write(nodes.asList().get(i).with(IP.Config.EMPTY.withPrimary(Set.of("127.0.0." + i))), lock); } } } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java index cf9b6dbd861..324f931be14 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java @@ -95,7 +95,8 @@ public class ProvisioningTest { SystemState state5 = prepare(application1, 2, 2, 3, 3, defaultResources, tester); HostSpec removed = tester.removeOne(state5.allHosts); tester.activate(application1, state5.allHosts); - assertEquals(removed.hostname(), tester.nodeRepository().nodes().getNodes(application1, Node.State.inactive).get(0).hostname()); + assertEquals(removed.hostname(), + tester.nodeRepository().nodes().list(application1, Node.State.inactive).first().get().hostname()); // remove some of the clusters SystemState state6 = prepare(application1, 0, 2, 0, 3, defaultResources, tester); @@ -107,14 +108,14 @@ public class ProvisioningTest { NodeList previouslyActive = tester.getNodes(application1, Node.State.active); NodeList previouslyInactive = tester.getNodes(application1, Node.State.inactive); tester.remove(application1); - assertEquals(tester.toHostNames(previouslyActive.not().container().asList()), - tester.toHostNames(tester.nodeRepository().nodes().getNodes(application1, Node.State.inactive))); + assertEquals(tester.toHostNames(previouslyActive.not().container()), + tester.toHostNames(tester.nodeRepository().nodes().list(application1, Node.State.inactive))); assertTrue(tester.nodeRepository().nodes().list(Node.State.dirty).asList().containsAll(previouslyActive.container().asList())); assertEquals(0, tester.getNodes(application1, Node.State.active).size()); assertTrue(tester.nodeRepository().applications().get(application1).isEmpty()); // other application is unaffected - assertEquals(state1App2.hostNames(), tester.toHostNames(tester.nodeRepository().nodes().getNodes(application2, Node.State.active))); + assertEquals(state1App2.hostNames(), tester.toHostNames(tester.nodeRepository().nodes().list(application2, Node.State.active))); // fail a node from app2 and make sure it does not get inactive nodes from first HostSpec failed = tester.removeOne(state1App2.allHosts); @@ -289,7 +290,7 @@ public class ProvisioningTest { // 
         // redeploy with increased sizes and new flavor
         SystemState state3 = prepare(application1, 3, 4, 4, 5, large, tester);
-        assertEquals("New nodes are reserved", 16, tester.nodeRepository().nodes().getNodes(application1, Node.State.reserved).size());
+        assertEquals("New nodes are reserved", 16, tester.nodeRepository().nodes().list(application1, Node.State.reserved).size());
         tester.activate(application1, state3.allHosts);
         assertEquals("small container nodes are retired because we are swapping the entire cluster",
                      2 + 2, tester.getNodes(application1, Node.State.active).retired().type(ClusterSpec.Type.container).resources(small).size());
@@ -316,7 +317,7 @@ public class ProvisioningTest {
         SystemState state1 = prepare(application1, 2, 2, 4, 4, small, tester);
         tester.activate(application1, state1.allHosts);
 
-        tester.nodeRepository().nodes().getNodes(application1)
+        tester.nodeRepository().nodes().list(application1)
               .forEach(n -> assertEquals(large, tester.nodeRepository().nodes().getNode(n.parentHostname().get()).get().resources()));
     }
 
@@ -374,7 +375,7 @@ public class ProvisioningTest {
         assertEquals(6, state.allHosts.size());
         tester.activate(application, state.allHosts);
         assertTrue(state.allHosts.stream().allMatch(host -> host.requestedResources().get().diskSpeed() == NodeResources.DiskSpeed.any));
-        assertTrue(tester.nodeRepository().nodes().getNodes(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any));
+        assertTrue(tester.nodeRepository().nodes().list(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any));
     }
 
     {
@@ -386,7 +387,7 @@ public class ProvisioningTest {
         assertEquals(8, state.allHosts.size());
         tester.activate(application, state.allHosts);
         assertTrue(state.allHosts.stream().allMatch(host -> host.requestedResources().get().diskSpeed() == NodeResources.DiskSpeed.fast));
-        assertTrue(tester.nodeRepository().nodes().getNodes(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.fast));
+        assertTrue(tester.nodeRepository().nodes().list(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.fast));
     }
 
     {
@@ -397,7 +398,7 @@ public class ProvisioningTest {
         assertEquals(8, state.allHosts.size());
         tester.activate(application, state.allHosts);
         assertTrue(state.allHosts.stream().allMatch(host -> host.requestedResources().get().diskSpeed() == NodeResources.DiskSpeed.any));
-        assertTrue(tester.nodeRepository().nodes().getNodes(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any));
+        assertTrue(tester.nodeRepository().nodes().list(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any));
     }
 
@@ -692,25 +693,25 @@ public class ProvisioningTest {
         // Allocate 5 nodes
         ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("4.5.6").build();
         tester.activate(application, tester.prepare(application, cluster, capacity));
-        assertEquals(5, NodeList.copyOf(tester.nodeRepository().nodes().getNodes(application, Node.State.active)).not().retired().size());
-        assertEquals(0, NodeList.copyOf(tester.nodeRepository().nodes().getNodes(application, Node.State.active)).retired().size());
tester.nodeRepository().nodes().list(application, Node.State.active).not().retired().size()); + assertEquals(0, tester.nodeRepository().nodes().list(application, Node.State.active).retired().size()); // Mark the nodes as want to retire - tester.nodeRepository().nodes().getNodes(application, Node.State.active).forEach(node -> tester.patchNode(node, (n) -> n.withWantToRetire(true, Agent.system, tester.clock().instant()))); + tester.nodeRepository().nodes().list(application, Node.State.active).forEach(node -> tester.patchNode(node, (n) -> n.withWantToRetire(true, Agent.system, tester.clock().instant()))); // redeploy without allow failing tester.activate(application, tester.prepare(application, cluster, capacityFORCED)); // Nodes are not retired since that is unsafe when we cannot fail - assertEquals(5, NodeList.copyOf(tester.nodeRepository().nodes().getNodes(application, Node.State.active)).not().retired().size()); - assertEquals(0, NodeList.copyOf(tester.nodeRepository().nodes().getNodes(application, Node.State.active)).retired().size()); + assertEquals(5, tester.nodeRepository().nodes().list(application, Node.State.active).not().retired().size()); + assertEquals(0, tester.nodeRepository().nodes().list(application, Node.State.active).retired().size()); // ... but we still want to - tester.nodeRepository().nodes().getNodes(application, Node.State.active).forEach(node -> assertTrue(node.status().wantToRetire())); + tester.nodeRepository().nodes().list(application, Node.State.active).forEach(node -> assertTrue(node.status().wantToRetire())); // redeploy with allowing failing tester.activate(application, tester.prepare(application, cluster, capacity)); // ... old nodes are now retired - assertEquals(5, NodeList.copyOf(tester.nodeRepository().nodes().getNodes(application, Node.State.active)).not().retired().size()); - assertEquals(5, NodeList.copyOf(tester.nodeRepository().nodes().getNodes(application, Node.State.active)).retired().size()); + assertEquals(5, tester.nodeRepository().nodes().list(application, Node.State.active).not().retired().size()); + assertEquals(5, tester.nodeRepository().nodes().list(application, Node.State.active).retired().size()); } @Test @@ -723,17 +724,17 @@ public class ProvisioningTest { ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("4.5.6").build(); tester.activate(application, tester.prepare(application, cluster, capacityCanFail)); - assertEquals(0, NodeList.copyOf(tester.nodeRepository().nodes().getNodes(application, Node.State.active)).retired().size()); + assertEquals(0, tester.nodeRepository().nodes().list(application, Node.State.active).retired().size()); - tester.patchNode(tester.nodeRepository().nodes().getNodes(application).stream().findAny().orElseThrow(), n -> n.withWantToRetire(true, Agent.system, tester.clock().instant())); + tester.patchNode(tester.nodeRepository().nodes().list(application).stream().findAny().orElseThrow(), n -> n.withWantToRetire(true, Agent.system, tester.clock().instant())); tester.activate(application, tester.prepare(application, cluster, capacityCanFail)); - assertEquals(1, NodeList.copyOf(tester.nodeRepository().nodes().getNodes(application, Node.State.active)).retired().size()); - assertEquals(6, tester.nodeRepository().nodes().getNodes(application, Node.State.active).size()); + assertEquals(1, tester.nodeRepository().nodes().list(application, Node.State.active).retired().size()); + assertEquals(6, tester.nodeRepository().nodes().list(application, 
Node.State.active).size());
 Capacity capacityCannotFail = Capacity.from(new ClusterResources(5, 1, defaultResources), false, false);
 tester.activate(application, tester.prepare(application, cluster, capacityCannotFail));
- assertEquals(1, NodeList.copyOf(tester.nodeRepository().nodes().getNodes(application, Node.State.active)).retired().size());
- assertEquals(6, tester.nodeRepository().nodes().getNodes(application, Node.State.active).size());
+ assertEquals(1, tester.nodeRepository().nodes().list(application, Node.State.active).retired().size());
+ assertEquals(6, tester.nodeRepository().nodes().list(application, Node.State.active).size());
 }
 @Test
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
index fde02c083dd..46418ee3439 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
@@ -143,7 +143,7 @@ public class ProvisioningTester {
 public NodeRepositoryProvisioner provisioner() { return provisioner; }
 public LoadBalancerServiceMock loadBalancerService() { return loadBalancerService; }
 public CapacityPolicies capacityPolicies() { return capacityPolicies; }
- public NodeList getNodes(ApplicationId id, Node.State ... inState) { return NodeList.copyOf(nodeRepository.nodes().getNodes(id, inState)); }
+ public NodeList getNodes(ApplicationId id, Node.State ... inState) { return nodeRepository.nodes().list(id, inState); }
 public Node patchNode(Node node, UnaryOperator<Node> patcher) {
 return patchNodes(List.of(node), patcher).get(0);
@@ -170,12 +170,12 @@ }
 public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity capacity) {
- Set<String> reservedBefore = toHostNames(nodeRepository.nodes().getNodes(application, Node.State.reserved));
- Set<String> inactiveBefore = toHostNames(nodeRepository.nodes().getNodes(application, Node.State.inactive));
+ Set<String> reservedBefore = toHostNames(nodeRepository.nodes().list(application, Node.State.reserved));
+ Set<String> inactiveBefore = toHostNames(nodeRepository.nodes().list(application, Node.State.inactive));
 List<HostSpec> hosts1 = provisioner.prepare(application, cluster, capacity, provisionLogger);
 List<HostSpec> hosts2 = provisioner.prepare(application, cluster, capacity, provisionLogger);
 assertEquals("Prepare is idempotent", hosts1, hosts2);
- Set<String> newlyActivated = toHostNames(nodeRepository.nodes().getNodes(application, Node.State.reserved));
+ Set<String> newlyActivated = toHostNames(nodeRepository.nodes().list(application, Node.State.reserved));
 newlyActivated.removeAll(reservedBefore);
 newlyActivated.removeAll(inactiveBefore);
 return hosts1;
@@ -213,7 +213,7 @@ provisioner.activate(hosts, new ActivationContext(0), new ApplicationTransaction(lock, transaction));
 transaction.commit();
 }
- assertEquals(toHostNames(hosts), toHostNames(nodeRepository.nodes().getNodes(application, Node.State.active)));
+ assertEquals(toHostNames(hosts), toHostNames(nodeRepository.nodes().list(application, Node.State.active)));
 return hosts;
 }
@@ -250,7 +250,7 @@ return hosts.stream().map(HostSpec::hostname).collect(Collectors.toSet()); }
- public Set<String> toHostNames(List<Node> nodes) {
+ public Set<String> toHostNames(NodeList nodes) { return
nodes.stream().map(Node::hostname).collect(Collectors.toSet());
 }
@@ -259,7 +259,7 @@ * number of matches to the given filters */
 public void assertRestartCount(ApplicationId application, HostFilter... filters) {
- for (Node node : nodeRepository.nodes().getNodes(application, Node.State.active)) {
+ for (Node node : nodeRepository.nodes().list(application, Node.State.active)) {
 int expectedRestarts = 0;
 for (HostFilter filter : filters)
 if (NodeHostFilter.from(filter).matches(node))
@@ -441,7 +441,7 @@ return nodes; }
- public List<Node> makeConfigServers(int n, String flavor, Version configServersVersion) {
+ public NodeList makeConfigServers(int n, String flavor, Version configServersVersion) {
 List<Node> nodes = new ArrayList<>(n);
 MockNameResolver nameResolver = (MockNameResolver)nodeRepository().nameResolver();
@@ -464,7 +464,7 @@ application.getClusterSpecWithVersion(configServersVersion), application.getCapacity());
 activate(application.getApplicationId(), new HashSet<>(hosts));
- return nodeRepository.nodes().getNodes(application.getApplicationId(), Node.State.active);
+ return nodeRepository.nodes().list(application.getApplicationId(), Node.State.active);
 }
 public List<Node> makeReadyNodes(int n, String flavor, NodeType type, int ipAddressPoolSize) {
@@ -560,7 +560,7 @@ }
 public void assertAllocatedOn(String explanation, String hostFlavor, ApplicationId app) {
- for (Node node : nodeRepository.nodes().getNodes(app)) {
+ for (Node node : nodeRepository.nodes().list(app)) {
 Node parent = nodeRepository.nodes().getNode(node.parentHostname().get()).get();
 assertEquals(node + ": " + explanation, hostFlavor, parent.flavor().name());
 }
@@ -594,10 +594,10 @@ }
 public int hostFlavorCount(String hostFlavor, ApplicationId app) {
- return (int)nodeRepository().nodes().getNodes(app).stream()
- .map(n -> nodeRepository().nodes().getNode(n.parentHostname().get()).get())
- .filter(p -> p.flavor().name().equals(hostFlavor))
- .count();
+ return (int)nodeRepository().nodes().list(app).stream()
+ .map(n -> nodeRepository().nodes().getNode(n.parentHostname().get()).get())
+ .filter(p -> p.flavor().name().equals(hostFlavor))
+ .count();
 }
 public static final class Builder {
--
cgit v1.2.3

From b8530e7874b8f063a3681d2f9768a3459a076294 Mon Sep 17 00:00:00 2001
From: Jon Bratseth
Date: Wed, 10 Feb 2021 14:50:13 +0100
Subject: Remove getNodes(...): Always use list(...)
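
This commit continues the previous one: the remaining getNodes(...) call
sites move to list(...), which returns a NodeList that is narrowed with its
fluent filters instead of passing a NodeType to the repository. A minimal
sketch of the pattern (the variable name and the proxy/failed combination
are illustrative, not taken from any one call site):

    // Before: the repository filtered by type and returned a List<Node>
    List<Node> failedProxies = nodeRepository.nodes().getNodes(NodeType.proxy, Node.State.failed);

    // After: list by state, then filter by type on the returned NodeList
    NodeList failedProxies = nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.proxy);

Where a plain List is still required, asList() converts; where code used
get(0), first() returns an Optional of the first element.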
--- .../provision/lb/SharedLoadBalancerService.java | 5 +- .../hosted/provision/maintenance/NodeFailer.java | 2 +- .../yahoo/vespa/hosted/provision/node/Nodes.java | 11 -- .../hosted/provision/restapi/NodesResponse.java | 2 +- .../hosted/provision/NodeRepositoryTester.java | 2 +- .../provision/maintenance/CapacityCheckerTest.java | 6 +- .../DynamicProvisioningMaintainerTest.java | 2 +- .../maintenance/InactiveAndFailedExpirerTest.java | 6 +- .../provision/maintenance/MetricsReporterTest.java | 2 +- .../provision/maintenance/NodeFailTester.java | 4 +- .../provision/maintenance/NodeFailerTest.java | 153 +++++++++++---------- .../provision/maintenance/NodeRebooterTest.java | 9 +- .../PeriodicApplicationMaintainerTest.java | 18 +-- .../maintenance/ReservationExpirerTest.java | 13 +- .../vespa/hosted/provision/os/OsVersionsTest.java | 8 +- .../provisioning/AclProvisioningTest.java | 10 +- .../provisioning/DynamicDockerAllocationTest.java | 6 +- .../provisioning/DynamicDockerProvisionTest.java | 22 +-- .../provisioning/LoadBalancerProvisionerTest.java | 4 +- .../provisioning/NodeTypeProvisioningTest.java | 39 +++--- .../provision/provisioning/ProvisioningTester.java | 2 +- 21 files changed, 159 insertions(+), 167 deletions(-) (limited to 'node-repository') diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java index da5591e0800..d370681a087 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java @@ -34,14 +34,13 @@ public class SharedLoadBalancerService implements LoadBalancerService { @Override public LoadBalancerInstance create(LoadBalancerSpec spec, boolean force) { - var proxyNodes = new ArrayList<>(nodeRepository.nodes().getNodes(NodeType.proxy)); - proxyNodes.sort(hostnameComparator); + var proxyNodes = nodeRepository.nodes().list().nodeType(NodeType.proxy).sortedBy(hostnameComparator); if (proxyNodes.size() == 0) { throw new IllegalStateException("Missing proxy nodes in node repository"); } - var firstProxyNode = proxyNodes.get(0); + var firstProxyNode = proxyNodes.first().get(); var networkNames = proxyNodes.stream() .flatMap(node -> node.ipConfig().primary().stream()) .map(SharedLoadBalancerService::withPrefixLength) diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java index 0591bd11eba..54fe93062d6 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java @@ -246,7 +246,7 @@ public class NodeFailer extends NodeRepositoryMaintainer { return true; case proxy: case proxyhost: - return nodeRepository().nodes().getNodes(nodeType, Node.State.failed).size() == 0; + return nodeRepository().nodes().list(Node.State.failed).nodeType(nodeType).isEmpty(); default: return false; } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java index 904bf08676d..78893106c22 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java +++ 
b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
@@ -74,17 +74,6 @@ public class Nodes {
 return db.readNode(hostname, inState);
 }
- /**
- * Finds and returns the nodes of the given type in any of the given states.
- *
- * @param type the node type to return
- * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
- * @return the node, or empty if it was not found in any of the given states
- */
- public List<Node> getNodes(NodeType type, Node.State... inState) {
- return db.readNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList());
- }
-
 /**
 * Returns a list of nodes in this repository in any of the given states
 *
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java
index 5ef71a6c1b1..6e30ebb7ab3 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java
@@ -110,7 +110,7 @@ class NodesResponse extends HttpResponse {
 private void nodesToSlime(Node.State state, Cursor parentObject) {
 Cursor nodeArray = parentObject.setArray("nodes");
 for (NodeType type : NodeType.values())
- toSlime(nodeRepository.nodes().getNodes(type, state), nodeArray);
+ toSlime(nodeRepository.nodes().list(state).nodeType(type).asList(), nodeArray);
 }
 /** Outputs all the nodes to a node array */
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTester.java
index 5e3b474e48b..e2db91dbae4 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTester.java
@@ -52,7 +52,7 @@ public class NodeRepositoryTester {
 public MockCurator curator() { return curator; }
 public List<Node> getNodes(NodeType type, Node.State ...
inState) {
- return nodeRepository.nodes().getNodes(type, inState);
+ return nodeRepository.nodes().list(inState).nodeType(type).asList();
 }
 public Node addHost(String id, String hostname, String flavor, NodeType type) {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityCheckerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityCheckerTest.java
index 038d5729f0e..3031bbc9819 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityCheckerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityCheckerTest.java
@@ -28,7 +28,7 @@ public class CapacityCheckerTest {
 tester.populateNodeRepositoryFromJsonFile(Paths.get(path));
 var failurePath = tester.capacityChecker.worstCaseHostLossLeadingToFailure();
 assertTrue(failurePath.isPresent());
- assertTrue(tester.nodeRepository.nodes().getNodes(NodeType.host).containsAll(failurePath.get().hostsCausingFailure));
+ assertTrue(tester.nodeRepository.nodes().list().nodeType(NodeType.host).asList().containsAll(failurePath.get().hostsCausingFailure));
 assertEquals(5, failurePath.get().hostsCausingFailure.size());
 }
@@ -39,7 +39,7 @@ 10, new NodeResources(-1, 10, 100, 1), 10,
 0, new NodeResources(1, 10, 100, 1), 10);
 int overcommittedHosts = tester.capacityChecker.findOvercommittedHosts().size();
- assertEquals(tester.nodeRepository.nodes().getNodes(NodeType.host).size(), overcommittedHosts);
+ assertEquals(tester.nodeRepository.nodes().list().nodeType(NodeType.host).size(), overcommittedHosts);
 }
 @Test
@@ -63,7 +63,7 @@ assertTrue(failurePath.isPresent());
 assertTrue("Computing worst case host loss if all hosts have to be removed should result in an non-empty failureReason with empty nodes.",
 failurePath.get().failureReason.tenant.isEmpty() && failurePath.get().failureReason.host.isEmpty());
- assertEquals(tester.nodeRepository.nodes().getNodes(NodeType.host).size(), failurePath.get().hostsCausingFailure.size());
+ assertEquals(tester.nodeRepository.nodes().list().nodeType(NodeType.host).size(), failurePath.get().hostsCausingFailure.size());
 }
 {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java
index 5cc25e83415..39d49a319ac 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java
@@ -382,7 +382,7 @@ public class DynamicProvisioningMaintainerTest {
 var tester = new DynamicProvisioningTester().addInitialNodes();
 tester.hostProvisioner.with(Behaviour.failDnsUpdate);
- Supplier<List<Node>> provisioning = () -> tester.nodeRepository.nodes().getNodes(NodeType.host, Node.State.provisioned);
+ Supplier<NodeList> provisioning = () -> tester.nodeRepository.nodes().list(Node.State.provisioned).nodeType(NodeType.host);
 assertEquals(2, provisioning.get().size());
 tester.maintainer.maintain();
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java
index 0f24826f1be..81880de4d92 100644
---
a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java @@ -81,10 +81,10 @@ public class InactiveAndFailedExpirerTest { // Dirty times out for the other one tester.advanceTime(Duration.ofMinutes(14)); new DirtyExpirer(tester.nodeRepository(), Duration.ofMinutes(10), new TestMetric()).run(); - assertEquals(0, tester.nodeRepository().nodes().getNodes(NodeType.tenant, Node.State.dirty).size()); - List failed = tester.nodeRepository().nodes().getNodes(NodeType.tenant, Node.State.failed); + assertEquals(0, tester.nodeRepository().nodes().list(Node.State.dirty).nodeType(NodeType.tenant).size()); + NodeList failed = tester.nodeRepository().nodes().list(Node.State.failed).nodeType(NodeType.tenant); assertEquals(1, failed.size()); - assertEquals(1, failed.get(0).status().failCount()); + assertEquals(1, failed.first().get().status().failCount()); } @Test diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java index 6cfd95e828a..b20821bd4a6 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java @@ -181,7 +181,7 @@ public class MetricsReporterTest { } NestedTransaction transaction = new NestedTransaction(); - nodeRepository.nodes().activate(nodeRepository.nodes().getNodes(NodeType.host), transaction); + nodeRepository.nodes().activate(nodeRepository.nodes().list().nodeType(NodeType.host).asList(), transaction); transaction.commit(); Orchestrator orchestrator = mock(Orchestrator.class); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java index 402eabe457e..5ad01902125 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java @@ -133,7 +133,7 @@ public class NodeFailTester { tester.activate(tenantHostApp, clusterNodeAdminApp, allHosts); tester.activate(app1, clusterApp1, capacity1); tester.activate(app2, clusterApp2, capacity2); - assertEquals(Set.of(tester.nodeRepository.nodes().getNodes(NodeType.host)), + assertEquals(Set.of(tester.nodeRepository.nodes().list().nodeType(NodeType.host).asList()), Set.of(tester.nodeRepository.nodes().list(tenantHostApp, Node.State.active).asList())); assertEquals(capacity1.minResources().nodes(), tester.nodeRepository.nodes().list(app1, Node.State.active).size()); assertEquals(capacity2.minResources().nodes(), tester.nodeRepository.nodes().list(app2, Node.State.active).size()); @@ -164,7 +164,7 @@ public class NodeFailTester { Capacity allNodes = Capacity.fromRequiredNodeType(nodeType); ClusterSpec clusterApp1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test")).vespaVersion("6.42").build(); tester.activate(app1, clusterApp1, allNodes); - assertEquals(count, tester.nodeRepository.nodes().getNodes(nodeType, Node.State.active).size()); + assertEquals(count, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size()); Map apps = Map.of( app1, 
new MockDeployer.ApplicationContext(app1, clusterApp1, allNodes)); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java index 5f82800d31c..56f06b59138 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java @@ -9,6 +9,7 @@ import com.yahoo.config.provision.NodeType; import com.yahoo.vespa.applicationmodel.ServiceInstance; import com.yahoo.vespa.applicationmodel.ServiceStatus; import com.yahoo.vespa.hosted.provision.Node; +import com.yahoo.vespa.hosted.provision.NodeList; import com.yahoo.vespa.hosted.provision.NodeRepository; import com.yahoo.vespa.hosted.provision.node.Agent; import com.yahoo.vespa.hosted.provision.node.Report; @@ -219,19 +220,19 @@ public class NodeFailerTest { tester.allNodesMakeAConfigRequestExcept(); assertEquals( 0, tester.deployer.redeployments); - assertEquals(12, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).size()); - assertEquals( 0, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size()); - assertEquals( 4, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size()); + assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size()); + assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size()); + assertEquals( 4, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size()); } // Hardware failures are detected on two ready nodes, which are then failed - Node readyFail1 = tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).get(2); - Node readyFail2 = tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).get(3); + Node readyFail1 = tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).asList().get(2); + Node readyFail2 = tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).asList().get(3); tester.nodeRepository.nodes().write(readyFail1.with(new Reports().withReport(badTotalMemorySizeReport)), () -> {}); tester.nodeRepository.nodes().write(readyFail2.with(new Reports().withReport(badTotalMemorySizeReport)), () -> {}); - assertEquals(4, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size()); + assertEquals(4, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size()); tester.runMaintainers(); - assertEquals(2, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size()); + assertEquals(2, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size()); assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(readyFail1.hostname()).get().state()); assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(readyFail2.hostname()).get().state()); @@ -245,9 +246,9 @@ public class NodeFailerTest { tester.clock.advance(Duration.ofMinutes(5)); tester.allNodesMakeAConfigRequestExcept(); assertEquals( 0, tester.deployer.redeployments); - assertEquals(12, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).size()); - assertEquals( 2, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size()); - assertEquals( 2, 
tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size()); + assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size()); + assertEquals( 2, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size()); + assertEquals( 2, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size()); } tester.serviceMonitor.setHostUp(downHost1); @@ -256,10 +257,10 @@ public class NodeFailerTest { tester.allNodesMakeAConfigRequestExcept(); tester.runMaintainers(); assertEquals( 1, tester.deployer.redeployments); - assertEquals(12, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).size()); - assertEquals( 3, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size()); - assertEquals( 1, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size()); - assertEquals(downHost2, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).get(0).hostname()); + assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size()); + assertEquals( 3, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size()); + assertEquals( 1, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size()); + assertEquals(downHost2, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).asList().get(0).hostname()); // downHost1 fails again tester.serviceMonitor.setHostDown(downHost1); @@ -275,9 +276,9 @@ public class NodeFailerTest { tester.allNodesMakeAConfigRequestExcept(); tester.runMaintainers(); assertEquals( 2, tester.deployer.redeployments); - assertEquals(12, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).size()); - assertEquals( 4, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size()); - assertEquals( 0, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size()); + assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size()); + assertEquals( 4, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size()); + assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size()); // the last host goes down Node lastNode = tester.highestIndex(tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active)); @@ -288,9 +289,9 @@ public class NodeFailerTest { tester.clock.advance(Duration.ofMinutes(5)); tester.allNodesMakeAConfigRequestExcept(); assertEquals( 2, tester.deployer.redeployments); - assertEquals(12, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).size()); - assertEquals( 4, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size()); - assertEquals( 0, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size()); + assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size()); + assertEquals( 4, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size()); + assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size()); } // A new node is available @@ -300,9 +301,9 @@ public class NodeFailerTest { tester.runMaintainers(); // The node is now failed assertEquals( 3, tester.deployer.redeployments); - assertEquals(12, 
tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).size()); - assertEquals( 5, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size()); - assertEquals( 0, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size()); + assertEquals(12, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size()); + assertEquals( 5, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size()); + assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size()); assertTrue("The index of the last failed node is not reused", tester.highestIndex(tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active)).allocation().get().membership().index() > @@ -317,12 +318,12 @@ public class NodeFailerTest { tester.serviceMonitor.setHostDown(downNode); tester.allNodesMakeAConfigRequestExcept(); tester.runMaintainers(); - assertEquals(0, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size()); + assertEquals(0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size()); tester.clock.advance(Duration.ofMinutes(75)); tester.allNodesMakeAConfigRequestExcept(); tester.runMaintainers(); - assertEquals(1, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size()); + assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size()); assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(downNode).get().state()); // Re-activate the node. It is still down, but should not be failed out until the grace period has passed again @@ -330,12 +331,12 @@ public class NodeFailerTest { tester.clock.advance(Duration.ofMinutes(30)); tester.allNodesMakeAConfigRequestExcept(); tester.runMaintainers(); - assertEquals(0, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size()); + assertEquals(0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size()); tester.clock.advance(Duration.ofMinutes(45)); tester.allNodesMakeAConfigRequestExcept(); tester.runMaintainers(); - assertEquals(1, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size()); + assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size()); assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(downNode).get().state()); } @@ -358,8 +359,8 @@ public class NodeFailerTest { tester.clock.advance(Duration.ofMinutes(5)); tester.allNodesMakeAConfigRequestExcept(); assertEquals(0, tester.deployer.redeployments); - assertEquals(3, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).size()); - assertEquals(0, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size()); + assertEquals(3, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size()); + assertEquals(0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size()); } // downHost should now be failed and replaced @@ -367,9 +368,9 @@ public class NodeFailerTest { tester.allNodesMakeAConfigRequestExcept(); tester.runMaintainers(); assertEquals(1, tester.deployer.redeployments); - assertEquals(1, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size()); - assertEquals(3, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).size()); - 
assertEquals(downHost, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).get(0).hostname()); + assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size()); + assertEquals(3, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size()); + assertEquals(downHost, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).asList().get(0).hostname()); } @Test @@ -385,10 +386,10 @@ public class NodeFailerTest { tester.clock.advance(Duration.ofMinutes(interval)); tester.allNodesMakeAConfigRequestExcept(); tester.runMaintainers(); - assertEquals( 5, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size()); + assertEquals( 5, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size()); } - List ready = tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready); + NodeList ready = tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant); // Two ready nodes and a ready docker node die, but only 2 of those are failed out tester.clock.advance(Duration.ofMinutes(180)); @@ -398,16 +399,16 @@ public class NodeFailerTest { .collect(Collectors.toList()); tester.allNodesMakeAConfigRequestExcept(otherNodes.get(0), otherNodes.get(2), dockerNode); tester.runMaintainers(); - assertEquals( 3, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size()); - assertEquals( 2, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size()); + assertEquals( 3, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size()); + assertEquals( 2, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size()); // Another ready node dies and the node that died earlier, are allowed to fail tester.clock.advance(Duration.ofDays(1)); tester.allNodesMakeAConfigRequestExcept(otherNodes.get(0), otherNodes.get(2), dockerNode, otherNodes.get(3)); tester.runMaintainers(); - assertEquals( 1, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size()); - assertEquals(otherNodes.get(1), tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).get(0)); - assertEquals( 4, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size()); + assertEquals( 1, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size()); + assertEquals(otherNodes.get(1), tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).first().get()); + assertEquals( 4, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size()); } @Test @@ -419,17 +420,17 @@ public class NodeFailerTest { tester.clock.advance(Duration.ofMinutes(interval)); tester.allNodesMakeAConfigRequestExcept(); tester.runMaintainers(); - assertEquals( 3, tester.nodeRepository.nodes().getNodes(NodeType.host, Node.State.ready).size()); - assertEquals( 0, tester.nodeRepository.nodes().getNodes(NodeType.host, Node.State.failed).size()); + assertEquals( 3, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).size()); + assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.host).size()); } // Two ready nodes and a ready docker node die, but only 2 of those are failed out tester.clock.advance(Duration.ofMinutes(180)); - Node dockerHost = tester.nodeRepository.nodes().getNodes(NodeType.host, Node.State.ready).iterator().next(); + 
Node dockerHost = tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).iterator().next(); tester.allNodesMakeAConfigRequestExcept(dockerHost); tester.runMaintainers(); - assertEquals( 3, tester.nodeRepository.nodes().getNodes(NodeType.host, Node.State.ready).size()); - assertEquals( 0, tester.nodeRepository.nodes().getNodes(NodeType.host, Node.State.failed).size()); + assertEquals( 3, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).size()); + assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.host).size()); } @Test @@ -441,9 +442,9 @@ public class NodeFailerTest { tester.clock.advance(Duration.ofMinutes(interval)); tester.allNodesMakeAConfigRequestExcept(); tester.runMaintainers(); - assertEquals(8, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).size()); - assertEquals(13, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size()); - assertEquals(7, tester.nodeRepository.nodes().getNodes(NodeType.host, Node.State.active).size()); + assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size()); + assertEquals(13, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size()); + assertEquals(7, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size()); } @@ -457,9 +458,9 @@ public class NodeFailerTest { tester.clock.advance(Duration.ofMinutes(5)); tester.allNodesMakeAConfigRequestExcept(); assertEquals(0, tester.deployer.redeployments); - assertEquals(8, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).size()); - assertEquals(13, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size()); - assertEquals(7, tester.nodeRepository.nodes().getNodes(NodeType.host, Node.State.active).size()); + assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size()); + assertEquals(13, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size()); + assertEquals(7, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size()); } tester.clock.advance(Duration.ofMinutes(30)); @@ -467,14 +468,14 @@ public class NodeFailerTest { tester.runMaintainers(); assertEquals(2 + 1, tester.deployer.redeployments); - assertEquals(3, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size()); - assertEquals(8, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).size()); - assertEquals(10, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size()); - assertEquals(6, tester.nodeRepository.nodes().getNodes(NodeType.host, Node.State.active).size()); + assertEquals(3, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size()); + assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size()); + assertEquals(10, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size()); + assertEquals(6, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size()); // Now lets fail an active tenant node - Node downTenant1 = tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).get(0); + Node downTenant1 = tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).first().get(); tester.serviceMonitor.setHostDown(downTenant1.hostname()); // nothing 
happens during the entire day because of the failure throttling @@ -490,10 +491,10 @@ public class NodeFailerTest { tester.runMaintainers(); assertEquals(3 + 1, tester.deployer.redeployments); - assertEquals(4, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size()); - assertEquals(8, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).size()); - assertEquals(9, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size()); - assertEquals(6, tester.nodeRepository.nodes().getNodes(NodeType.host, Node.State.active).size()); + assertEquals(4, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size()); + assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size()); + assertEquals(9, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size()); + assertEquals(6, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size()); // Lets fail another host, make sure it is not the same where downTenant1 is a child @@ -505,10 +506,10 @@ public class NodeFailerTest { tester.runMaintainers(); assertEquals(5 + 2, tester.deployer.redeployments); - assertEquals(7, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size()); - assertEquals(8, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).size()); - assertEquals(6, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size()); - assertEquals(5, tester.nodeRepository.nodes().getNodes(NodeType.host, Node.State.active).size()); + assertEquals(7, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size()); + assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size()); + assertEquals(6, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size()); + assertEquals(5, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size()); // We have only 5 hosts remaining, so if we fail another host, we should only be able to redeploy app1's // node, while app2's should remain @@ -520,10 +521,10 @@ public class NodeFailerTest { tester.runMaintainers(); assertEquals(6 + 2, tester.deployer.redeployments); - assertEquals(9, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size()); - assertEquals(8, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).size()); - assertEquals(4, tester.nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size()); - assertEquals(5, tester.nodeRepository.nodes().getNodes(NodeType.host, Node.State.active).size()); + assertEquals(9, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size()); + assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size()); + assertEquals(4, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size()); + assertEquals(5, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size()); } @Test @@ -545,7 +546,7 @@ public class NodeFailerTest { tester.clock.advance(Duration.ofMinutes(5)); tester.allNodesMakeAConfigRequestExcept(); - assertEquals(count, tester.nodeRepository.nodes().getNodes(nodeType, Node.State.active).size()); + assertEquals(count, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size()); } Set downHosts = Set.of("host2", 
"host3"); @@ -558,7 +559,7 @@ public class NodeFailerTest { tester.clock.advance(Duration.ofMinutes(5)); tester.allNodesMakeAConfigRequestExcept(); assertEquals( 0, tester.deployer.redeployments); - assertEquals(count, tester.nodeRepository.nodes().getNodes(nodeType, Node.State.active).size()); + assertEquals(count, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size()); } tester.clock.advance(Duration.ofMinutes(60)); @@ -566,15 +567,15 @@ public class NodeFailerTest { // one down host should now be failed, but not two as we are only allowed to fail one proxy assertEquals(expectedFailCount, tester.deployer.redeployments); - assertEquals(count - expectedFailCount, tester.nodeRepository.nodes().getNodes(nodeType, Node.State.active).size()); - assertEquals(expectedFailCount, tester.nodeRepository.nodes().getNodes(nodeType, Node.State.failed).size()); - tester.nodeRepository.nodes().getNodes(nodeType, Node.State.failed) + assertEquals(count - expectedFailCount, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size()); + assertEquals(expectedFailCount, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(nodeType).size()); + tester.nodeRepository.nodes().list(Node.State.failed).nodeType(nodeType) .forEach(node -> assertTrue(downHosts.contains(node.hostname()))); // trying to fail again will still not fail the other down host tester.clock.advance(Duration.ofMinutes(60)); tester.runMaintainers(); - assertEquals(count - expectedFailCount, tester.nodeRepository.nodes().getNodes(nodeType, Node.State.active).size()); + assertEquals(count - expectedFailCount, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size()); } @Test @@ -599,7 +600,7 @@ public class NodeFailerTest { // 50 regular tenant nodes, 10 hosts with each 3 tenant nodes, total 90 nodes NodeFailTester tester = NodeFailTester.withTwoApplicationsOnDocker(10); List readyNodes = tester.createReadyNodes(50, 30); - List hosts = tester.nodeRepository.nodes().getNodes(NodeType.host); + NodeList hosts = tester.nodeRepository.nodes().list().nodeType(NodeType.host); List deadNodes = readyNodes.subList(0, 4); // 2 hours pass, 4 physical nodes die @@ -639,7 +640,7 @@ public class NodeFailerTest { } // 3 hosts fail. 2 of them and all of their children are allowed to fail - List failedHosts = hosts.subList(0, 3); + List failedHosts = hosts.asList().subList(0, 3); failedHosts.forEach(host -> { tester.serviceMonitor.setHostDown(host.hostname()); deadNodes.add(host); @@ -758,7 +759,7 @@ public class NodeFailerTest { */ private static String selectFirstParentHostWithNActiveNodesExcept(NodeRepository nodeRepository, int n, String... 
except) { Set exceptSet = Arrays.stream(except).collect(Collectors.toSet()); - return nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.active).stream() + return nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).stream() .collect(Collectors.groupingBy(Node::parentHostname)) .entrySet().stream() .filter(entry -> entry.getValue().size() == n) diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooterTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooterTest.java index cf883389225..dbaa5f034f6 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooterTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooterTest.java @@ -8,6 +8,7 @@ import com.yahoo.vespa.curator.mock.MockCurator; import com.yahoo.vespa.flags.InMemoryFlagSource; import com.yahoo.vespa.flags.PermanentFlags; import com.yahoo.vespa.hosted.provision.Node; +import com.yahoo.vespa.hosted.provision.NodeList; import com.yahoo.vespa.hosted.provision.NodeRepository; import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester; import org.junit.Test; @@ -94,8 +95,8 @@ public class NodeRebooterTest { while (true) { rebooter.maintain(); simulateReboot(nodeRepository); - List nodes = nodeRepository.nodes().getNodes(NodeType.host, Node.State.ready); - int count = withCurrentRebootGeneration(1L, nodes).size(); + NodeList nodes = nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host); + int count = withCurrentRebootGeneration(1L, nodes.asList()).size(); if (count == 2) { break; } @@ -103,8 +104,8 @@ public class NodeRebooterTest { } private void assertReadyHosts(int expectedCount, NodeRepository nodeRepository, long generation) { - List nodes = nodeRepository.nodes().getNodes(NodeType.host, Node.State.ready); - assertEquals(expectedCount, withCurrentRebootGeneration(generation, nodes).size()); + NodeList nodes = nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host); + assertEquals(expectedCount, withCurrentRebootGeneration(generation, nodes.asList()).size()); } private void makeReadyHosts(int count, ProvisioningTester tester) { diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java index 6ba716799d1..67b10a351cb 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java @@ -71,23 +71,23 @@ public class PeriodicApplicationMaintainerTest { int failedOrParkedInApp2 = 2; assertEquals(fixture.wantedNodesApp1 - failedInApp1, nodeRepository.nodes().list(fixture.app1, Node.State.active).size()); assertEquals(fixture.wantedNodesApp2 - failedOrParkedInApp2, nodeRepository.nodes().list(fixture.app2, Node.State.active).size()); - assertEquals(failedInApp1 + failedOrParkedInApp2, nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed, Node.State.parked).size()); - assertEquals(3, nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size()); - assertEquals(2, nodeRepository.nodes().getNodes(NodeType.host, Node.State.ready).size()); + assertEquals(failedInApp1 + failedOrParkedInApp2, 
nodeRepository.nodes().list(Node.State.failed, Node.State.parked).nodeType(NodeType.tenant).size()); + assertEquals(3, nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size()); + assertEquals(2, nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).size()); // Cause maintenance deployment which will allocate replacement nodes fixture.runApplicationMaintainer(); assertEquals(fixture.wantedNodesApp1, nodeRepository.nodes().list(fixture.app1, Node.State.active).size()); assertEquals(fixture.wantedNodesApp2, nodeRepository.nodes().list(fixture.app2, Node.State.active).size()); - assertEquals(0, nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size()); + assertEquals(0, nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size()); // Reactivate the previously failed nodes - nodeRepository.nodes().reactivate(nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).get(0).hostname(), Agent.system, getClass().getSimpleName()); - nodeRepository.nodes().reactivate(nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).get(0).hostname(), Agent.system, getClass().getSimpleName()); - nodeRepository.nodes().reactivate(nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.parked).get(0).hostname(), Agent.system, getClass().getSimpleName()); + nodeRepository.nodes().reactivate(nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).first().get().hostname(), Agent.system, getClass().getSimpleName()); + nodeRepository.nodes().reactivate(nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).first().get().hostname(), Agent.system, getClass().getSimpleName()); + nodeRepository.nodes().reactivate(nodeRepository.nodes().list(Node.State.parked).nodeType(NodeType.tenant).first().get().hostname(), Agent.system, getClass().getSimpleName()); int reactivatedInApp1 = 1; int reactivatedInApp2 = 2; - assertEquals(0, nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).size()); + assertEquals(0, nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size()); assertEquals(fixture.wantedNodesApp1 + reactivatedInApp1, nodeRepository.nodes().list(fixture.app1, Node.State.active).size()); assertEquals(fixture.wantedNodesApp2 + reactivatedInApp2, nodeRepository.nodes().list(fixture.app2, Node.State.active).size()); assertEquals("The reactivated nodes are now active but not part of the application", @@ -250,7 +250,7 @@ public class PeriodicApplicationMaintainerTest { } NodeList getNodes(Node.State ... 
states) {
- return NodeList.copyOf(nodeRepository.nodes().getNodes(NodeType.tenant, states));
+ return nodeRepository.nodes().list(states).nodeType(NodeType.tenant);
 }
 void setBootstrapping(boolean bootstrapping) {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ReservationExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ReservationExpirerTest.java
index 5b67c7bc358..2248f11b141 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ReservationExpirerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ReservationExpirerTest.java
@@ -10,6 +10,7 @@ import com.yahoo.config.provision.NodeResources;
 import com.yahoo.config.provision.NodeType;
 import com.yahoo.test.ManualClock;
 import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
 import com.yahoo.vespa.hosted.provision.NodeRepository;
 import com.yahoo.vespa.hosted.provision.provisioning.FlavorConfigBuilder;
 import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
@@ -40,22 +41,22 @@ public class ReservationExpirerTest {
 tester.makeReadyHosts(1, hostResources);
 // Reserve 2 nodes
- assertEquals(2, nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.ready).size());
+ assertEquals(2, nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
 ApplicationId applicationId = new ApplicationId.Builder().tenant("foo").applicationName("bar").instanceName("fuz").build();
 ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test")).vespaVersion("6.42").build();
 tester.provisioner().prepare(applicationId, cluster, Capacity.from(new ClusterResources(2, 1, nodeResources)), null);
- assertEquals(2, nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.reserved).size());
+ assertEquals(2, nodeRepository.nodes().list(Node.State.reserved).nodeType(NodeType.tenant).size());
 // Reservation times out
 clock.advance(Duration.ofMinutes(14)); // Reserved but not used time out
 new ReservationExpirer(nodeRepository, Duration.ofMinutes(10), metric).run();
 // Assert nothing is reserved
- assertEquals(0, nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.reserved).size());
- List<Node> dirty = nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.dirty);
+ assertEquals(0, nodeRepository.nodes().list(Node.State.reserved).nodeType(NodeType.tenant).size());
+ NodeList dirty = nodeRepository.nodes().list(Node.State.dirty).nodeType(NodeType.tenant);
 assertEquals(2, dirty.size());
- assertFalse(dirty.get(0).allocation().isPresent());
- assertFalse(dirty.get(1).allocation().isPresent());
+ assertFalse(dirty.asList().get(0).allocation().isPresent());
+ assertFalse(dirty.asList().get(1).allocation().isPresent());
 assertEquals(2, metric.values.get("expired.reserved"));
 }
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
index e6e685899a5..c7a1610f75d 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
@@ -40,7 +40,7 @@ public class OsVersionsTest {
 public void upgrade() {
 var versions = new OsVersions(tester.nodeRepository(), new DelegatingUpgrader(tester.nodeRepository(), Integer.MAX_VALUE));
provisionInfraApplication(10);
- Supplier<List<Node>> hostNodes = () -> tester.nodeRepository().nodes().getNodes(NodeType.host);
+ Supplier<NodeList> hostNodes = () -> tester.nodeRepository().nodes().list().nodeType(NodeType.host);
 // Upgrade OS
 assertTrue("No versions set", versions.readChange().targets().isEmpty());
@@ -50,7 +50,7 @@ assertTrue("Per-node wanted OS version remains unset", hostNodes.get().stream().allMatch(node -> node.status().osVersion().wanted().isEmpty()));
 // One host upgrades to a later version outside the control of orchestration
- Node hostOnLaterVersion = hostNodes.get().get(0);
+ Node hostOnLaterVersion = hostNodes.get().first().get();
 setCurrentVersion(List.of(hostOnLaterVersion), Version.fromString("8.1"));
 // Upgrade OS again
@@ -60,12 +60,12 @@ // Resume upgrade
 versions.resumeUpgradeOf(NodeType.host, true);
- List<Node> allHosts = hostNodes.get();
+ NodeList allHosts = hostNodes.get();
 assertTrue("Wanted version is set", allHosts.stream()
 .filter(node -> !node.equals(hostOnLaterVersion))
 .allMatch(node -> node.status().osVersion().wanted().isPresent()));
 assertTrue("Wanted version is not set for host on later version",
- allHosts.get(0).status().osVersion().wanted().isEmpty());
+ allHosts.first().get().status().osVersion().wanted().isEmpty());
 // Halt upgrade
 versions.resumeUpgradeOf(NodeType.host, false);
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java
index b697cf1dc4b..39a206e2223 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java
@@ -73,12 +73,12 @@ public class AclProvisioningTest {
 deploy(2);
 // Get trusted nodes for a ready tenant node
- Node node = tester.nodeRepository().nodes().getNodes(NodeType.tenant, Node.State.ready).get(0);
+ Node node = tester.nodeRepository().nodes().list(Node.State.ready).nodeType(NodeType.tenant).first().get();
 NodeAcl nodeAcl = node.acl(tester.nodeRepository().nodes().list(), tester.nodeRepository().loadBalancers());
- List<Node> tenantNodes = tester.nodeRepository().nodes().getNodes(NodeType.tenant);
+ NodeList tenantNodes = tester.nodeRepository().nodes().list().nodeType(NodeType.tenant);
 // Trusted nodes are all proxy-, config-, and, tenant-nodes
- assertAcls(List.of(proxyNodes, configServers.asList(), tenantNodes), List.of(nodeAcl));
+ assertAcls(List.of(proxyNodes, configServers.asList(), tenantNodes.asList()), List.of(nodeAcl));
 }
 @Test
@@ -91,7 +91,7 @@ // Allocate 2 nodes
 deploy(4);
- List<Node> tenantNodes = tester.nodeRepository().nodes().getNodes(NodeType.tenant);
+ NodeList tenantNodes = tester.nodeRepository().nodes().list().nodeType(NodeType.tenant);
 // Get trusted nodes for the first config server
 Node node = tester.nodeRepository().nodes().getNode("cfg1")
 .orElseThrow();
 NodeAcl nodeAcl = node.acl(tester.nodeRepository().nodes().list(), tester.nodeRepository().loadBalancers());
 // Trusted nodes is all tenant nodes, all proxy nodes, all config servers and load balancer subnets
- assertAcls(List.of(tenantNodes, proxyNodes, configServers.asList()), Set.of("10.2.3.0/24", "10.4.5.0/24"), List.of(nodeAcl));
+ assertAcls(List.of(tenantNodes.asList(), proxyNodes, configServers.asList()),
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java
index b697cf1dc4b..39a206e2223 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java
@@ -73,12 +73,12 @@ public class AclProvisioningTest {
         deploy(2);
 
         // Get trusted nodes for a ready tenant node
-        Node node = tester.nodeRepository().nodes().getNodes(NodeType.tenant, Node.State.ready).get(0);
+        Node node = tester.nodeRepository().nodes().list(Node.State.ready).nodeType(NodeType.tenant).first().get();
         NodeAcl nodeAcl = node.acl(tester.nodeRepository().nodes().list(), tester.nodeRepository().loadBalancers());
-        List<Node> tenantNodes = tester.nodeRepository().nodes().getNodes(NodeType.tenant);
+        NodeList tenantNodes = tester.nodeRepository().nodes().list().nodeType(NodeType.tenant);
 
         // Trusted nodes are all proxy-, config-, and, tenant-nodes
-        assertAcls(List.of(proxyNodes, configServers.asList(), tenantNodes), List.of(nodeAcl));
+        assertAcls(List.of(proxyNodes, configServers.asList(), tenantNodes.asList()), List.of(nodeAcl));
     }
 
     @Test
@@ -91,7 +91,7 @@
         // Allocate 2 nodes
         deploy(4);
-        List<Node> tenantNodes = tester.nodeRepository().nodes().getNodes(NodeType.tenant);
+        NodeList tenantNodes = tester.nodeRepository().nodes().list().nodeType(NodeType.tenant);
 
         // Get trusted nodes for the first config server
         Node node = tester.nodeRepository().nodes().getNode("cfg1")
         NodeAcl nodeAcl = node.acl(tester.nodeRepository().nodes().list(), tester.nodeRepository().loadBalancers());
 
         // Trusted nodes is all tenant nodes, all proxy nodes, all config servers and load balancer subnets
-        assertAcls(List.of(tenantNodes, proxyNodes, configServers.asList()), Set.of("10.2.3.0/24", "10.4.5.0/24"), List.of(nodeAcl));
+        assertAcls(List.of(tenantNodes.asList(), proxyNodes, configServers.asList()),
+                   Set.of("10.2.3.0/24", "10.4.5.0/24"), List.of(nodeAcl));
     }
 
     @Test
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java
index 301341e1e61..94ce5b8c5fb 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java
@@ -69,7 +69,7 @@ public class DynamicDockerAllocationTest {
                 .build();
         tester.makeReadyNodes(4, "host-small", NodeType.host, 32);
         tester.activateTenantHosts();
-        List<Node> dockerHosts = tester.nodeRepository().nodes().getNodes(NodeType.host, State.active);
+        List<Node> dockerHosts = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host).asList();
         NodeResources flavor = new NodeResources(1, 4, 100, 1);
 
         // Application 1
@@ -110,7 +110,7 @@ public class DynamicDockerAllocationTest {
         ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build();
         tester.makeReadyNodes(5, "host-small", NodeType.host, 32);
         tester.activateTenantHosts();
-        List<Node> dockerHosts = tester.nodeRepository().nodes().getNodes(NodeType.host, State.active);
+        NodeList dockerHosts = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host);
         NodeResources resources = new NodeResources(1, 4, 100, 0.3);
 
         // Application 1
@@ -202,7 +202,7 @@ public class DynamicDockerAllocationTest {
         ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east"))).flavorsConfig(flavorsConfig()).build();
         tester.makeReadyNodes(2, "host-small", NodeType.host, 32);
         tester.activateTenantHosts();
-        List<Node> dockerHosts = tester.nodeRepository().nodes().getNodes(NodeType.host, State.active);
+        List<Node> dockerHosts = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host).asList();
         NodeResources flavor = new NodeResources(1, 4, 100, 1);
 
         // Application 1
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java
index 3d75760edc2..5f8a0c99b9f 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java
@@ -78,9 +78,9 @@ public class DynamicDockerProvisionTest {
 
         // Total of 8 nodes should now be in node-repo, 4 active hosts and 4 active nodes
         assertEquals(8, tester.nodeRepository().nodes().list().size());
-        assertEquals(4, tester.nodeRepository().nodes().getNodes(NodeType.host, Node.State.active).size());
+        assertEquals(4, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host).size());
         assertEquals(List.of("host-100-1", "host-101-1", "host-102-1", "host-103-1"),
-                     tester.nodeRepository().nodes().getNodes(NodeType.tenant, Node.State.active).stream()
+                     tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.tenant).stream()
                            .map(Node::hostname).sorted().collect(Collectors.toList()));
 
         // Deploy new application
@@ -89,10 +89,10 @@ public class DynamicDockerProvisionTest {
 
         // Total of 12 nodes should now be in node-repo, 4 active hosts and 8 active nodes
         assertEquals(12, tester.nodeRepository().nodes().list().size());
-        assertEquals(4, tester.nodeRepository().nodes().getNodes(NodeType.host, Node.State.active).size());
+        assertEquals(4, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host).size());
         assertEquals(List.of("host-100-1", "host-100-2", "host-101-1", "host-101-2",
                              "host-102-1", "host-102-2", "host-103-1", "host-103-2"),
-                     tester.nodeRepository().nodes().getNodes(NodeType.tenant, Node.State.active).stream()
+                     tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.tenant).stream()
                            .map(Node::hostname).sorted().collect(Collectors.toList()));
 
         // Deploy new exclusive application
@@ -104,8 +104,8 @@ public class DynamicDockerProvisionTest {
 
         // Total of 20 nodes should now be in node-repo, 8 active hosts and 12 active nodes
         assertEquals(20, tester.nodeRepository().nodes().list().size());
-        assertEquals(8, tester.nodeRepository().nodes().getNodes(NodeType.host, Node.State.active).size());
-        assertEquals(12, tester.nodeRepository().nodes().getNodes(NodeType.tenant, Node.State.active).size());
+        assertEquals(8, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.host).size());
+        assertEquals(12, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
 
         verifyNoMoreInteractions(hostProvisioner);
     }
@@ -125,7 +125,7 @@ public class DynamicDockerProvisionTest {
 
         // Total of 16 nodes should now be in node-repo, 8 active hosts and 8 active nodes
         assertEquals(16, tester.nodeRepository().nodes().list().size());
-        assertEquals(8, tester.nodeRepository().nodes().getNodes(NodeType.tenant, Node.State.active).size());
+        assertEquals(8, tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
 
         prepareAndActivate(application1, clusterSpec("mycluster"), 4, 1, smallResources);
         prepareAndActivate(application2, clusterSpec("mycluster", true), 4, 1, smallResources);
@@ -153,11 +153,11 @@ public class DynamicDockerProvisionTest {
         ApplicationId application3 = ProvisioningTester.applicationId();
         prepareAndActivate(application3, clusterSpec("mycluster"), 3, 1, resources);
-        assertEquals(4, tester.nodeRepository().nodes().getNodes(NodeType.tenant).stream().map(Node::parentHostname).distinct().count());
+        assertEquals(4, tester.nodeRepository().nodes().list().nodeType(NodeType.tenant).stream().map(Node::parentHostname).distinct().count());
 
         ApplicationId application4 = ProvisioningTester.applicationId();
         prepareAndActivate(application4, clusterSpec("mycluster"), 3, 1, resources);
-        assertEquals(5, tester.nodeRepository().nodes().getNodes(NodeType.tenant).stream().map(Node::parentHostname).distinct().count());
+        assertEquals(5, tester.nodeRepository().nodes().list().nodeType(NodeType.tenant).stream().map(Node::parentHostname).distinct().count());
     }
 
     @Test
@@ -398,9 +398,9 @@ public class DynamicDockerProvisionTest {
 
     private void prepareAndActivate(ApplicationId application, ClusterSpec clusterSpec, int nodes, int groups, NodeResources resources) {
         List<HostSpec> prepared = tester.prepare(application, clusterSpec, nodes, groups, resources);
-        List<Node> provisionedHosts = tester.nodeRepository().nodes().getNodes(NodeType.host, Node.State.provisioned);
+        NodeList provisionedHosts = tester.nodeRepository().nodes().list(Node.State.provisioned).nodeType(NodeType.host);
         if (!provisionedHosts.isEmpty()) {
-            tester.nodeRepository().nodes().setReady(provisionedHosts, Agent.system, DynamicDockerProvisionTest.class.getSimpleName());
+            tester.nodeRepository().nodes().setReady(provisionedHosts.asList(), Agent.system, DynamicDockerProvisionTest.class.getSimpleName());
             tester.activateTenantHosts();
         }
         tester.activate(application, prepared);
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
index 93242b1cff2..a5e7704cce7 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
@@ -174,8 +174,8 @@ public class LoadBalancerProvisionerTest {
 
         // Application is removed, nodes are deleted and load balancer is deactivated
         tester.remove(app1);
-        tester.nodeRepository().database().removeNodes(tester.nodeRepository().nodes().getNodes(NodeType.tenant));
-        assertTrue("Nodes are deleted", tester.nodeRepository().nodes().getNodes(NodeType.tenant).isEmpty());
+        tester.nodeRepository().database().removeNodes(tester.nodeRepository().nodes().list().nodeType(NodeType.tenant).asList());
+        assertTrue("Nodes are deleted", tester.nodeRepository().nodes().list().nodeType(NodeType.tenant).isEmpty());
         assertSame("Load balancer is deactivated", LoadBalancer.State.inactive, lb.get().state());
 
         // Application is redeployed
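A recurring seam in these test conversions: APIs that still accept a java.util.List — setReady(...) and database().removeNodes(...) above — get the NodeList unwrapped with asList() at the call boundary. Sketch of the idiom, directly mirroring the prepareAndActivate hunk (the reason string is illustrative):

    NodeList provisionedHosts = nodeRepository.nodes().list(Node.State.provisioned).nodeType(NodeType.host);
    if (!provisionedHosts.isEmpty())
        nodeRepository.nodes().setReady(provisionedHosts.asList(), Agent.system, "example reason");

Keeping the NodeList as long as possible and converting only at the edge is what lets most call sites drop the eager list copies.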
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeTypeProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeTypeProvisioningTest.java
index acd3311651f..596591ad599 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeTypeProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeTypeProvisioningTest.java
@@ -7,6 +7,7 @@ import com.yahoo.config.provision.ClusterSpec;
 import com.yahoo.config.provision.HostSpec;
 import com.yahoo.config.provision.NodeType;
 import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
 import com.yahoo.vespa.hosted.provision.maintenance.RetiredExpirer;
 import com.yahoo.vespa.hosted.provision.maintenance.TestMetric;
 import com.yahoo.vespa.hosted.provision.node.Agent;
@@ -53,7 +54,7 @@ public class NodeTypeProvisioningTest {
         List<HostSpec> hosts = deployProxies(application, tester);
         assertEquals("Reserved all proxies", 11, hosts.size());
         tester.activate(application, new HashSet<>(hosts));
-        List<Node> nodes = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active);
+        NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
         assertEquals("Activated all proxies", 11, nodes.size());
     }
 
@@ -61,7 +62,7 @@
         List<HostSpec> hosts = deployProxies(application, tester);
         assertEquals(11, hosts.size());
         tester.activate(application, new HashSet<>(hosts));
-        List<Node> nodes = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active);
+        NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
         assertEquals(11, nodes.size());
     }
 
@@ -70,20 +71,20 @@
         {
             List<HostSpec> hosts = deployProxies(application, tester);
             assertEquals(13, hosts.size());
             tester.activate(application, new HashSet<>(hosts));
-            List<Node> nodes = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active);
+            NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
             assertEquals(13, nodes.size());
         }
 
         { // Remove 3 proxies then redeploy
-            List<Node> nodes = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active);
-            tester.nodeRepository().nodes().fail(nodes.get(0).hostname(), Agent.system, "Failing to unit test");
-            tester.nodeRepository().nodes().fail(nodes.get(1).hostname(), Agent.system, "Failing to unit test");
-            tester.nodeRepository().nodes().fail(nodes.get(5).hostname(), Agent.system, "Failing to unit test");
+            NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
+            tester.nodeRepository().nodes().fail(nodes.asList().get(0).hostname(), Agent.system, "Failing to unit test");
+            tester.nodeRepository().nodes().fail(nodes.asList().get(1).hostname(), Agent.system, "Failing to unit test");
+            tester.nodeRepository().nodes().fail(nodes.asList().get(5).hostname(), Agent.system, "Failing to unit test");
 
             List<HostSpec> hosts = deployProxies(application, tester);
             assertEquals(10, hosts.size());
             tester.activate(application, new HashSet<>(hosts));
-            nodes = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active);
+            nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
             assertEquals(10, nodes.size());
         }
     }
@@ -107,18 +108,18 @@
         List<HostSpec> hosts = deployProxies(application, tester);
         assertEquals("Reserved all proxies", 11, hosts.size());
         tester.activate(application, new HashSet<>(hosts));
-        List<Node> nodes = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active);
+        NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
         assertEquals("Activated all proxies", 11, nodes.size());
     }
 
-    Node nodeToRetire = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active).get(5);
+    Node nodeToRetire = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy).asList().get(5);
     { // Pick out a node and retire it
         tester.nodeRepository().nodes().write(nodeToRetire.withWantToRetire(true, Agent.system, tester.clock().instant()), () -> {});
 
         List<HostSpec> hosts = deployProxies(application, tester);
         assertEquals(11, hosts.size());
         tester.activate(application, new HashSet<>(hosts));
-        List<Node> nodes = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active);
+        NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
         assertEquals(11, nodes.size());
 
         // Verify that wantToRetire has been propagated
@@ -132,7 +133,7 @@
         List<HostSpec> hosts = deployProxies(application, tester);
         assertEquals(11, hosts.size());
         tester.activate(application, new HashSet<>(hosts));
-        List<Node> nodes = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active);
+        NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
         assertEquals(11, nodes.size());
 
         // Verify that the node is still marked as retired
@@ -149,7 +150,7 @@
         List<HostSpec> hosts = deployProxies(application, tester);
         assertEquals(10, hosts.size());
         tester.activate(application, new HashSet<>(hosts));
-        List<Node> nodes = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active);
+        NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
         assertEquals(10, nodes.size());
 
         // Verify that the node is now inactive
@@ -176,11 +177,11 @@ public class NodeTypeProvisioningTest {
         List<HostSpec> hosts = deployProxies(application, tester);
         assertEquals("Reserved all proxies", 11, hosts.size());
         tester.activate(application, new HashSet<>(hosts));
-        List<Node> nodes = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active);
+        NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
         assertEquals("Activated all proxies", 11, nodes.size());
     }
 
-    List<Node> nodesToRetire = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active)
+    List<Node> nodesToRetire = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy).asList()
             .subList(3, 3 + numNodesToRetire);
     String currentyRetiringHostname;
     {
@@ -190,7 +191,7 @@ public class NodeTypeProvisioningTest {
         List<HostSpec> hosts = deployProxies(application, tester);
         assertEquals(11, hosts.size());
         tester.activate(application, new HashSet<>(hosts));
-        List<Node> nodes = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active);
+        NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
         assertEquals(11, nodes.size());
 
         // Verify that wantToRetire has been propagated
@@ -208,7 +209,7 @@ public class NodeTypeProvisioningTest {
         List<HostSpec> hosts = deployProxies(application, tester);
         assertEquals(11, hosts.size());
         tester.activate(application, new HashSet<>(hosts));
-        List<Node> nodes = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active);
+        NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
         assertEquals(11, nodes.size());
 
         // Verify that wantToRetire has been propagated
@@ -228,7 +229,7 @@ public class NodeTypeProvisioningTest {
         List<HostSpec> hosts = deployProxies(application, tester);
         assertEquals(10, hosts.size());
         tester.activate(application, new HashSet<>(hosts));
-        List<Node> nodes = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active);
+        NodeList nodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy);
         assertEquals(10, nodes.size());
 
         // Verify the node we previously set to retire has finished retiring
@@ -257,7 +258,7 @@ public class NodeTypeProvisioningTest {
         }
 
         // After a long time, all currently active proxy nodes are not marked with wantToRetire or as retired
-        long numRetiredActiveProxyNodes = tester.nodeRepository().nodes().getNodes(NodeType.proxy, Node.State.active).stream()
+        long numRetiredActiveProxyNodes = tester.nodeRepository().nodes().list(Node.State.active).nodeType(NodeType.proxy).stream()
                 .filter(node -> !node.status().wantToRetire())
                 .filter(node -> !node.allocation().get().membership().retired())
                 .count();
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
index 46418ee3439..fd8eaf9d938 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
@@ -318,7 +318,7 @@ public class ProvisioningTester {
     public void fail(String hostname) {
         int beforeFailCount = nodeRepository.nodes().getNode(hostname, Node.State.active).get().status().failCount();
         Node failedNode = nodeRepository.nodes().fail(hostname, Agent.system, "Failing to unit test");
-        assertTrue(nodeRepository.nodes().getNodes(NodeType.tenant, Node.State.failed).contains(failedNode));
+        assertTrue(nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).asList().contains(failedNode));
         assertEquals(beforeFailCount + 1, failedNode.status().failCount());
     }
--
cgit v1.2.3

From 26ed007b3f26708cdfa5413d64f0dfa06fd3b7f2 Mon Sep 17 00:00:00 2001
From: Jon Bratseth
Date: Wed, 10 Feb 2021 15:14:13 +0100
Subject: getNode(...) -> node(...)

---
 .../IdentityDocumentGenerator.java                  |  2 +-
 .../IdentityDocumentGeneratorTest.java              |  4 +-
 .../provision/autoscale/MemoryMetricsDb.java        |  2 +-
 .../maintenance/MaintenanceDeployment.java          |  2 +-
 .../hosted/provision/maintenance/NodeFailer.java    |  7 +-
 .../provision/maintenance/NodeHealthTracker.java    |  2 +-
 .../maintenance/SpareCapacityMaintainer.java        |  2 +-
 .../yahoo/vespa/hosted/provision/node/Nodes.java    | 26 +++----
 .../hosted/provision/provisioning/Activator.java    |  2 +-
 .../hosted/provision/restapi/NodeAclResponse.java   |  2 +-
 .../hosted/provision/restapi/NodesResponse.java     |  2 +-
 .../provision/restapi/NodesV2ApiHandler.java        |  2 +-
 .../vespa/hosted/provision/NodeRepositoryTest.java  | 24 +++---
 .../hosted/provision/NodeRepositoryTester.java      |  2 +-
 .../provision/autoscale/AutoscalingTester.java      |  4 +-
 .../DynamicProvisioningMaintainerTest.java          | 90 +++++++++++-----------
 .../provision/maintenance/FailedExpirerTest.java    |  2 +-
 .../provision/maintenance/NodeFailerTest.java       | 54 ++++++-------
 .../maintenance/OsUpgradeActivatorTest.java         |  2 +-
 .../provision/maintenance/RebalancerTest.java       |  2 +-
 .../vespa/hosted/provision/os/OsVersionsTest.java   |  2 +-
 .../provisioning/AclProvisioningTest.java           |  4 +-
 .../provisioning/DockerProvisioningTest.java        |  6 +-
 .../provisioning/NodeTypeProvisioningTest.java      |  8 +-
 .../provision/provisioning/ProvisioningTest.java    | 11 ++-
 .../provision/provisioning/ProvisioningTester.java  |  8 +-
 26 files changed, 136 insertions(+), 138 deletions(-)

(limited to 'node-repository')
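This second commit is a pure rename: Nodes.getNode(hostname, states...) becomes Nodes.node(hostname, states...), with the Optional-returning contract unchanged. An illustrative before/after of a typical call site (the hostname and message are made up; NotFoundException is the exception type the patch itself uses):

    // Before
    Node n = nodeRepository.nodes().getNode("host1")
                           .orElseThrow(() -> new NotFoundException("No node with hostname 'host1'"));
    // After
    Node n = nodeRepository.nodes().node("host1")
                           .orElseThrow(() -> new NotFoundException("No node with hostname 'host1'"));

Nothing else about the signature changes, which is why the diff below is almost entirely mechanical.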
diff --git a/athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/identitydocument/IdentityDocumentGenerator.java b/athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/identitydocument/IdentityDocumentGenerator.java
index cf4ca85ecfd..b2ae42cc294 100644
--- a/athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/identitydocument/IdentityDocumentGenerator.java
+++ b/athenz-identity-provider-service/src/main/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/identitydocument/IdentityDocumentGenerator.java
@@ -47,7 +47,7 @@ public class IdentityDocumentGenerator {
 
     public SignedIdentityDocument generateSignedIdentityDocument(String hostname, IdentityType identityType) {
         try {
-            Node node = nodeRepository.nodes().getNode(hostname).orElseThrow(() -> new RuntimeException("Unable to find node " + hostname));
+            Node node = nodeRepository.nodes().node(hostname).orElseThrow(() -> new RuntimeException("Unable to find node " + hostname));
             Allocation allocation = node.allocation().orElseThrow(() -> new RuntimeException("No allocation for node " + node.hostname()));
             VespaUniqueInstanceId providerUniqueId = new VespaUniqueInstanceId(
                     allocation.membership().index(),
diff --git a/athenz-identity-provider-service/src/test/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/identitydocument/IdentityDocumentGeneratorTest.java b/athenz-identity-provider-service/src/test/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/identitydocument/IdentityDocumentGeneratorTest.java
index a6dfc6e9b9e..368767959bb 100644
--- a/athenz-identity-provider-service/src/test/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/identitydocument/IdentityDocumentGeneratorTest.java
+++ b/athenz-identity-provider-service/src/test/java/com/yahoo/vespa/hosted/athenz/instanceproviderservice/identitydocument/IdentityDocumentGeneratorTest.java
@@ -72,8 +72,8 @@ public class IdentityDocumentGeneratorTest {
         Nodes nodes = mock(Nodes.class);
         when(nodeRepository.nodes()).thenReturn(nodes);
-        when(nodes.getNode(eq(parentHostname))).thenReturn(Optional.of(parentNode));
-        when(nodes.getNode(eq(containerHostname))).thenReturn(Optional.of(containerNode));
+        when(nodes.node(eq(parentHostname))).thenReturn(Optional.of(parentNode));
+        when(nodes.node(eq(containerHostname))).thenReturn(Optional.of(containerNode));
 
         AutoGeneratedKeyProvider keyProvider = new AutoGeneratedKeyProvider();
         String dnsSuffix = "vespa.dns.suffix";
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MemoryMetricsDb.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MemoryMetricsDb.java
index a881bde2a33..45173650d60 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MemoryMetricsDb.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MemoryMetricsDb.java
@@ -75,7 +75,7 @@ public class MemoryMetricsDb implements MetricsDb {
     private void add(String hostname, MetricSnapshot snapshot) {
         NodeTimeseries timeseries = db.get(hostname);
         if (timeseries == null) { // new node
-            Optional<Node> node = nodeRepository.nodes().getNode(hostname);
+            Optional<Node> node = nodeRepository.nodes().node(hostname);
             if (node.isEmpty()) return;
             if (node.get().allocation().isEmpty()) return;
             timeseries = new NodeTimeseries(hostname, new ArrayList<>());
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java
index 47712847754..11292f9aa60 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java
@@ -185,7 +185,7 @@ class MaintenanceDeployment implements Closeable {
                 markWantToRetire(node, false, agent, nodeRepository); // Necessary if this failed, no-op otherwise
 
                 // Immediately clean up if we reserved the node but could not activate or reserved a node on the wrong host
-                expectedNewNode.flatMap(node -> nodeRepository.nodes().getNode(node.hostname(), Node.State.reserved))
+                expectedNewNode.flatMap(node -> nodeRepository.nodes().node(node.hostname(), Node.State.reserved))
                                .ifPresent(node -> nodeRepository.nodes().deallocate(node, agent, "Expired by " + agent));
             }
         }
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java
index 54fe93062d6..ba884ced630 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java
@@ -30,7 +30,6 @@ import java.util.logging.Logger;
 import java.util.stream.Collectors;
 
 import static java.util.stream.Collectors.collectingAndThen;
-import static java.util.stream.Collectors.counting;
 
 /**
  * Maintains information in the node repo about when this node last responded to ping
@@ -133,7 +132,7 @@ public class NodeFailer extends NodeRepositoryMaintainer {
             if (expectConfigRequests(node) && ! hasNodeRequestedConfigAfter(node, oldestAcceptableRequestTime)) {
                 nodesByFailureReason.put(node, "Not receiving config requests from node");
             } else {
-                Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().nodes().getNode(parent)).orElse(node);
+                Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().nodes().node(parent)).orElse(node);
                 List<String> failureReports = reasonsToFailParentHost(hostNode);
                 if (failureReports.size() > 0) {
                     if (hostNode.equals(node)) {
@@ -158,7 +157,7 @@
                 nodesByFailureReason.put(node, "Node has been down longer than " + downTimeLimit);
             }
             else if (hostSuspended(node, activeNodes)) {
-                Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().nodes().getNode(parent)).orElse(node);
+                Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().nodes().node(parent)).orElse(node);
                 if (hostNode.type().isHost()) {
                     List<String> failureReports = reasonsToFailParentHost(hostNode);
                     if (failureReports.size() > 0) {
@@ -184,7 +183,7 @@
 
     /** Returns whether node has any kind of hardware issue */
     static boolean hasHardwareIssue(Node node, NodeRepository nodeRepository) {
-        Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository.nodes().getNode(parent)).orElse(node);
+        Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository.nodes().node(parent)).orElse(node);
         return reasonsToFailParentHost(hostNode).size() > 0;
     }
 
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeHealthTracker.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeHealthTracker.java
index e9dc961ce39..2950de285b9 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeHealthTracker.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeHealthTracker.java
@@ -116,7 +116,7 @@ public class NodeHealthTracker extends NodeRepositoryMaintainer {
 
     /** Get node by given hostname and application. The applicationLock must be held when calling this */
     private Optional<Node> getNode(String hostname, ApplicationId application, @SuppressWarnings("unused") Mutex applicationLock) {
-        return nodeRepository().nodes().getNode(hostname, Node.State.active)
+        return nodeRepository().nodes().node(hostname, Node.State.active)
                                .filter(node -> node.allocation().isPresent())
                                .filter(node -> node.allocation().get().owner().equals(application));
     }
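The NodeFailer hunks above repeat one Optional idiom worth calling out: resolve a node's parent host if it has one, otherwise fall back to the node itself. A self-contained sketch of that fallback, using only the calls shown in the diff:

    // 'node' is a Node; nodes() is the renamed Nodes accessor from this commit.
    Node hostNode = node.parentHostname()                         // Optional<String>: parent hostname, if any
                        .flatMap(parent -> nodeRepository.nodes().node(parent)) // look up the parent, may be absent
                        .orElse(node);                            // no parent (or not found): treat the node as its own host

Because node(...) already returns Optional<Node>, flatMap avoids the nested Optional that map(...) would produce.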
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainer.java
index debc1484e58..ca580753fc8 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainer.java
@@ -165,7 +165,7 @@ public class SpareCapacityMaintainer extends NodeRepositoryMaintainer {
         try (MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, metric, nodeRepository())) {
             if ( ! deployment.isValid()) return; // this will be done at another config server
 
-            Optional<Node> nodeWithWantToRetire = nodeRepository().nodes().getNode(nodeToRetire.get().hostname())
+            Optional<Node> nodeWithWantToRetire = nodeRepository().nodes().node(nodeToRetire.get().hostname())
                                                                   .map(node -> node.withWantToRetire(true, Agent.SpareCapacityMaintainer, nodeRepository().clock().instant()));
             if (nodeWithWantToRetire.isEmpty()) return;
 
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
index 78893106c22..2dfd23b8680 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
@@ -70,7 +70,7 @@ public class Nodes {
      * @param inState the states the node may be in. If no states are given, it will be returned from any state
      * @return the node, or empty if it was not found in any of the given states
      */
-    public Optional<Node> getNode(String hostname, Node.State... inState) {
+    public Optional<Node> node(String hostname, Node.State... inState) {
         return db.readNode(hostname, inState);
     }
 
@@ -121,7 +121,7 @@
             illegal("Cannot add " + node + ": This is not a docker node");
         if (node.allocation().isEmpty())
             illegal("Cannot add " + node + ": Docker containers needs to be allocated");
-        Optional<Node> existing = getNode(node.hostname());
+        Optional<Node> existing = node(node.hostname());
         if (existing.isPresent())
             illegal("Cannot add " + node + ": A node with this name already exists (" +
                     existing.get() + ", " + existing.get().history() + "). Node to be added: " +
@@ -148,7 +148,7 @@
                     illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
             }
 
-            Optional<Node> existing = getNode(node.hostname());
+            Optional<Node> existing = node(node.hostname());
             if (existing.isPresent()) {
                 if (existing.get().state() != Node.State.deprovisioned)
                     illegal("Cannot add " + node + ": A node with this name already exists");
@@ -186,7 +186,7 @@
 
     public Node setReady(String hostname, Agent agent, String reason) {
-        Node nodeToReady = getNode(hostname).orElseThrow(() ->
+        Node nodeToReady = node(hostname).orElseThrow(() ->
                 new NoSuchNodeException("Could not move " + hostname + " to ready: Node not found"));
 
         if (nodeToReady.state() == Node.State.ready) return nodeToReady;
@@ -238,7 +238,7 @@
 
     public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) {
-        Node nodeToDirty = getNode(hostname).orElseThrow(() ->
+        Node nodeToDirty = node(hostname).orElseThrow(() ->
                 new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));
 
         List<Node> nodesToDirty =
@@ -351,7 +351,7 @@
      * Moves a host to breakfixed state, removing any children.
      */
     public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
-        Node node = getNode(hostname).orElseThrow(() ->
+        Node node = node(hostname).orElseThrow(() ->
                 new NoSuchNodeException("Could not breakfix " + hostname + ": Node not found"));
 
         try (Mutex lock = lockUnallocated()) {
@@ -380,7 +380,7 @@
     private Node move(String hostname, boolean keepAllocation, Node.State toState, Agent agent,
                       Optional<String> reason, NestedTransaction transaction) {
-        Node node = getNode(hostname).orElseThrow(() ->
+        Node node = node(hostname).orElseThrow(() ->
                 new NoSuchNodeException("Could not move " + hostname + " to " + toState + ": Node not found"));
 
         if (!keepAllocation && node.allocation().isPresent()) {
@@ -419,7 +419,7 @@
      * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
      */
    public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
-        Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
+        Node node = node(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
         if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
             if (node.state() != Node.State.dirty)
                 illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
@@ -428,7 +428,7 @@
         if (node.state() == Node.State.ready) return node;
 
-        Node parentHost = node.parentHostname().flatMap(this::getNode).orElse(node);
+        Node parentHost = node.parentHostname().flatMap(this::node).orElse(node);
         List<String> failureReasons = NodeFailer.reasonsToFailParentHost(parentHost);
         if ( ! failureReasons.isEmpty())
             illegal(node + " cannot be readied because it has hard failures: " + failureReasons);
@@ -442,7 +442,7 @@
      * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
      */
     public List<Node> removeRecursively(String hostname) {
-        Node node = getNode(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
+        Node node = node(hostname).orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
         return removeRecursively(node, false);
     }
 
@@ -670,9 +670,9 @@
         Mutex lockToClose = lock(staleNode);
         try {
             // As an optimization we first try finding the node in the same state
-            Optional<Node> freshNode = getNode(staleNode.hostname(), staleNode.state());
+            Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state());
             if (freshNode.isEmpty()) {
-                freshNode = getNode(staleNode.hostname());
+                freshNode = node(staleNode.hostname());
                 if (freshNode.isEmpty()) {
                     return Optional.empty();
                 }
@@ -698,7 +698,7 @@
 
     /** Returns the unallocated/application lock, and the node acquired under that lock. */
     public Optional<NodeMutex> lockAndGet(String hostname) {
-        return getNode(hostname).flatMap(this::lockAndGet);
+        return node(hostname).flatMap(this::lockAndGet);
     }
 
     /** Returns the unallocated/application lock, and the node acquired under that lock. */
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
index cad9faacf20..3c936e4e6ba 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
@@ -126,7 +126,7 @@ class Activator {
     private void unreserveParentsOf(List<Node> nodes) {
         for (Node node : nodes) {
             if ( node.parentHostname().isEmpty()) continue;
-            Optional<Node> parentNode = nodeRepository.nodes().getNode(node.parentHostname().get());
+            Optional<Node> parentNode = nodeRepository.nodes().node(node.parentHostname().get());
             if (parentNode.isEmpty()) continue;
             if (parentNode.get().reservedTo().isEmpty()) continue;
 
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodeAclResponse.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodeAclResponse.java
index 708a2f73ee6..811afc77cef 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodeAclResponse.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodeAclResponse.java
@@ -39,7 +39,7 @@ public class NodeAclResponse extends HttpResponse {
     }
 
     private void toSlime(String hostname, Cursor object) {
-        Node node = nodeRepository.nodes().getNode(hostname)
+        Node node = nodeRepository.nodes().node(hostname)
                 .orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'"));
 
         List<NodeAcl> acls = aclsForChildren ? nodeRepository.getChildAcls(node) :
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java
index 6e30ebb7ab3..1175736e517 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java
@@ -127,7 +127,7 @@ class NodesResponse extends HttpResponse {
     }
 
     private void nodeToSlime(String hostname, Cursor object) {
-        Node node = nodeRepository.nodes().getNode(hostname).orElseThrow(() ->
+        Node node = nodeRepository.nodes().node(hostname).orElseThrow(() ->
                 new NotFoundException("No node with hostname '" + hostname + "'"));
         toSlime(node, true, object);
     }
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java
index b872e2cd9cb..56b46c106ee 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java
@@ -223,7 +223,7 @@ public class NodesV2ApiHandler extends LoggingRequestHandler {
 
     private Node nodeFromRequest(HttpRequest request) {
         String hostname = lastElement(request.getUri().getPath());
-        return nodeRepository.nodes().getNode(hostname).orElseThrow(() ->
+        return nodeRepository.nodes().node(hostname).orElseThrow(() ->
                 new NotFoundException("No node found with hostname " + hostname));
    }
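As the Nodes.java hunks above show, the optional vararg states still act as a filter after the rename: node(hostname, inState...) returns empty unless the node exists in one of the given states. A short usage sketch under that assumption (hostname and reason string are illustrative), mirroring the MaintenanceDeployment cleanup pattern:

    Optional<Node> reserved = nodeRepository.nodes().node("host4-1", Node.State.reserved);
    reserved.ifPresent(node -> nodeRepository.nodes().deallocate(node, Agent.system, "expired"));

With no states given, the lookup matches the node in any state, so most call sites in the diffs below pass the hostname alone.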
a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java @@ -75,13 +75,13 @@ public class NodeRepositoryTest { tester.setNodeState("cfg1", Node.State.dirty); tester.nodeRepository().nodes().markNodeAvailableForNewAllocation("host1", Agent.system, getClass().getSimpleName()); - assertEquals(Node.State.ready, tester.nodeRepository().nodes().getNode("host1").get().state()); + assertEquals(Node.State.ready, tester.nodeRepository().nodes().node("host1").get().state()); tester.nodeRepository().nodes().markNodeAvailableForNewAllocation("host2", Agent.system, getClass().getSimpleName()); - assertFalse(tester.nodeRepository().nodes().getNode("host2").isPresent()); + assertFalse(tester.nodeRepository().nodes().node("host2").isPresent()); tester.nodeRepository().nodes().markNodeAvailableForNewAllocation("cfg1", Agent.system, getClass().getSimpleName()); - assertEquals(Node.State.ready, tester.nodeRepository().nodes().getNode("cfg1").get().state()); + assertEquals(Node.State.ready, tester.nodeRepository().nodes().node("cfg1").get().state()); } @Test @@ -92,14 +92,14 @@ public class NodeRepositoryTest { tester.setNodeState("host1", Node.State.dirty); tester.setNodeState("host2", Node.State.dirty); - Node node2 = tester.nodeRepository().nodes().getNode("host2").orElseThrow(); + Node node2 = tester.nodeRepository().nodes().node("host2").orElseThrow(); var reportsBuilder = new Reports.Builder(node2.reports()); reportsBuilder.setReport(Report.basicReport("reportId", Report.Type.HARD_FAIL, Instant.EPOCH, "hardware failure")); node2 = node2.with(reportsBuilder.build()); tester.nodeRepository().nodes().write(node2, () -> {}); tester.nodeRepository().nodes().markNodeAvailableForNewAllocation("host1", Agent.system, getClass().getSimpleName()); - assertEquals(Node.State.ready, tester.nodeRepository().nodes().getNode("host1").get().state()); + assertEquals(Node.State.ready, tester.nodeRepository().nodes().node("host1").get().state()); try { tester.nodeRepository().nodes().markNodeAvailableForNewAllocation("host2", Agent.system, getClass().getSimpleName()); @@ -133,7 +133,7 @@ public class NodeRepositoryTest { // Should be OK to delete host2 as both host2 and its only child, node20, are in state provisioned tester.nodeRepository().nodes().removeRecursively("host2"); assertEquals(5, tester.nodeRepository().nodes().list().size()); - assertEquals(Node.State.deprovisioned, tester.nodeRepository().nodes().getNode("host2").get().state()); + assertEquals(Node.State.deprovisioned, tester.nodeRepository().nodes().node("host2").get().state()); // Now node10 is in provisioned, set node11 to failed and node12 to ready, and it should be OK to delete host1 tester.nodeRepository().nodes().fail("node11", Agent.system, getClass().getSimpleName()); @@ -141,8 +141,8 @@ public class NodeRepositoryTest { tester.nodeRepository().nodes().removeRecursively("node12"); // Remove one of the children first instead assertEquals(4, tester.nodeRepository().nodes().list().size()); tester.nodeRepository().nodes().removeRecursively("host1"); - assertEquals(Node.State.deprovisioned, tester.nodeRepository().nodes().getNode("host1").get().state()); - assertEquals(IP.Config.EMPTY.primary(), tester.nodeRepository().nodes().getNode("host1").get().ipConfig().primary()); + assertEquals(Node.State.deprovisioned, tester.nodeRepository().nodes().node("host1").get().state()); + assertEquals(IP.Config.EMPTY.primary(), 
tester.nodeRepository().nodes().node("host1").get().ipConfig().primary()); } @Test @@ -179,7 +179,7 @@ public class NodeRepositoryTest { tester.clock().advance(Duration.ofSeconds(1)); tester.addHost("id1", "host1", "default", NodeType.host); tester.addHost("id2", "host2", "default", NodeType.host); - assertFalse(tester.nodeRepository().nodes().getNode("host1").get().history().hasEventAfter(History.Event.Type.deprovisioned, testStart)); + assertFalse(tester.nodeRepository().nodes().node("host1").get().history().hasEventAfter(History.Event.Type.deprovisioned, testStart)); // Set host 1 properties and deprovision it try (var lock = tester.nodeRepository().nodes().lockAndGetRequired("host1")) { @@ -192,16 +192,16 @@ public class NodeRepositoryTest { tester.nodeRepository().nodes().removeRecursively("host1"); // Host 1 is deprovisioned and unwanted properties are cleared - Node host1 = tester.nodeRepository().nodes().getNode("host1").get(); + Node host1 = tester.nodeRepository().nodes().node("host1").get(); assertEquals(Node.State.deprovisioned, host1.state()); assertTrue(host1.history().hasEventAfter(History.Event.Type.deprovisioned, testStart)); // Adding it again preserves some information from the deprovisioned host and removes it tester.addHost("id2", "host1", "default", NodeType.host); - host1 = tester.nodeRepository().nodes().getNode("host1").get(); + host1 = tester.nodeRepository().nodes().node("host1").get(); assertEquals("This is the newly added node", "id2", host1.id()); assertFalse("The old 'host1' is removed", - tester.nodeRepository().nodes().getNode("host1", Node.State.deprovisioned).isPresent()); + tester.nodeRepository().nodes().node("host1", Node.State.deprovisioned).isPresent()); assertFalse("Not transferred from deprovisioned host", host1.status().wantToRetire()); assertFalse("Not transferred from deprovisioned host", host1.status().wantToDeprovision()); assertTrue("Transferred from deprovisioned host", host1.history().hasEventAfter(History.Event.Type.deprovisioned, testStart)); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTester.java index e2db91dbae4..195a27f21a3 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTester.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTester.java @@ -79,7 +79,7 @@ public class NodeRepositoryTester { * of valid state transitions */ public void setNodeState(String hostname, Node.State state) { - Node node = nodeRepository.nodes().getNode(hostname).orElseThrow(RuntimeException::new); + Node node = nodeRepository.nodes().node(hostname).orElseThrow(RuntimeException::new); nodeRepository.database().writeTo(state, node, Agent.system, Optional.empty()); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java index 8a7f439304d..0581f9f84b3 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java @@ -96,9 +96,9 @@ class AutoscalingTester { } public void makeReady(String hostname) { - Node node = nodeRepository().nodes().getNode(hostname).get(); + Node node = nodeRepository().nodes().node(hostname).get(); provisioningTester.patchNode(node, 
(n) -> n.with(new IP.Config(Set.of("::" + 0 + ":0"), Set.of()))); - Node host = nodeRepository().nodes().getNode(node.parentHostname().get()).get(); + Node host = nodeRepository().nodes().node(node.parentHostname().get()).get(); host = host.with(new IP.Config(Set.of("::" + 0 + ":0"), Set.of("::" + 0 + ":2"))); if (host.state() == Node.State.provisioned) nodeRepository().nodes().setReady(List.of(host), Agent.system, getClass().getSimpleName()); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java index 39d49a319ac..26370709fa3 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainerTest.java @@ -63,9 +63,9 @@ public class DynamicProvisioningMaintainerTest { var tester = new DynamicProvisioningTester().addInitialNodes(); tester.hostProvisioner.with(Behaviour.failDeprovisioning); // To avoid deleting excess nodes - Node host3 = tester.nodeRepository.nodes().getNode("host3").orElseThrow(); - Node host4 = tester.nodeRepository.nodes().getNode("host4").orElseThrow(); - Node host41 = tester.nodeRepository.nodes().getNode("host4-1").orElseThrow(); + Node host3 = tester.nodeRepository.nodes().node("host3").orElseThrow(); + Node host4 = tester.nodeRepository.nodes().node("host4").orElseThrow(); + Node host41 = tester.nodeRepository.nodes().node("host4-1").orElseThrow(); assertTrue("No IP addresses assigned", Stream.of(host3, host4, host41).map(node -> node.ipConfig().primary()).allMatch(Set::isEmpty)); @@ -74,9 +74,9 @@ public class DynamicProvisioningMaintainerTest { Node host41new = host41.with(host41.ipConfig().withPrimary(Set.of("::4:1", "::4:2"))); tester.maintainer.maintain(); - assertEquals(host3new, tester.nodeRepository.nodes().getNode("host3").get()); - assertEquals(host4new, tester.nodeRepository.nodes().getNode("host4").get()); - assertEquals(host41new, tester.nodeRepository.nodes().getNode("host4-1").get()); + assertEquals(host3new, tester.nodeRepository.nodes().node("host3").get()); + assertEquals(host4new, tester.nodeRepository.nodes().node("host4").get()); + assertEquals(host41new, tester.nodeRepository.nodes().node("host4-1").get()); } @Test @@ -95,23 +95,23 @@ public class DynamicProvisioningMaintainerTest { @Test public void finds_nodes_that_need_deprovisioning_without_pre_provisioning() { var tester = new DynamicProvisioningTester().addInitialNodes(); - assertTrue(tester.nodeRepository.nodes().getNode("host2").isPresent()); - assertTrue(tester.nodeRepository.nodes().getNode("host3").isPresent()); + assertTrue(tester.nodeRepository.nodes().node("host2").isPresent()); + assertTrue(tester.nodeRepository.nodes().node("host3").isPresent()); tester.maintainer.maintain(); - assertTrue(tester.nodeRepository.nodes().getNode("host2").isEmpty()); - assertTrue(tester.nodeRepository.nodes().getNode("host3").isEmpty()); + assertTrue(tester.nodeRepository.nodes().node("host2").isEmpty()); + assertTrue(tester.nodeRepository.nodes().node("host3").isEmpty()); } @Test public void does_not_deprovision_when_preprovisioning_enabled() { var tester = new DynamicProvisioningTester().addInitialNodes(); tester.flagSource.withListFlag(PermanentFlags.PREPROVISION_CAPACITY.id(), List.of(new ClusterCapacity(1, 1, 3, 2, 1.0)), 
ClusterCapacity.class); - Optional failedHost = tester.nodeRepository.nodes().getNode("host2"); + Optional failedHost = tester.nodeRepository.nodes().node("host2"); assertTrue(failedHost.isPresent()); tester.maintainer.maintain(); - assertTrue("Failed host is deprovisioned", tester.nodeRepository.nodes().getNode(failedHost.get().hostname()).isEmpty()); + assertTrue("Failed host is deprovisioned", tester.nodeRepository.nodes().node(failedHost.get().hostname()).isEmpty()); assertEquals(1, tester.hostProvisioner.deprovisionedHosts); } @@ -125,11 +125,11 @@ public class DynamicProvisioningMaintainerTest { assertEquals(0, tester.hostProvisioner.provisionedHosts.size()); assertEquals(11, tester.nodeRepository.nodes().list().size()); - assertTrue(tester.nodeRepository.nodes().getNode("host2").isPresent()); - assertTrue(tester.nodeRepository.nodes().getNode("host2-1").isPresent()); - assertTrue(tester.nodeRepository.nodes().getNode("host3").isPresent()); - assertTrue(tester.nodeRepository.nodes().getNode("hostname100").isEmpty()); - assertTrue(tester.nodeRepository.nodes().getNode("hostname101").isEmpty()); + assertTrue(tester.nodeRepository.nodes().node("host2").isPresent()); + assertTrue(tester.nodeRepository.nodes().node("host2-1").isPresent()); + assertTrue(tester.nodeRepository.nodes().node("host3").isPresent()); + assertTrue(tester.nodeRepository.nodes().node("hostname100").isEmpty()); + assertTrue(tester.nodeRepository.nodes().node("hostname101").isEmpty()); tester.maintainer.maintain(); @@ -137,11 +137,11 @@ public class DynamicProvisioningMaintainerTest { assertEquals(2, tester.provisionedHostsMatching(new NodeResources(48, 128, 1000, 10))); NodeList nodesAfter = tester.nodeRepository.nodes().list(); assertEquals(11, nodesAfter.size()); // 2 removed, 2 added - assertTrue("Failed host 'host2' is deprovisioned", tester.nodeRepository.nodes().getNode("host2").isEmpty()); - assertTrue("Node on deprovisioned host removed", tester.nodeRepository.nodes().getNode("host2-1").isEmpty()); - assertTrue("Host satisfying 16-24-100-1 is kept", tester.nodeRepository.nodes().getNode("host3").isPresent()); - assertTrue("New 48-128-1000-10 host added", tester.nodeRepository.nodes().getNode("hostname100").isPresent()); - assertTrue("New 48-128-1000-10 host added", tester.nodeRepository.nodes().getNode("hostname101").isPresent()); + assertTrue("Failed host 'host2' is deprovisioned", tester.nodeRepository.nodes().node("host2").isEmpty()); + assertTrue("Node on deprovisioned host removed", tester.nodeRepository.nodes().node("host2-1").isEmpty()); + assertTrue("Host satisfying 16-24-100-1 is kept", tester.nodeRepository.nodes().node("host3").isPresent()); + assertTrue("New 48-128-1000-10 host added", tester.nodeRepository.nodes().node("hostname100").isPresent()); + assertTrue("New 48-128-1000-10 host added", tester.nodeRepository.nodes().node("hostname101").isPresent()); } @Test @@ -156,10 +156,10 @@ public class DynamicProvisioningMaintainerTest { assertEquals(0, tester.hostProvisioner.provisionedHosts.size()); assertEquals(11, tester.nodeRepository.nodes().list().size()); - assertTrue(tester.nodeRepository.nodes().getNode("host2").isPresent()); - assertTrue(tester.nodeRepository.nodes().getNode("host2-1").isPresent()); - assertTrue(tester.nodeRepository.nodes().getNode("host3").isPresent()); - assertTrue(tester.nodeRepository.nodes().getNode("hostname100").isEmpty()); + assertTrue(tester.nodeRepository.nodes().node("host2").isPresent()); + assertTrue(tester.nodeRepository.nodes().node("host2-1").isPresent()); 
+ assertTrue(tester.nodeRepository.nodes().node("host3").isPresent()); + assertTrue(tester.nodeRepository.nodes().node("hostname100").isEmpty()); // The first cluster will be allocated to host3 and a new host hostname100. // hostname100 will be a large shared host specified above. @@ -196,9 +196,9 @@ public class DynamicProvisioningMaintainerTest { assertEquals(2, tester.hostProvisioner.provisionedHosts.size()); assertEquals(2, tester.provisionedHostsMatching(new NodeResources(48, 128, 1000, 10))); assertEquals(10, tester.nodeRepository.nodes().list().size()); // 3 removed, 2 added - assertTrue("preprovision capacity is prefered on shared hosts", tester.nodeRepository.nodes().getNode("host3").isEmpty()); - assertTrue(tester.nodeRepository.nodes().getNode("hostname100").isPresent()); - assertTrue(tester.nodeRepository.nodes().getNode("hostname101").isPresent()); + assertTrue("preprovision capacity is prefered on shared hosts", tester.nodeRepository.nodes().node("host3").isEmpty()); + assertTrue(tester.nodeRepository.nodes().node("hostname100").isPresent()); + assertTrue(tester.nodeRepository.nodes().node("hostname101").isPresent()); // If the preprovision capacity is reduced, we should see shared hosts deprovisioned. @@ -212,12 +212,12 @@ public class DynamicProvisioningMaintainerTest { 1, tester.hostProvisioner.provisionedHosts.size()); assertEquals(1, tester.provisionedHostsMatching(new NodeResources(48, 128, 1000, 10))); assertEquals(9, tester.nodeRepository.nodes().list().size()); // 4 removed, 2 added - if (tester.nodeRepository.nodes().getNode("hostname100").isPresent()) { + if (tester.nodeRepository.nodes().node("hostname100").isPresent()) { assertTrue("hostname101 is superfluous and should have been deprovisioned", - tester.nodeRepository.nodes().getNode("hostname101").isEmpty()); + tester.nodeRepository.nodes().node("hostname101").isEmpty()); } else { assertTrue("hostname101 is required for preprovision capacity", - tester.nodeRepository.nodes().getNode("hostname101").isPresent()); + tester.nodeRepository.nodes().node("hostname101").isPresent()); } } @@ -226,10 +226,10 @@ public class DynamicProvisioningMaintainerTest { assertEquals(1, tester.hostProvisioner.provisionedHosts.size()); assertEquals(1, tester.provisionedHostsMatching(new NodeResources(48, 128, 1000, 10))); assertEquals(10, tester.nodeRepository.nodes().list().size()); // 2 removed, 1 added - assertTrue("Failed host 'host2' is deprovisioned", tester.nodeRepository.nodes().getNode("host2").isEmpty()); - assertTrue("Node on deprovisioned host removed", tester.nodeRepository.nodes().getNode("host2-1").isEmpty()); - assertTrue("One 1-30-20-3 node fits on host3", tester.nodeRepository.nodes().getNode("host3").isPresent()); - assertTrue("New 48-128-1000-10 host added", tester.nodeRepository.nodes().getNode("hostname100").isPresent()); + assertTrue("Failed host 'host2' is deprovisioned", tester.nodeRepository.nodes().node("host2").isEmpty()); + assertTrue("Node on deprovisioned host removed", tester.nodeRepository.nodes().node("host2-1").isEmpty()); + assertTrue("One 1-30-20-3 node fits on host3", tester.nodeRepository.nodes().node("host3").isPresent()); + assertTrue("New 48-128-1000-10 host added", tester.nodeRepository.nodes().node("hostname100").isPresent()); } @Test @@ -283,7 +283,7 @@ public class DynamicProvisioningMaintainerTest { tester.hostProvisioner.with(Behaviour.failDeprovisioning); tester.maintainer.maintain(); - assertTrue(tester.nodeRepository.nodes().getNode(host2.hostname()).isPresent()); + 
assertTrue(tester.nodeRepository.nodes().node(host2.hostname()).isPresent()); } @Test @@ -339,10 +339,10 @@ public class DynamicProvisioningMaintainerTest { List.of(new ClusterCapacity(3, 0, 0, 0, 0.0)), ClusterCapacity.class); assertEquals(0, tester.provisionedHostsMatching(sharedHostNodeResources)); - assertTrue(tester.nodeRepository.nodes().getNode("hostname102").isEmpty()); + assertTrue(tester.nodeRepository.nodes().node("hostname102").isEmpty()); tester.maintainer.maintain(); assertEquals(1, tester.provisionedHostsMatching(sharedHostNodeResources)); - assertTrue(tester.nodeRepository.nodes().getNode("hostname102").isPresent()); + assertTrue(tester.nodeRepository.nodes().node("hostname102").isPresent()); // Next maintenance run does nothing tester.assertNodesUnchanged(); @@ -367,14 +367,14 @@ public class DynamicProvisioningMaintainerTest { ClusterCapacity.class); assertEquals(1, tester.provisionedHostsMatching(sharedHostNodeResources)); - assertTrue(tester.nodeRepository.nodes().getNode("hostname102").isPresent()); - assertTrue(tester.nodeRepository.nodes().getNode("hostname103").isEmpty()); - assertTrue(tester.nodeRepository.nodes().getNode("hostname104").isEmpty()); + assertTrue(tester.nodeRepository.nodes().node("hostname102").isPresent()); + assertTrue(tester.nodeRepository.nodes().node("hostname103").isEmpty()); + assertTrue(tester.nodeRepository.nodes().node("hostname104").isEmpty()); tester.maintainer.maintain(); assertEquals(3, tester.provisionedHostsMatching(sharedHostNodeResources)); - assertTrue(tester.nodeRepository.nodes().getNode("hostname102").isPresent()); - assertTrue(tester.nodeRepository.nodes().getNode("hostname103").isPresent()); - assertTrue(tester.nodeRepository.nodes().getNode("hostname104").isPresent()); + assertTrue(tester.nodeRepository.nodes().node("hostname102").isPresent()); + assertTrue(tester.nodeRepository.nodes().node("hostname103").isPresent()); + assertTrue(tester.nodeRepository.nodes().node("hostname104").isPresent()); } @Test diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java index 6a10b930871..2191963de8a 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java @@ -268,7 +268,7 @@ public class FailedExpirerTest { } public Node get(String hostname) { - return nodeRepository.nodes().getNode(hostname) + return nodeRepository.nodes().node(hostname) .orElseThrow(() -> new IllegalArgumentException("No such node: " + hostname)); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java index 56f06b59138..16afcb85020 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java @@ -97,11 +97,11 @@ public class NodeFailerTest { .map(Node::state).collect(Collectors.toSet()); assertEquals(Set.of(Node.State.failed), childStates2Iter); // The host itself is still active as it too must be allowed to suspend - assertEquals(Node.State.active, tester.nodeRepository.nodes().getNode(hostWithHwFailure).get().state()); + assertEquals(Node.State.active, 
tester.nodeRepository.nodes().node(hostWithHwFailure).get().state()); tester.suspend(hostWithHwFailure); tester.runMaintainers(); - assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(hostWithHwFailure).get().state()); + assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(hostWithHwFailure).get().state()); assertEquals(4, tester.nodeRepository.nodes().list(Node.State.failed).size()); } @@ -109,7 +109,7 @@ public class NodeFailerTest { public void hw_fail_only_if_whole_host_is_suspended() { NodeFailTester tester = NodeFailTester.withTwoApplicationsOnDocker(6); String hostWithFailureReports = selectFirstParentHostWithNActiveNodesExcept(tester.nodeRepository, 2); - assertEquals(Node.State.active, tester.nodeRepository.nodes().getNode(hostWithFailureReports).get().state()); + assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state()); // The host has 2 nodes in active and 1 ready Map> hostnamesByState = tester.nodeRepository.nodes().list().childrenOf(hostWithFailureReports).asList().stream() @@ -132,40 +132,40 @@ public class NodeFailerTest { // The ready node will be failed, but neither the host nor the 2 active nodes since they have not been suspended tester.allNodesMakeAConfigRequestExcept(); tester.runMaintainers(); - assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(readyChild).get().state()); - assertEquals(Node.State.active, tester.nodeRepository.nodes().getNode(hostWithFailureReports).get().state()); - assertEquals(Node.State.active, tester.nodeRepository.nodes().getNode(activeChild1).get().state()); - assertEquals(Node.State.active, tester.nodeRepository.nodes().getNode(activeChild2).get().state()); + assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state()); + assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state()); + assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild1).get().state()); + assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild2).get().state()); // Suspending the host will not fail any more since none of the children are suspended tester.suspend(hostWithFailureReports); tester.clock.advance(Duration.ofHours(25)); tester.allNodesMakeAConfigRequestExcept(); tester.runMaintainers(); - assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(readyChild).get().state()); - assertEquals(Node.State.active, tester.nodeRepository.nodes().getNode(hostWithFailureReports).get().state()); - assertEquals(Node.State.active, tester.nodeRepository.nodes().getNode(activeChild1).get().state()); - assertEquals(Node.State.active, tester.nodeRepository.nodes().getNode(activeChild2).get().state()); + assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state()); + assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state()); + assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild1).get().state()); + assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild2).get().state()); // Suspending one child node will fail that out.
tester.suspend(activeChild1); tester.clock.advance(Duration.ofHours(25)); tester.allNodesMakeAConfigRequestExcept(); tester.runMaintainers(); - assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(readyChild).get().state()); - assertEquals(Node.State.active, tester.nodeRepository.nodes().getNode(hostWithFailureReports).get().state()); - assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(activeChild1).get().state()); - assertEquals(Node.State.active, tester.nodeRepository.nodes().getNode(activeChild2).get().state()); + assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state()); + assertEquals(Node.State.active, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state()); + assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(activeChild1).get().state()); + assertEquals(Node.State.active, tester.nodeRepository.nodes().node(activeChild2).get().state()); // Suspending the second child node will fail that out and the host. tester.suspend(activeChild2); tester.clock.advance(Duration.ofHours(25)); tester.allNodesMakeAConfigRequestExcept(); tester.runMaintainers(); - assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(readyChild).get().state()); - assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(hostWithFailureReports).get().state()); - assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(activeChild1).get().state()); - assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(activeChild2).get().state()); + assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyChild).get().state()); + assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(hostWithFailureReports).get().state()); + assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(activeChild1).get().state()); + assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(activeChild2).get().state()); } @Test @@ -182,10 +182,10 @@ public class NodeFailerTest { tester.clock.advance(Duration.ofMinutes(65)); tester.runMaintainers(); - assertTrue(tester.nodeRepository.nodes().getNode(host_from_normal_app).get().isDown()); - assertTrue(tester.nodeRepository.nodes().getNode(host_from_suspended_app).get().isDown()); - assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(host_from_normal_app).get().state()); - assertEquals(Node.State.active, tester.nodeRepository.nodes().getNode(host_from_suspended_app).get().state()); + assertTrue(tester.nodeRepository.nodes().node(host_from_normal_app).get().isDown()); + assertTrue(tester.nodeRepository.nodes().node(host_from_suspended_app).get().isDown()); + assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(host_from_normal_app).get().state()); + assertEquals(Node.State.active, tester.nodeRepository.nodes().node(host_from_suspended_app).get().state()); } @Test @@ -233,8 +233,8 @@ public class NodeFailerTest { assertEquals(4, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size()); tester.runMaintainers(); assertEquals(2, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size()); - assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(readyFail1.hostname()).get().state()); - assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(readyFail2.hostname()).get().state()); + assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyFail1.hostname()).get().state()); + 
assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyFail2.hostname()).get().state()); String downHost1 = tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active).asList().get(1).hostname(); String downHost2 = tester.nodeRepository.nodes().list(NodeFailTester.app2, Node.State.active).asList().get(3).hostname(); @@ -324,7 +324,7 @@ public class NodeFailerTest { tester.allNodesMakeAConfigRequestExcept(); tester.runMaintainers(); assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size()); - assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(downNode).get().state()); + assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(downNode).get().state()); // Re-activate the node. It is still down, but should not be failed out until the grace period has passed again tester.nodeRepository.nodes().reactivate(downNode, Agent.system, getClass().getSimpleName()); @@ -337,7 +337,7 @@ public class NodeFailerTest { tester.allNodesMakeAConfigRequestExcept(); tester.runMaintainers(); assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size()); - assertEquals(Node.State.failed, tester.nodeRepository.nodes().getNode(downNode).get().state()); + assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(downNode).get().state()); } @Test diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OsUpgradeActivatorTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OsUpgradeActivatorTest.java index 36452e05bb6..3f0b94170f6 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OsUpgradeActivatorTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OsUpgradeActivatorTest.java @@ -103,7 +103,7 @@ public class OsUpgradeActivatorTest { private Stream streamUpdatedNodes(List nodes) { Stream stream = Stream.empty(); for (var node : nodes) { - stream = Stream.concat(stream, tester.nodeRepository().nodes().getNode(node.hostname()).stream()); + stream = Stream.concat(stream, tester.nodeRepository().nodes().node(node.hostname()).stream()); } return stream; } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java index a1aa097e4cc..b2ac6788566 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java @@ -184,7 +184,7 @@ public class RebalancerTest { return getNode(node.hostname()).get().allocation().get().membership().retired(); } - Optional getNode(String hostname) { return tester.nodeRepository().nodes().getNode(hostname); } + Optional getNode(String hostname) { return tester.nodeRepository().nodes().node(hostname); } NodeList getNodes(Node.State nodeState) { return tester.nodeRepository().nodes().list(nodeState); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java index c7a1610f75d..09e142b68a0 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java @@ -256,7 +256,7 @@ 
public class OsVersionsTest { tester.prepareAndActivateInfraApplication(infraApplication, nodeType); return nodes.stream() .map(Node::hostname) - .flatMap(hostname -> tester.nodeRepository().nodes().getNode(hostname).stream()) + .flatMap(hostname -> tester.nodeRepository().nodes().node(hostname).stream()) .collect(Collectors.toList()); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java index 39a206e2223..bd342df22f0 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java @@ -52,7 +52,7 @@ public class AclProvisioningTest { // Get trusted nodes for the first active node Node node = activeNodes.get(0); - List host = node.parentHostname().flatMap(tester.nodeRepository().nodes()::getNode).map(List::of).orElseGet(List::of); + List host = node.parentHostname().flatMap(tester.nodeRepository().nodes()::node).map(List::of).orElseGet(List::of); Supplier nodeAcls = () -> node.acl(tester.nodeRepository().nodes().list(), tester.nodeRepository().loadBalancers()); // Trusted nodes are active nodes in same application, proxy nodes and config servers @@ -94,7 +94,7 @@ public class AclProvisioningTest { NodeList tenantNodes = tester.nodeRepository().nodes().list().nodeType(NodeType.tenant); // Get trusted nodes for the first config server - Node node = tester.nodeRepository().nodes().getNode("cfg1") + Node node = tester.nodeRepository().nodes().node("cfg1") .orElseThrow(() -> new RuntimeException("Failed to find cfg1")); NodeAcl nodeAcl = node.acl(tester.nodeRepository().nodes().list(), tester.nodeRepository().loadBalancers()); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java index 48a2d47c173..55ca2d7e55a 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java @@ -434,13 +434,13 @@ public class DockerProvisioningTest { private void assertNodeParentReservation(List nodes, Optional reservation, ProvisioningTester tester) { for (Node node : nodes) - assertEquals(reservation, tester.nodeRepository().nodes().getNode(node.parentHostname().get()).get().reservedTo()); + assertEquals(reservation, tester.nodeRepository().nodes().node(node.parentHostname().get()).get().reservedTo()); } private void assertHostSpecParentReservation(List hostSpecs, Optional reservation, ProvisioningTester tester) { for (HostSpec hostSpec : hostSpecs) { - Node node = tester.nodeRepository().nodes().getNode(hostSpec.hostname()).get(); - assertEquals(reservation, tester.nodeRepository().nodes().getNode(node.parentHostname().get()).get().reservedTo()); + Node node = tester.nodeRepository().nodes().node(hostSpec.hostname()).get(); + assertEquals(reservation, tester.nodeRepository().nodes().node(node.parentHostname().get()).get().reservedTo()); } } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeTypeProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeTypeProvisioningTest.java index 
596591ad599..124f7db569a 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeTypeProvisioningTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeTypeProvisioningTest.java @@ -123,7 +123,7 @@ public class NodeTypeProvisioningTest { assertEquals(11, nodes.size()); // Verify that wantToRetire has been propagated - assertTrue(tester.nodeRepository().nodes().getNode(nodeToRetire.hostname()) + assertTrue(tester.nodeRepository().nodes().node(nodeToRetire.hostname()) .flatMap(Node::allocation) .map(allocation -> allocation.membership().retired()) .orElseThrow(RuntimeException::new)); @@ -137,7 +137,7 @@ public class NodeTypeProvisioningTest { assertEquals(11, nodes.size()); // Verify that the node is still marked as retired - assertTrue(tester.nodeRepository().nodes().getNode(nodeToRetire.hostname()) + assertTrue(tester.nodeRepository().nodes().node(nodeToRetire.hostname()) .flatMap(Node::allocation) .map(allocation -> allocation.membership().retired()) .orElseThrow(RuntimeException::new)); @@ -154,7 +154,7 @@ public class NodeTypeProvisioningTest { assertEquals(10, nodes.size()); // Verify that the node is now inactive - assertEquals(Node.State.dirty, tester.nodeRepository().nodes().getNode(nodeToRetire.hostname()) + assertEquals(Node.State.dirty, tester.nodeRepository().nodes().node(nodeToRetire.hostname()) .orElseThrow(RuntimeException::new).state()); } } @@ -233,7 +233,7 @@ public class NodeTypeProvisioningTest { assertEquals(10, nodes.size()); // Verify the node we previously set to retire has finished retiring - assertEquals(Node.State.dirty, tester.nodeRepository().nodes().getNode(currentyRetiringHostname) + assertEquals(Node.State.dirty, tester.nodeRepository().nodes().node(currentyRetiringHostname) .orElseThrow(RuntimeException::new).state()); // Verify that a node is currently retiring diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java index 324f931be14..2b0541ce0c2 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java @@ -32,7 +32,6 @@ import com.yahoo.vespa.service.duper.InfraApplication; import org.junit.Test; import java.time.Duration; -import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashSet; @@ -161,7 +160,7 @@ public class ProvisioningTest { HostSpec host1 = state1.container0.iterator().next(); assertFalse(host1.version().isPresent()); - Node node1 = tester.nodeRepository().nodes().getNode(host1.hostname()).get(); + Node node1 = tester.nodeRepository().nodes().node(host1.hostname()).get(); tester.nodeRepository().nodes().write(node1.with(node1.status().withVespaVersion(Version.fromString("1.2.3"))), () -> {}); // redeploy @@ -187,7 +186,7 @@ public class ProvisioningTest { tester.activate(application1, state1.allHosts); HostSpec host1 = state1.container0.iterator().next(); - Node node1 = tester.nodeRepository().nodes().getNode(host1.hostname()).get(); + Node node1 = tester.nodeRepository().nodes().node(host1.hostname()).get(); DockerImage dockerImage = DockerImage.fromString(dockerImageRepo).withTag(Version.fromString("1.2.3")); tester.nodeRepository().nodes().write(node1.with(node1.status().withContainerImage(dockerImage)), () 
-> {}); @@ -196,7 +195,7 @@ public class ProvisioningTest { tester.activate(application1, state2.allHosts); host1 = state2.container0.iterator().next(); - node1 = tester.nodeRepository().nodes().getNode(host1.hostname()).get(); + node1 = tester.nodeRepository().nodes().node(host1.hostname()).get(); assertEquals(dockerImage, node1.status().containerImage().get()); } @@ -318,7 +317,7 @@ public class ProvisioningTest { tester.activate(application1, state1.allHosts); tester.nodeRepository().nodes().list(application1) - .forEach(n -> assertEquals(large, tester.nodeRepository().nodes().getNode(n.parentHostname().get()).get().resources())); + .forEach(n -> assertEquals(large, tester.nodeRepository().nodes().node(n.parentHostname().get()).get().resources())); } @Test @@ -785,7 +784,7 @@ public class ProvisioningTest { // Re-deploy application with 1 node less, the retired node should be on the spare host tester.deploy(application, spec, Capacity.from(new ClusterResources(5, 1, defaultResources))); - assertTrue(tester.nodeRepository().nodes().getNode(randomNode.hostname()).get().allocation().get().membership().retired()); + assertTrue(tester.nodeRepository().nodes().node(randomNode.hostname()).get().allocation().get().membership().retired()); } @Test diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java index fd8eaf9d938..86f7ed45ce5 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java @@ -193,7 +193,7 @@ public class ProvisioningTester { nodeRepository.nodes().write(node, lock); } if (node.parentHostname().isEmpty()) continue; - Node parent = nodeRepository.nodes().getNode(node.parentHostname().get()).get(); + Node parent = nodeRepository.nodes().node(node.parentHostname().get()).get(); if (parent.state() == Node.State.active) continue; NestedTransaction t = new NestedTransaction(); if (parent.ipConfig().primary().isEmpty()) @@ -316,7 +316,7 @@ public class ProvisioningTester { } public void fail(String hostname) { - int beforeFailCount = nodeRepository.nodes().getNode(hostname, Node.State.active).get().status().failCount(); + int beforeFailCount = nodeRepository.nodes().node(hostname, Node.State.active).get().status().failCount(); Node failedNode = nodeRepository.nodes().fail(hostname, Agent.system, "Failing to unit test"); assertTrue(nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).asList().contains(failedNode)); assertEquals(beforeFailCount + 1, failedNode.status().failCount()); @@ -561,7 +561,7 @@ public class ProvisioningTester { public void assertAllocatedOn(String explanation, String hostFlavor, ApplicationId app) { for (Node node : nodeRepository.nodes().list(app)) { - Node parent = nodeRepository.nodes().getNode(node.parentHostname().get()).get(); + Node parent = nodeRepository.nodes().node(node.parentHostname().get()).get(); assertEquals(node + ": " + explanation, hostFlavor, parent.flavor().name()); } } @@ -595,7 +595,7 @@ public class ProvisioningTester { public int hostFlavorCount(String hostFlavor, ApplicationId app) { return (int)nodeRepository().nodes().list(app).stream() - .map(n -> nodeRepository().nodes().getNode(n.parentHostname().get()).get()) + .map(n -> nodeRepository().nodes().node(n.parentHostname().get()).get()) .filter(p -> 
p.flavor().name().equals(hostFlavor)) .count(); } -- cgit v1.2.3 From 8834dbc6c85f1786f1a9e250ac7cc85bebe64491 Mon Sep 17 00:00:00 2001 From: Jon Bratseth Date: Wed, 10 Feb 2021 15:25:53 +0100 Subject: Remove unused methods --- .../src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java | 3 --- 1 file changed, 3 deletions(-) (limited to 'node-repository') diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java index 2dfd23b8680..b7d7ad14b33 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java @@ -97,9 +97,6 @@ public class Nodes { return new LockedNodeList(list().asList(), lock); } - public List getInactive() { return db.readNodes(Node.State.inactive); } - public List getFailed() { return db.readNodes(Node.State.failed); } - /** * Returns whether the zone managed by this node repository seems to be working. * If too many nodes are not responding, there is probably some zone-wide issue -- cgit v1.2.3 From 8e1cae2a0725567b15d49b99317523853cb033e4 Mon Sep 17 00:00:00 2001 From: Jon Bratseth Date: Wed, 10 Feb 2021 20:57:38 +0100 Subject: Remove unnecessary method --- .../vespa/hosted/provision/NodeRepository.java | 2 +- .../hosted/provision/autoscale/Autoscaler.java | 2 +- .../autoscale/MetricsV2MetricsFetcher.java | 2 +- .../maintenance/MaintenanceDeployment.java | 4 +- .../yahoo/vespa/hosted/provision/node/Nodes.java | 11 +----- .../persistence/CuratorDatabaseClient.java | 10 ----- .../provisioning/NodeRepositoryProvisioner.java | 2 +- .../hosted/provision/provisioning/Preparer.java | 4 +- .../provision/restapi/NodesV2ApiHandler.java | 2 +- .../provision/testutils/ServiceMonitorStub.java | 2 +- .../hosted/provision/RealDataScenarioTest.java | 2 +- .../autoscale/AutoscalingIntegrationTest.java | 2 +- .../provision/autoscale/AutoscalingTest.java | 4 +- .../provision/autoscale/AutoscalingTester.java | 12 +++--- .../autoscale/MetricsV2MetricsFetcherTest.java | 2 +- .../maintenance/AutoscalingMaintainerTester.java | 2 +- .../maintenance/LoadBalancerExpirerTest.java | 2 +- .../provision/maintenance/NodeFailTester.java | 10 ++--- .../provision/maintenance/NodeFailerTest.java | 22 +++++------ .../OperatorChangeApplicationMaintainerTest.java | 12 +++--- .../PeriodicApplicationMaintainerTest.java | 26 ++++++------ .../provision/maintenance/RebalancerTest.java | 4 +- .../provision/maintenance/RetiredExpirerTest.java | 28 ++++++------- .../ScalingSuggestionsMaintainerTest.java | 4 +- .../provisioning/AclProvisioningTest.java | 2 +- .../provisioning/DockerProvisioningTest.java | 2 +- .../provisioning/DynamicDockerAllocationTest.java | 8 ++-- .../provisioning/DynamicDockerProvisionTest.java | 14 +++---- .../provisioning/LoadBalancerProvisionerTest.java | 8 ++-- .../provision/provisioning/ProvisioningTest.java | 46 +++++++++++----------- .../provision/provisioning/ProvisioningTester.java | 18 ++++----- 31 files changed, 126 insertions(+), 145 deletions(-) (limited to 'node-repository') diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java index beec04b3b29..55495669802 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java +++ 
b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java @@ -193,7 +193,7 @@ public class NodeRepository extends AbstractComponent { /** Removes this application: Active nodes are deactivated while all non-active nodes are set dirty. */ public void remove(ApplicationTransaction transaction) { - NodeList applicationNodes = nodes().list(transaction.application()); + NodeList applicationNodes = nodes().list().owner(transaction.application()); NodeList activeNodes = applicationNodes.state(State.active); nodes().deactivate(activeNodes.asList(), transaction); db.writeTo(State.dirty, diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java index 2f01f6b31ae..4ab6f259374 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java @@ -176,7 +176,7 @@ public class Autoscaler { return false; // A deployment is ongoing - if (nodeRepository.nodes().list(nodes.first().get().allocation().get().owner(), Node.State.reserved).size() > 0) + if (nodeRepository.nodes().list(Node.State.reserved).owner(nodes.first().get().allocation().get().owner()).size() > 0) return false; return true; diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcher.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcher.java index b93c7930b5b..b2d8ddfd414 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcher.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcher.java @@ -51,7 +51,7 @@ public class MetricsV2MetricsFetcher extends AbstractComponent implements Metric @Override public CompletableFuture fetchMetrics(ApplicationId application) { - NodeList applicationNodes = nodeRepository.nodes().list(application).state(Node.State.active); + NodeList applicationNodes = nodeRepository.nodes().list().owner(application).state(Node.State.active); Optional metricsV2Container = applicationNodes.container() .matching(node -> expectedUp(node)) diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java index 11292f9aa60..860076dd111 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java @@ -116,7 +116,7 @@ class MaintenanceDeployment implements Closeable { Deployer deployer, NodeRepository nodeRepository) { if (lock.isEmpty()) return Optional.empty(); - if (nodeRepository.nodes().list(application, Node.State.active).isEmpty()) return Optional.empty(); + if (nodeRepository.nodes().list(Node.State.active).owner(application).isEmpty()) return Optional.empty(); return deployer.deployFromLocalActive(application); } @@ -168,7 +168,7 @@ class MaintenanceDeployment implements Closeable { if ( ! 
deployment.prepare()) return false; if (verifyTarget) { expectedNewNode = - nodeRepository.nodes().list(application, Node.State.reserved).stream() + nodeRepository.nodes().list(Node.State.reserved).owner(application).stream() .filter(n -> !n.hostname().equals(node.hostname())) .filter(n -> n.allocation().get().membership().cluster().id().equals(node.allocation().get().membership().cluster().id())) .findAny(); diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java index b7d7ad14b33..3e94201d87a 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java @@ -83,15 +83,6 @@ public class Nodes { return NodeList.copyOf(db.readNodes(inState)); } - /** - * Returns a list of nodes in this repository for an application in any of the given states - * - * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned - */ - public NodeList list(ApplicationId application, Node.State... inState) { - return NodeList.copyOf(db.readNodes(application, inState)); - } - /** Returns a locked list of all nodes in this repository */ public LockedNodeList list(Mutex lock) { return new LockedNodeList(list().asList(), lock); @@ -401,7 +392,7 @@ public class Nodes { // TODO: Work out a safe lock acquisition strategy for moves, e.g. migrate to lockNode. try (Mutex lock = lock(node)) { if (toState == Node.State.active) { - for (Node currentActive : list(node.allocation().get().owner(), Node.State.active)) { + for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) { if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster()) && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index()) illegal("Could not set " + node + " active: Same cluster and index as " + currentActive); diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java index 95445ad0a66..6150ee9f4a0 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java @@ -278,16 +278,6 @@ public class CuratorDatabaseClient { return nodes; } - /** - * Returns all nodes allocated to the given application which are in one of the given states - * If no states are given this returns all nodes. - */ - public List readNodes(ApplicationId applicationId, Node.State ... states) { - List nodes = readNodes(states); - nodes.removeIf(node -> ! node.allocation().isPresent() || ! node.allocation().get().owner().equals(applicationId)); - return nodes; - } - /** * Returns a particular node, or empty if this node is not in any of the given states. * If no states are given this returns the node if it is present in any state. 
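The two hunks above remove the application-scoped read paths entirely, so every caller must instead list by state and then narrow to an owner on the returned NodeList, which is what the remaining hunks in this commit do. A minimal before/after sketch of the migration pattern (the nodeRepository and app variables are illustrative stand-ins for whatever is in scope at a given call site):

    // Before: owner and state filtering in a single repository call
    NodeList before = nodeRepository.nodes().list(app, Node.State.active);

    // After: read by state, then filter by owner on the returned NodeList
    NodeList after = nodeRepository.nodes().list(Node.State.active).owner(app);

Both forms yield the same node set; the owner filter simply moves out of the storage read (which previously did a removeIf over the result) and into the NodeList filtering API.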
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java index a5057bd1134..79e1005eb47 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java @@ -150,7 +150,7 @@ public class NodeRepositoryProvisioner implements Provisioner { private ClusterResources currentResources(ApplicationId applicationId, ClusterSpec clusterSpec, Capacity requested) { - List nodes = nodeRepository.nodes().list(applicationId, Node.State.active) + List nodes = nodeRepository.nodes().list(Node.State.active).owner(applicationId) .cluster(clusterSpec.id()) .not().retired() .not().removable() diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java index 41a6b0d42b1..87b3742efb4 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java @@ -85,7 +85,7 @@ class Preparer { */ private List findNodesInRemovableGroups(ApplicationId application, ClusterSpec requestedCluster, int wantedGroups) { List surplusNodes = new ArrayList<>(0); - for (Node node : nodeRepository.nodes().list(application, Node.State.active)) { + for (Node node : nodeRepository.nodes().list(Node.State.active).owner(application)) { ClusterSpec nodeCluster = node.allocation().get().membership().cluster(); if ( ! nodeCluster.id().equals(requestedCluster.id())) continue; if ( ! nodeCluster.type().equals(requestedCluster.type())) continue; @@ -127,7 +127,7 @@ class Preparer { */ private int findHighestIndex(ApplicationId application, ClusterSpec cluster) { int highestIndex = -1; - for (Node node : nodeRepository.nodes().list(application, Node.State.allocatedStates().toArray(new Node.State[0]))) { + for (Node node : nodeRepository.nodes().list(Node.State.allocatedStates().toArray(new Node.State[0])).owner(application)) { ClusterSpec nodeCluster = node.allocation().get().membership().cluster(); if ( ! nodeCluster.id().equals(cluster.id())) continue; if ( ! 
nodeCluster.type().equals(cluster.type())) continue; diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java index 56b46c106ee..c4c11222702 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java @@ -435,7 +435,7 @@ public class NodesV2ApiHandler extends LoggingRequestHandler { if (application.isEmpty()) return ErrorResponse.notFoundError("No application '" + id + "'"); Slime slime = ApplicationSerializer.toSlime(application.get(), - nodeRepository.nodes().list(id, Node.State.active).asList(), + nodeRepository.nodes().list(Node.State.active).owner(id).asList(), withPath("/nodes/v2/applications/" + id, uri)); return new SlimeJsonResponse(slime); } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/ServiceMonitorStub.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/ServiceMonitorStub.java index d2a2544c89c..23c20adb842 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/ServiceMonitorStub.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/ServiceMonitorStub.java @@ -70,7 +70,7 @@ public class ServiceMonitorStub implements ServiceMonitor { Map status = new HashMap<>(); for (Map.Entry app : apps.entrySet()) { Set serviceInstances = new HashSet<>(); - for (Node node : nodeRepository.nodes().list(app.getValue().id(), Node.State.active)) { + for (Node node : nodeRepository.nodes().list(Node.State.active).owner(app.getValue().id())) { serviceInstances.add(new ServiceInstance(new ConfigId("configid"), new HostName(node.hostname()), getHostStatus(node.hostname()))); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java index b17c8fc35b6..60eb66c1779 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java @@ -80,7 +80,7 @@ public class RealDataScenarioTest { }; deploy(tester, app, specs, capacities); - tester.nodeRepository().nodes().list(app).cluster(specs[1].id()).forEach(System.out::println); + tester.nodeRepository().nodes().list().owner(app).cluster(specs[1].id()).forEach(System.out::println); } private void deploy(ProvisioningTester tester, ApplicationId app, ClusterSpec[] specs, Capacity[] capacities) { diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingIntegrationTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingIntegrationTest.java index 5f7b4bc865c..3a74c3a3cf6 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingIntegrationTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingIntegrationTest.java @@ -60,7 +60,7 @@ public class AutoscalingIntegrationTest { tester.nodeRepository().applications().put(application, lock); } var scaledResources = autoscaler.suggest(application.clusters().get(cluster1.id()), - tester.nodeRepository().nodes().list(application1)); + 
tester.nodeRepository().nodes().list().owner(application1)); assertTrue(scaledResources.isPresent()); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java index dbb8c00e44d..dbab02302f8 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java @@ -116,7 +116,7 @@ public class AutoscalingTest { // deploy with slow tester.deploy(application1, cluster1, 5, 1, hostResources); - tester.nodeRepository().nodes().list(application1).stream() + tester.nodeRepository().nodes().list().owner(application1).stream() .allMatch(n -> n.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.slow); tester.clock().advance(Duration.ofDays(2)); @@ -132,7 +132,7 @@ public class AutoscalingTest { assertEquals("Disk speed from min/max is used", NodeResources.DiskSpeed.any, scaledResources.nodeResources().diskSpeed()); tester.deploy(application1, cluster1, scaledResources); - tester.nodeRepository().nodes().list(application1).stream() + tester.nodeRepository().nodes().list().owner(application1).stream() .allMatch(n -> n.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java index 0581f9f84b3..eb490079c98 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java @@ -106,7 +106,7 @@ class AutoscalingTester { public void deactivateRetired(ApplicationId application, ClusterSpec cluster, ClusterResources resources) { try (Mutex lock = nodeRepository().nodes().lock(application)){ - for (Node node : nodeRepository().nodes().list(application, Node.State.active)) { + for (Node node : nodeRepository().nodes().list(Node.State.active).owner(application)) { if (node.allocation().get().membership().retired()) nodeRepository().nodes().write(node.with(node.allocation().get().removable(true)), lock); } @@ -126,7 +126,7 @@ class AutoscalingTester { */ public void addCpuMeasurements(float value, float otherResourcesLoad, int count, ApplicationId applicationId) { - NodeList nodes = nodeRepository().nodes().list(applicationId, Node.State.active); + NodeList nodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId); float oneExtraNodeFactor = (float)(nodes.size() - 1.0) / (nodes.size()); for (int i = 0; i < count; i++) { clock().advance(Duration.ofMinutes(1)); @@ -157,7 +157,7 @@ class AutoscalingTester { */ public void addMemMeasurements(float value, float otherResourcesLoad, int count, ApplicationId applicationId) { - NodeList nodes = nodeRepository().nodes().list(applicationId, Node.State.active); + NodeList nodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId); float oneExtraNodeFactor = (float)(nodes.size() - 1.0) / (nodes.size()); for (int i = 0; i < count; i++) { clock().advance(Duration.ofMinutes(1)); @@ -182,7 +182,7 @@ class AutoscalingTester { public void addMeasurements(float cpu, float memory, float disk, int generation, boolean inService, boolean stable, int count, 
ApplicationId applicationId) { - NodeList nodes = nodeRepository().nodes().list(applicationId, Node.State.active); + NodeList nodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId); for (int i = 0; i < count; i++) { clock().advance(Duration.ofMinutes(1)); for (Node node : nodes) { @@ -205,7 +205,7 @@ class AutoscalingTester { nodeRepository().applications().put(application, lock); } return autoscaler.autoscale(application.clusters().get(clusterId), - nodeRepository().nodes().list(applicationId, Node.State.active)); + nodeRepository().nodes().list(Node.State.active).owner(applicationId)); } public Autoscaler.Advice suggest(ApplicationId applicationId, ClusterSpec.Id clusterId, @@ -216,7 +216,7 @@ class AutoscalingTester { nodeRepository().applications().put(application, lock); } return autoscaler.suggest(application.clusters().get(clusterId), - nodeRepository().nodes().list(applicationId, Node.State.active)); + nodeRepository().nodes().list(Node.State.active).owner(applicationId)); } public ClusterResources assertResources(String message, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcherTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcherTest.java index 9ac9a182512..384e8dd8439 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcherTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/MetricsV2MetricsFetcherTest.java @@ -78,7 +78,7 @@ public class MetricsV2MetricsFetcherTest { { httpClient.cannedResponse = cannedResponseForApplication2; try (Mutex lock = tester.nodeRepository().nodes().lock(application1)) { - tester.nodeRepository().nodes().write(tester.nodeRepository().nodes().list(application2, Node.State.active) + tester.nodeRepository().nodes().write(tester.nodeRepository().nodes().list(Node.State.active).owner(application2) .first().get().retire(tester.clock().instant()), lock); } List> values = new ArrayList<>(fetcher.fetchMetrics(application2).get().metrics()); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java index d9ecd0c6653..887ce158e09 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainerTester.java @@ -72,7 +72,7 @@ public class AutoscalingMaintainerTester { } public void addMeasurements(float cpu, float mem, float disk, long generation, int count, ApplicationId applicationId) { - NodeList nodes = nodeRepository().nodes().list(applicationId, Node.State.active); + NodeList nodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId); for (int i = 0; i < count; i++) { for (Node node : nodes) metricsDb.add(List.of(new Pair<>(node.hostname(), new MetricSnapshot(clock().instant(), diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java index 32b5f567341..189b32028a2 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java +++ 
b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java @@ -132,7 +132,7 @@ public class LoadBalancerExpirerTest { } private void dirtyNodesOf(ApplicationId application, ClusterSpec.Id cluster) { - tester.nodeRepository().nodes().deallocate(tester.nodeRepository().nodes().list(application).stream() + tester.nodeRepository().nodes().deallocate(tester.nodeRepository().nodes().list().owner(application).stream() .filter(node -> node.allocation().isPresent()) .filter(node -> node.allocation().get().membership().cluster().id().equals(cluster)) .collect(Collectors.toList()), diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java index 5ad01902125..9801233f396 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java @@ -103,8 +103,8 @@ public class NodeFailTester { tester.activate(app1, clusterApp1, capacity1); tester.activate(app2, clusterApp2, capacity2); - assertEquals(capacity1.minResources().nodes(), tester.nodeRepository.nodes().list(app1, Node.State.active).size()); - assertEquals(capacity2.minResources().nodes(), tester.nodeRepository.nodes().list(app2, Node.State.active).size()); + assertEquals(capacity1.minResources().nodes(), tester.nodeRepository.nodes().list(Node.State.active).owner(app1).size()); + assertEquals(capacity2.minResources().nodes(), tester.nodeRepository.nodes().list(Node.State.active).owner(app2).size()); Map apps = Map.of( app1, new MockDeployer.ApplicationContext(app1, clusterApp1, capacity1), @@ -134,9 +134,9 @@ public class NodeFailTester { tester.activate(app1, clusterApp1, capacity1); tester.activate(app2, clusterApp2, capacity2); assertEquals(Set.of(tester.nodeRepository.nodes().list().nodeType(NodeType.host).asList()), - Set.of(tester.nodeRepository.nodes().list(tenantHostApp, Node.State.active).asList())); - assertEquals(capacity1.minResources().nodes(), tester.nodeRepository.nodes().list(app1, Node.State.active).size()); - assertEquals(capacity2.minResources().nodes(), tester.nodeRepository.nodes().list(app2, Node.State.active).size()); + Set.of(tester.nodeRepository.nodes().list(Node.State.active).owner(tenantHostApp).asList())); + assertEquals(capacity1.minResources().nodes(), tester.nodeRepository.nodes().list(Node.State.active).owner(app1).size()); + assertEquals(capacity2.minResources().nodes(), tester.nodeRepository.nodes().list(Node.State.active).owner(app2).size()); Map apps = Map.of( tenantHostApp, new MockDeployer.ApplicationContext(tenantHostApp, clusterNodeAdminApp, allHosts), diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java index 16afcb85020..ca1fa2831b8 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java @@ -174,8 +174,8 @@ public class NodeFailerTest { tester.suspend(NodeFailTester.app1); // Set two nodes down (one for each application) and wait 65 minutes - String host_from_suspended_app = tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active).asList().get(1).hostname(); - 
String host_from_normal_app = tester.nodeRepository.nodes().list(NodeFailTester.app2, Node.State.active).asList().get(3).hostname(); + String host_from_suspended_app = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname(); + String host_from_normal_app = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app2).asList().get(3).hostname(); tester.serviceMonitor.setHostDown(host_from_suspended_app); tester.serviceMonitor.setHostDown(host_from_normal_app); tester.runMaintainers(); @@ -192,15 +192,15 @@ public class NodeFailerTest { public void zone_is_not_working_if_too_many_nodes_down() { NodeFailTester tester = NodeFailTester.withTwoApplications(); - tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active).asList().get(0).hostname()); + tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(0).hostname()); tester.runMaintainers(); assertTrue(tester.nodeRepository.nodes().isWorking()); - tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active).asList().get(1).hostname()); + tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname()); tester.runMaintainers(); assertTrue(tester.nodeRepository.nodes().isWorking()); - tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active).asList().get(2).hostname()); + tester.serviceMonitor.setHostDown(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(2).hostname()); tester.runMaintainers(); assertFalse(tester.nodeRepository.nodes().isWorking()); @@ -236,8 +236,8 @@ public class NodeFailerTest { assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyFail1.hostname()).get().state()); assertEquals(Node.State.failed, tester.nodeRepository.nodes().node(readyFail2.hostname()).get().state()); - String downHost1 = tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active).asList().get(1).hostname(); - String downHost2 = tester.nodeRepository.nodes().list(NodeFailTester.app2, Node.State.active).asList().get(3).hostname(); + String downHost1 = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname(); + String downHost2 = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app2).asList().get(3).hostname(); tester.serviceMonitor.setHostDown(downHost1); tester.serviceMonitor.setHostDown(downHost2); // nothing happens the first 45 minutes @@ -281,7 +281,7 @@ public class NodeFailerTest { assertEquals( 0, tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size()); // the last host goes down - Node lastNode = tester.highestIndex(tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active)); + Node lastNode = tester.highestIndex(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1)); tester.serviceMonitor.setHostDown(lastNode.hostname()); // it is not failed because there are no ready nodes to replace it for (int minutes = 0; minutes < 75; minutes +=5 ) { @@ -305,7 +305,7 @@ public class NodeFailerTest { assertEquals( 5, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size()); assertEquals( 0, 
                     tester.nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
         assertTrue("The index of the last failed node is not reused",
-                   tester.highestIndex(tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active)).allocation().get().membership().index()
+                   tester.highestIndex(tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1)).allocation().get().membership().index()
                    > lastNode.allocation().get().membership().index());
     }

@@ -313,7 +313,7 @@ public class NodeFailerTest {
     @Test
     public void re_activate_grace_period_test() {
         NodeFailTester tester = NodeFailTester.withTwoApplications();
-        String downNode = tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active).asList().get(1).hostname();
+        String downNode = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).asList().get(1).hostname();
         tester.serviceMonitor.setHostDown(downNode);
         tester.allNodesMakeAConfigRequestExcept();

@@ -350,7 +350,7 @@ public class NodeFailerTest {
         ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test")).vespaVersion("6.42").build();
         tester.activate(NodeFailTester.app1, cluster, capacity);

-        String downHost = tester.nodeRepository.nodes().list(NodeFailTester.app1, Node.State.active).first().get().hostname();
+        String downHost = tester.nodeRepository.nodes().list(Node.State.active).owner(NodeFailTester.app1).first().get().hostname();
         tester.serviceMonitor.setHostDown(downHost);

         // nothing happens the first 45 minutes
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainerTest.java
index b90f98c9ad8..db6aebacddc 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainerTest.java
@@ -51,19 +51,19 @@ public class OperatorChangeApplicationMaintainerTest {
         maintainer.maintain();
         assertEquals("No changes -> no redeployments", 3, fixture.deployer.redeployments);

-        nodeRepository.nodes().fail(nodeRepository.nodes().list(fixture.app1).asList().get(3).hostname(), Agent.system, "Failing to unit test");
+        nodeRepository.nodes().fail(nodeRepository.nodes().list().owner(fixture.app1).asList().get(3).hostname(), Agent.system, "Failing to unit test");
         clock.advance(Duration.ofMinutes(2));
         maintainer.maintain();
         assertEquals("System change -> no redeployments", 3, fixture.deployer.redeployments);

         clock.advance(Duration.ofSeconds(1));
-        nodeRepository.nodes().fail(nodeRepository.nodes().list(fixture.app2).asList().get(4).hostname(), Agent.operator, "Manual node failing");
+        nodeRepository.nodes().fail(nodeRepository.nodes().list().owner(fixture.app2).asList().get(4).hostname(), Agent.operator, "Manual node failing");
         clock.advance(Duration.ofMinutes(2));
         maintainer.maintain();
         assertEquals("Operator change -> redeployment", 4, fixture.deployer.redeployments);

         clock.advance(Duration.ofSeconds(1));
-        nodeRepository.nodes().fail(nodeRepository.nodes().list(fixture.app3).asList().get(1).hostname(), Agent.operator, "Manual node failing");
+        nodeRepository.nodes().fail(nodeRepository.nodes().list().owner(fixture.app3).asList().get(1).hostname(), Agent.operator, "Manual node failing");
         clock.advance(Duration.ofMinutes(2));
         maintainer.maintain();
         assertEquals("Operator change -> redeployment", 5, fixture.deployer.redeployments);
@@ -104,9 +104,9 @@ public class OperatorChangeApplicationMaintainerTest {
             deployer.deployFromLocalActive(app1, false).get().activate();
             deployer.deployFromLocalActive(app2, false).get().activate();
             deployer.deployFromLocalActive(app3, false).get().activate();
-            assertEquals(wantedNodesApp1, nodeRepository.nodes().list(app1, Node.State.active).size());
-            assertEquals(wantedNodesApp2, nodeRepository.nodes().list(app2, Node.State.active).size());
-            assertEquals(wantedNodesApp3, nodeRepository.nodes().list(app3, Node.State.active).size());
+            assertEquals(wantedNodesApp1, nodeRepository.nodes().list(Node.State.active).owner(app1).size());
+            assertEquals(wantedNodesApp2, nodeRepository.nodes().list(Node.State.active).owner(app2).size());
+            assertEquals(wantedNodesApp3, nodeRepository.nodes().list(Node.State.active).owner(app3).size());
         }

     }
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java
index 67b10a351cb..e280a0211e4 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java
@@ -64,21 +64,21 @@ public class PeriodicApplicationMaintainerTest {
         fixture.setBootstrapping(false);

         // Fail and park some nodes
-        nodeRepository.nodes().fail(nodeRepository.nodes().list(fixture.app1).asList().get(3).hostname(), Agent.system, "Failing to unit test");
-        nodeRepository.nodes().fail(nodeRepository.nodes().list(fixture.app2).asList().get(0).hostname(), Agent.system, "Failing to unit test");
-        nodeRepository.nodes().park(nodeRepository.nodes().list(fixture.app2).asList().get(4).hostname(), true, Agent.system, "Parking to unit test");
+        nodeRepository.nodes().fail(nodeRepository.nodes().list().owner(fixture.app1).asList().get(3).hostname(), Agent.system, "Failing to unit test");
+        nodeRepository.nodes().fail(nodeRepository.nodes().list().owner(fixture.app2).asList().get(0).hostname(), Agent.system, "Failing to unit test");
+        nodeRepository.nodes().park(nodeRepository.nodes().list().owner(fixture.app2).asList().get(4).hostname(), true, Agent.system, "Parking to unit test");
         int failedInApp1 = 1;
         int failedOrParkedInApp2 = 2;
-        assertEquals(fixture.wantedNodesApp1 - failedInApp1, nodeRepository.nodes().list(fixture.app1, Node.State.active).size());
-        assertEquals(fixture.wantedNodesApp2 - failedOrParkedInApp2, nodeRepository.nodes().list(fixture.app2, Node.State.active).size());
+        assertEquals(fixture.wantedNodesApp1 - failedInApp1, nodeRepository.nodes().list(Node.State.active).owner(fixture.app1).size());
+        assertEquals(fixture.wantedNodesApp2 - failedOrParkedInApp2, nodeRepository.nodes().list(Node.State.active).owner(fixture.app2).size());
         assertEquals(failedInApp1 + failedOrParkedInApp2, nodeRepository.nodes().list(Node.State.failed, Node.State.parked).nodeType(NodeType.tenant).size());
         assertEquals(3, nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());
         assertEquals(2, nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.host).size());

         // Cause maintenance deployment which will allocate replacement nodes
         fixture.runApplicationMaintainer();
-        assertEquals(fixture.wantedNodesApp1, nodeRepository.nodes().list(fixture.app1, Node.State.active).size());
-        assertEquals(fixture.wantedNodesApp2, nodeRepository.nodes().list(fixture.app2, Node.State.active).size());
+        assertEquals(fixture.wantedNodesApp1, nodeRepository.nodes().list(Node.State.active).owner(fixture.app1).size());
+        assertEquals(fixture.wantedNodesApp2, nodeRepository.nodes().list(Node.State.active).owner(fixture.app2).size());
         assertEquals(0, nodeRepository.nodes().list(Node.State.ready).nodeType(NodeType.tenant).size());

         // Reactivate the previously failed nodes
@@ -88,8 +88,8 @@ public class PeriodicApplicationMaintainerTest {
         int reactivatedInApp1 = 1;
         int reactivatedInApp2 = 2;
         assertEquals(0, nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
-        assertEquals(fixture.wantedNodesApp1 + reactivatedInApp1, nodeRepository.nodes().list(fixture.app1, Node.State.active).size());
-        assertEquals(fixture.wantedNodesApp2 + reactivatedInApp2, nodeRepository.nodes().list(fixture.app2, Node.State.active).size());
+        assertEquals(fixture.wantedNodesApp1 + reactivatedInApp1, nodeRepository.nodes().list(Node.State.active).owner(fixture.app1).size());
+        assertEquals(fixture.wantedNodesApp2 + reactivatedInApp2, nodeRepository.nodes().list(Node.State.active).owner(fixture.app2).size());
         assertEquals("The reactivated nodes are now active but not part of the application",
                      0, fixture.getNodes(Node.State.active).retired().size());

@@ -112,13 +112,13 @@

         // Remove one application without letting the application maintainer know about it
         fixture.remove(fixture.app2);
-        assertEquals(fixture.wantedNodesApp2, nodeRepository.nodes().list(fixture.app2, Node.State.inactive).size());
+        assertEquals(fixture.wantedNodesApp2, nodeRepository.nodes().list(Node.State.inactive).owner(fixture.app2).size());

         // Nodes belonging to app2 are inactive after maintenance
         fixture.maintainer.setOverriddenNodesNeedingMaintenance(frozenActiveNodes);
         fixture.runApplicationMaintainer();
         assertEquals("Inactive nodes were incorrectly activated after maintenance", fixture.wantedNodesApp2,
-                     nodeRepository.nodes().list(fixture.app2, Node.State.inactive).size());
+                     nodeRepository.nodes().list(Node.State.inactive).owner(fixture.app2).size());
     }

     @Test(timeout = 60_000)
@@ -232,8 +232,8 @@ public class PeriodicApplicationMaintainerTest {
         void activate() {
             deployer.deployFromLocalActive(app1, false).get().activate();
             deployer.deployFromLocalActive(app2, false).get().activate();
-            assertEquals(wantedNodesApp1, nodeRepository.nodes().list(app1, Node.State.active).size());
-            assertEquals(wantedNodesApp2, nodeRepository.nodes().list(app2, Node.State.active).size());
+            assertEquals(wantedNodesApp1, nodeRepository.nodes().list(Node.State.active).owner(app1).size());
+            assertEquals(wantedNodesApp2, nodeRepository.nodes().list(Node.State.active).owner(app2).size());
         }

         void remove(ApplicationId application) {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java
index b2ac6788566..9fddaab8b3b 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java
@@ -177,7 +177,7 @@ public class RebalancerTest {
         }

         List getNodes(ApplicationId applicationId, Node.State nodeState) {
-            return tester.nodeRepository().nodes().list(applicationId, nodeState).asList();
+            return tester.nodeRepository().nodes().list(nodeState).owner(applicationId).asList();
         }

         boolean isNodeRetired(Node node) {
@@ -188,7 +188,7 @@ public class RebalancerTest {

         NodeList getNodes(Node.State nodeState) { return tester.nodeRepository().nodes().list(nodeState); }

-        Node getNode(ApplicationId applicationId) { return tester.nodeRepository().nodes().list(applicationId).first().get(); }
+        Node getNode(ApplicationId applicationId) { return tester.nodeRepository().nodes().list().owner(applicationId).first().get(); }

         ManualClock clock() { return tester.clock(); }
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java
index 129e4e3a775..c1c6e5b6154 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java
@@ -71,8 +71,8 @@ public class RetiredExpirerTest {
         activate(applicationId, cluster, wantedNodes=7, 1);
         activate(applicationId, cluster, wantedNodes=2, 1);
         activate(applicationId, cluster, wantedNodes=3, 1);
-        assertEquals(7, nodeRepository.nodes().list(applicationId, Node.State.active).size());
-        assertEquals(0, nodeRepository.nodes().list(applicationId, Node.State.inactive).size());
+        assertEquals(7, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
+        assertEquals(0, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());

         // Cause inactivation of retired nodes
         clock.advance(Duration.ofHours(30)); // Retire period spent
@@ -83,12 +83,12 @@
                                                       cluster,
                                                       Capacity.from(new ClusterResources(wantedNodes, 1, nodeResources)))));
         createRetiredExpirer(deployer).run();
-        assertEquals(3, nodeRepository.nodes().list(applicationId, Node.State.active).size());
-        assertEquals(4, nodeRepository.nodes().list(applicationId, Node.State.inactive).size());
+        assertEquals(3, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
+        assertEquals(4, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
         assertEquals(1, deployer.redeployments);

         // inactivated nodes are not retired
-        for (Node node : nodeRepository.nodes().list(applicationId, Node.State.inactive))
+        for (Node node : nodeRepository.nodes().list(Node.State.inactive).owner(applicationId))
             assertFalse(node.allocation().get().membership().retired());
     }

@@ -106,8 +106,8 @@ public class RetiredExpirerTest {
         activate(applicationId, cluster, wantedNodes=7, 1);
         activate(applicationId, cluster, wantedNodes=2, 1);
         activate(applicationId, cluster, wantedNodes=3, 1);
-        assertEquals(7, nodeRepository.nodes().list(applicationId, Node.State.active).size());
-        assertEquals(0, nodeRepository.nodes().list(applicationId, Node.State.inactive).size());
+        assertEquals(7, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
+        assertEquals(0, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());

         // Cause inactivation of retired nodes
         MockDeployer deployer =
@@ -128,27 +128,27 @@

         RetiredExpirer retiredExpirer = createRetiredExpirer(deployer);
         retiredExpirer.run();
-        assertEquals(5, nodeRepository.nodes().list(applicationId, Node.State.active).size());
-        assertEquals(2, nodeRepository.nodes().list(applicationId, Node.State.inactive).size());
+        assertEquals(5, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
+        assertEquals(2, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
         assertEquals(1, deployer.redeployments);
         verify(orchestrator, times(4)).acquirePermissionToRemove(any());

         // Running it again has no effect
         retiredExpirer.run();
-        assertEquals(5, nodeRepository.nodes().list(applicationId, Node.State.active).size());
-        assertEquals(2, nodeRepository.nodes().list(applicationId, Node.State.inactive).size());
+        assertEquals(5, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
+        assertEquals(2, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
         assertEquals(1, deployer.redeployments);
         verify(orchestrator, times(6)).acquirePermissionToRemove(any());

         clock.advance(RETIRED_EXPIRATION.plusMinutes(1));
         retiredExpirer.run();
-        assertEquals(3, nodeRepository.nodes().list(applicationId, Node.State.active).size());
-        assertEquals(4, nodeRepository.nodes().list(applicationId, Node.State.inactive).size());
+        assertEquals(3, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
+        assertEquals(4, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
         assertEquals(2, deployer.redeployments);
         verify(orchestrator, times(6)).acquirePermissionToRemove(any());

         // inactivated nodes are not retired
-        for (Node node : nodeRepository.nodes().list(applicationId, Node.State.inactive))
+        for (Node node : nodeRepository.nodes().list(Node.State.inactive).owner(applicationId))
             assertFalse(node.allocation().get().membership().retired());
     }
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
index 0d2de73635e..6581008268d 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
@@ -115,14 +115,14 @@ public class ScalingSuggestionsMaintainerTest {
     }

     private boolean shouldSuggest(ApplicationId app, ClusterSpec cluster, ProvisioningTester tester) {
-        var currentResources = tester.nodeRepository().nodes().list(app).cluster(cluster.id()).not().retired().toResources();
+        var currentResources = tester.nodeRepository().nodes().list().owner(app).cluster(cluster.id()).not().retired().toResources();
         return tester.nodeRepository().applications().get(app).get().cluster(cluster.id()).get()
                      .shouldSuggestResources(currentResources);
     }

     public void addMeasurements(float cpu, float memory, float disk, int generation, int count, ApplicationId applicationId,
                                 NodeRepository nodeRepository, MetricsDb db) {
-        NodeList nodes = nodeRepository.nodes().list(applicationId, Node.State.active);
+        NodeList nodes = nodeRepository.nodes().list(Node.State.active).owner(applicationId);
         for (int i = 0; i < count; i++) {
             for (Node node : nodes)
                 db.add(List.of(new Pair<>(node.hostname(), new MetricSnapshot(nodeRepository.clock().instant(),
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java
index bd342df22f0..8033663c6cf 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java
@@ -115,7 +115,7 @@ public class AclProvisioningTest {
         tester.deploy(zoneApplication, Capacity.fromRequiredNodeType(NodeType.proxy));

         // Get trusted nodes for first proxy node
-        NodeList proxyNodes = tester.nodeRepository().nodes().list(zoneApplication);
+        NodeList proxyNodes = tester.nodeRepository().nodes().list().owner(zoneApplication);
         Node node = proxyNodes.first().get();
         NodeAcl nodeAcl = node.acl(tester.nodeRepository().nodes().list(), tester.nodeRepository().loadBalancers());
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java
index 55ca2d7e55a..12a8b476d5e 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java
@@ -366,7 +366,7 @@ public class DockerProvisioningTest {
         tester.activate(app1, cluster1, Capacity.from(new ClusterResources(2, 1, r)));

         var tx = new ApplicationTransaction(new ProvisionLock(app1, tester.nodeRepository().nodes().lock(app1)), new NestedTransaction());
-        tester.nodeRepository().nodes().deactivate(tester.nodeRepository().nodes().list(app1, Node.State.active).retired().asList(), tx);
+        tester.nodeRepository().nodes().deactivate(tester.nodeRepository().nodes().list(Node.State.active).owner(app1).retired().asList(), tx);
         tx.nested().commit();

         assertEquals(2, tester.getNodes(app1, Node.State.active).size());
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java
index 94ce5b8c5fb..cf7083ccc4f 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerAllocationTest.java
@@ -315,7 +315,7 @@ public class DynamicDockerAllocationTest {
         List hosts = tester.prepare(application, clusterSpec("myContent.t1.a1"), 2, 1, new NodeResources(1, 4, 100, 1));
         tester.activate(application, hosts);

-        NodeList activeNodes = tester.nodeRepository().nodes().list(application);
+        NodeList activeNodes = tester.nodeRepository().nodes().list().owner(application);
         assertEquals(ImmutableSet.of("127.0.127.13", "::13"), activeNodes.asList().get(0).ipConfig().primary());
         assertEquals(ImmutableSet.of("127.0.127.2", "::2"), activeNodes.asList().get(1).ipConfig().primary());
     }
@@ -437,16 +437,16 @@ public class DynamicDockerAllocationTest {

         // Redeploy does not change allocation as a host with switch information is no better or worse than hosts
         // without switch information
-        NodeList allocatedNodes = tester.nodeRepository().nodes().list(app1);
+        NodeList allocatedNodes = tester.nodeRepository().nodes().list().owner(app1);
         tester.activate(app1, tester.prepare(app1, cluster, Capacity.from(new ClusterResources(2, 1, resources))));
-        assertEquals("Allocation unchanged", allocatedNodes, tester.nodeRepository().nodes().list(app1));
+        assertEquals("Allocation unchanged", allocatedNodes, tester.nodeRepository().nodes().list().owner(app1));

         // Initial hosts are attached to the same switch
         tester.patchNodes(hosts0, (host) -> host.withSwitchHostname(switch0));

         // Redeploy does not change allocation
         tester.activate(app1, tester.prepare(app1, cluster, Capacity.from(new ClusterResources(2, 1, resources))));
-        assertEquals("Allocation unchanged", allocatedNodes, tester.nodeRepository().nodes().list(app1));
+        assertEquals("Allocation unchanged", allocatedNodes, tester.nodeRepository().nodes().list().owner(app1));

         // One regular host and one slow-disk host are provisioned on the same switch
         String switch1 = "switch1";
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java
index 5f8a0c99b9f..7d8fec95d3a 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java
@@ -167,19 +167,19 @@ public class DynamicDockerProvisionTest {
         mockHostProvisioner(hostProvisioner, "large", 3, null); // Provision shared hosts
         prepareAndActivate(application1, clusterSpec("mycluster"), 4, 1, resources);
-        Set initialNodes = tester.nodeRepository().nodes().list(application1).stream().collect(Collectors.toSet());
+        Set initialNodes = tester.nodeRepository().nodes().list().owner(application1).stream().collect(Collectors.toSet());
         assertEquals(4, initialNodes.size());

         // Redeploy same application with exclusive=true
         mockHostProvisioner(hostProvisioner, "large", 3, application1);
         prepareAndActivate(application1, clusterSpec("mycluster", true), 4, 1, resources);
-        assertEquals(8, tester.nodeRepository().nodes().list(application1).size());
-        assertEquals(initialNodes, tester.nodeRepository().nodes().list(application1).retired().stream().collect(Collectors.toSet()));
+        assertEquals(8, tester.nodeRepository().nodes().list().owner(application1).size());
+        assertEquals(initialNodes, tester.nodeRepository().nodes().list().owner(application1).retired().stream().collect(Collectors.toSet()));

         // Redeploy without exclusive again is no-op
         prepareAndActivate(application1, clusterSpec("mycluster"), 4, 1, resources);
-        assertEquals(8, tester.nodeRepository().nodes().list(application1).size());
-        assertEquals(initialNodes, tester.nodeRepository().nodes().list(application1).retired().stream().collect(Collectors.toSet()));
+        assertEquals(8, tester.nodeRepository().nodes().list().owner(application1).size());
+        assertEquals(initialNodes, tester.nodeRepository().nodes().list().owner(application1).retired().stream().collect(Collectors.toSet()));
     }

     @Test
@@ -188,7 +188,7 @@
         ApplicationId app = ProvisioningTester.applicationId();
         Function retireNode = node -> tester.patchNode(node, (n) -> n.withWantToRetire(true, Agent.system, Instant.now()));
-        Function getNodeInGroup = group -> tester.nodeRepository().nodes().list(app).stream()
+        Function getNodeInGroup = group -> tester.nodeRepository().nodes().list().owner(app).stream()
                 .filter(node -> node.allocation().get().membership().cluster().group().get().index() == group)
                 .findAny().orElseThrow();
@@ -209,7 +209,7 @@
         tester.prepare(app, clusterSpec("content"), 8, 2, resources);

         // Verify that nodes have unique indices from 0..9
-        var indices = tester.nodeRepository().nodes().list(app).stream()
+        var indices = tester.nodeRepository().nodes().list().owner(app).stream()
                 .map(node -> node.allocation().get().membership().index())
                 .collect(Collectors.toSet());
         assertTrue(indices.containsAll(IntStream.range(0, 10).boxed().collect(Collectors.toList())));
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
index a5e7704cce7..db6cc1d5fa6 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
@@ -137,7 +137,7 @@ public class LoadBalancerProvisionerTest {
         // Entire application is removed: Nodes and load balancer are deactivated
         tester.remove(app1);
         dirtyNodesOf(app1);
-        assertTrue("No nodes are allocated to " + app1, tester.nodeRepository().nodes().list(app1, Node.State.reserved, Node.State.active).isEmpty());
+        assertTrue("No nodes are allocated to " + app1, tester.nodeRepository().nodes().list(Node.State.reserved, Node.State.active).owner(app1).isEmpty());
         assertEquals(2, lbApp1.get().size());
         assertTrue("Deactivated load balancers", lbApp1.get().stream().allMatch(lb -> lb.state() == LoadBalancer.State.inactive));
         assertTrue("Load balancers for " + app2 + " remain active", lbApp2.get().stream().allMatch(lb -> lb.state() == LoadBalancer.State.active));
@@ -168,7 +168,7 @@ public class LoadBalancerProvisionerTest {
         var nodes = tester.prepare(app1, clusterRequest(ClusterSpec.Type.container, ClusterSpec.Id.from("qrs")), 2 , 1, resources);
         Supplier lb = () -> tester.nodeRepository().loadBalancers().list(app1).asList().get(0);
         assertTrue("Load balancer provisioned with empty reals", tester.loadBalancerService().instances().get(lb.get().id()).reals().isEmpty());
-        assignIps(tester.nodeRepository().nodes().list(app1));
+        assignIps(tester.nodeRepository().nodes().list().owner(app1));
         tester.activate(app1, nodes);
         assertFalse("Load balancer is reconfigured with reals", tester.loadBalancerService().instances().get(lb.get().id()).reals().isEmpty());
@@ -181,7 +181,7 @@ public class LoadBalancerProvisionerTest {
         // Application is redeployed
         nodes = tester.prepare(app1, clusterRequest(ClusterSpec.Type.container, ClusterSpec.Id.from("qrs")), 2 , 1, resources);
         assertTrue("Load balancer is reconfigured with empty reals", tester.loadBalancerService().instances().get(lb.get().id()).reals().isEmpty());
-        assignIps(tester.nodeRepository().nodes().list(app1));
+        assignIps(tester.nodeRepository().nodes().list().owner(app1));
         tester.activate(app1, nodes);
         assertFalse("Load balancer is reconfigured with reals", tester.loadBalancerService().instances().get(lb.get().id()).reals().isEmpty());
     }
@@ -270,7 +270,7 @@ public class LoadBalancerProvisionerTest {
     }

     private void dirtyNodesOf(ApplicationId application) {
-        tester.nodeRepository().nodes().deallocate(tester.nodeRepository().nodes().list(application).asList(), Agent.system, this.getClass().getSimpleName());
+        tester.nodeRepository().nodes().deallocate(tester.nodeRepository().nodes().list().owner(application).asList(), Agent.system, this.getClass().getSimpleName());
     }

     private Set prepare(ApplicationId application, ClusterSpec... specs) {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
index 2b0541ce0c2..9af137aa6d8 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
@@ -95,7 +95,7 @@ public class ProvisioningTest {
         HostSpec removed = tester.removeOne(state5.allHosts);
         tester.activate(application1, state5.allHosts);
         assertEquals(removed.hostname(),
-                     tester.nodeRepository().nodes().list(application1, Node.State.inactive).first().get().hostname());
+                     tester.nodeRepository().nodes().list(Node.State.inactive).owner(application1).first().get().hostname());

         // remove some of the clusters
         SystemState state6 = prepare(application1, 0, 2, 0, 3, defaultResources, tester);
@@ -108,13 +108,13 @@ public class ProvisioningTest {
         NodeList previouslyInactive = tester.getNodes(application1, Node.State.inactive);
         tester.remove(application1);
         assertEquals(tester.toHostNames(previouslyActive.not().container()),
-                     tester.toHostNames(tester.nodeRepository().nodes().list(application1, Node.State.inactive)));
+                     tester.toHostNames(tester.nodeRepository().nodes().list(Node.State.inactive).owner(application1)));
         assertTrue(tester.nodeRepository().nodes().list(Node.State.dirty).asList().containsAll(previouslyActive.container().asList()));
         assertEquals(0, tester.getNodes(application1, Node.State.active).size());
         assertTrue(tester.nodeRepository().applications().get(application1).isEmpty());

         // other application is unaffected
-        assertEquals(state1App2.hostNames(), tester.toHostNames(tester.nodeRepository().nodes().list(application2, Node.State.active)));
+        assertEquals(state1App2.hostNames(), tester.toHostNames(tester.nodeRepository().nodes().list(Node.State.active).owner(application2)));

         // fail a node from app2 and make sure it does not get inactive nodes from first
         HostSpec failed = tester.removeOne(state1App2.allHosts);
@@ -289,7 +289,7 @@ public class ProvisioningTest {

         // redeploy with increased sizes and new flavor
         SystemState state3 = prepare(application1, 3, 4, 4, 5, large, tester);
-        assertEquals("New nodes are reserved", 16, tester.nodeRepository().nodes().list(application1, Node.State.reserved).size());
+        assertEquals("New nodes are reserved", 16, tester.nodeRepository().nodes().list(Node.State.reserved).owner(application1).size());
         tester.activate(application1, state3.allHosts);
         assertEquals("small container nodes are retired because we are swapping the entire cluster",
                      2 + 2, tester.getNodes(application1, Node.State.active).retired().type(ClusterSpec.Type.container).resources(small).size());
@@ -316,7 +316,7 @@ public class ProvisioningTest {
         SystemState state1 = prepare(application1, 2, 2, 4, 4, small, tester);
         tester.activate(application1, state1.allHosts);

-        tester.nodeRepository().nodes().list(application1)
+        tester.nodeRepository().nodes().list().owner(application1)
               .forEach(n -> assertEquals(large, tester.nodeRepository().nodes().node(n.parentHostname().get()).get().resources()));
     }
@@ -374,7 +374,7 @@
             assertEquals(6, state.allHosts.size());
             tester.activate(application, state.allHosts);
             assertTrue(state.allHosts.stream().allMatch(host -> host.requestedResources().get().diskSpeed() == NodeResources.DiskSpeed.any));
-            assertTrue(tester.nodeRepository().nodes().list(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any));
+            assertTrue(tester.nodeRepository().nodes().list().owner(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any));
         }

         {
@@ -386,7 +386,7 @@
             assertEquals(8, state.allHosts.size());
             tester.activate(application, state.allHosts);
             assertTrue(state.allHosts.stream().allMatch(host -> host.requestedResources().get().diskSpeed() == NodeResources.DiskSpeed.fast));
-            assertTrue(tester.nodeRepository().nodes().list(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.fast));
+            assertTrue(tester.nodeRepository().nodes().list().owner(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.fast));
         }

         {
@@ -397,7 +397,7 @@
             assertEquals(8, state.allHosts.size());
             tester.activate(application, state.allHosts);
             assertTrue(state.allHosts.stream().allMatch(host -> host.requestedResources().get().diskSpeed() == NodeResources.DiskSpeed.any));
-            assertTrue(tester.nodeRepository().nodes().list(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any));
+            assertTrue(tester.nodeRepository().nodes().list().owner(application).stream().allMatch(node -> node.allocation().get().requestedResources().diskSpeed() == NodeResources.DiskSpeed.any));
         }
     }

@@ -692,25 +692,25 @@
         // Allocate 5 nodes
         ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("4.5.6").build();
         tester.activate(application, tester.prepare(application, cluster, capacity));
-        assertEquals(5, tester.nodeRepository().nodes().list(application, Node.State.active).not().retired().size());
-        assertEquals(0, tester.nodeRepository().nodes().list(application, Node.State.active).retired().size());
+        assertEquals(5, tester.nodeRepository().nodes().list(Node.State.active).owner(application).not().retired().size());
+        assertEquals(0, tester.nodeRepository().nodes().list(Node.State.active).owner(application).retired().size());

         // Mark the nodes as want to retire
-        tester.nodeRepository().nodes().list(application, Node.State.active).forEach(node -> tester.patchNode(node, (n) -> n.withWantToRetire(true, Agent.system, tester.clock().instant())));
+        tester.nodeRepository().nodes().list(Node.State.active).owner(application).forEach(node -> tester.patchNode(node, (n) -> n.withWantToRetire(true, Agent.system, tester.clock().instant())));
         // redeploy without allow failing
         tester.activate(application, tester.prepare(application, cluster, capacityFORCED));

         // Nodes are not retired since that is unsafe when we cannot fail
-        assertEquals(5, tester.nodeRepository().nodes().list(application, Node.State.active).not().retired().size());
-        assertEquals(0, tester.nodeRepository().nodes().list(application, Node.State.active).retired().size());
+        assertEquals(5, tester.nodeRepository().nodes().list(Node.State.active).owner(application).not().retired().size());
+        assertEquals(0, tester.nodeRepository().nodes().list(Node.State.active).owner(application).retired().size());
         // ... but we still want to
-        tester.nodeRepository().nodes().list(application, Node.State.active).forEach(node -> assertTrue(node.status().wantToRetire()));
+        tester.nodeRepository().nodes().list(Node.State.active).owner(application).forEach(node -> assertTrue(node.status().wantToRetire()));

         // redeploy with allowing failing
         tester.activate(application, tester.prepare(application, cluster, capacity));
         // ... old nodes are now retired
-        assertEquals(5, tester.nodeRepository().nodes().list(application, Node.State.active).not().retired().size());
-        assertEquals(5, tester.nodeRepository().nodes().list(application, Node.State.active).retired().size());
+        assertEquals(5, tester.nodeRepository().nodes().list(Node.State.active).owner(application).not().retired().size());
+        assertEquals(5, tester.nodeRepository().nodes().list(Node.State.active).owner(application).retired().size());
     }

     @Test
@@ -723,17 +723,17 @@
         ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("4.5.6").build();
         tester.activate(application, tester.prepare(application, cluster, capacityCanFail));
-        assertEquals(0, tester.nodeRepository().nodes().list(application, Node.State.active).retired().size());
+        assertEquals(0, tester.nodeRepository().nodes().list(Node.State.active).owner(application).retired().size());

-        tester.patchNode(tester.nodeRepository().nodes().list(application).stream().findAny().orElseThrow(), n -> n.withWantToRetire(true, Agent.system, tester.clock().instant()));
+        tester.patchNode(tester.nodeRepository().nodes().list().owner(application).stream().findAny().orElseThrow(), n -> n.withWantToRetire(true, Agent.system, tester.clock().instant()));
         tester.activate(application, tester.prepare(application, cluster, capacityCanFail));
-        assertEquals(1, tester.nodeRepository().nodes().list(application, Node.State.active).retired().size());
-        assertEquals(6, tester.nodeRepository().nodes().list(application, Node.State.active).size());
+        assertEquals(1, tester.nodeRepository().nodes().list(Node.State.active).owner(application).retired().size());
+        assertEquals(6, tester.nodeRepository().nodes().list(Node.State.active).owner(application).size());

         Capacity capacityCannotFail = Capacity.from(new ClusterResources(5, 1, defaultResources), false, false);
         tester.activate(application, tester.prepare(application, cluster, capacityCannotFail));
-        assertEquals(1, tester.nodeRepository().nodes().list(application, Node.State.active).retired().size());
-        assertEquals(6, tester.nodeRepository().nodes().list(application, Node.State.active).size());
+        assertEquals(1, tester.nodeRepository().nodes().list(Node.State.active).owner(application).retired().size());
+        assertEquals(6, tester.nodeRepository().nodes().list(Node.State.active).owner(application).size());
     }

     @Test
@@ -901,7 +901,7 @@
         try {
             prepareAndActivate.apply(cfgApp);
         } catch (ParentHostUnavailableException ignored) { }
-        assertEquals(2, tester.nodeRepository().nodes().list(cfgApp.getApplicationId()).state(Node.State.reserved).size());
+        assertEquals(2, tester.nodeRepository().nodes().list().owner(cfgApp.getApplicationId()).state(Node.State.reserved).size());

         prepareAndActivate.apply(cfgHostApp);
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
index 86f7ed45ce5..23f504a9c0f 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
@@ -143,7 +143,7 @@ public class ProvisioningTester {
     public NodeRepositoryProvisioner provisioner() { return provisioner; }
    public LoadBalancerServiceMock loadBalancerService() { return loadBalancerService; }
     public CapacityPolicies capacityPolicies() { return capacityPolicies; }
-    public NodeList getNodes(ApplicationId id, Node.State ... inState) { return nodeRepository.nodes().list(id, inState); }
+    public NodeList getNodes(ApplicationId id, Node.State ... inState) { return nodeRepository.nodes().list(inState).owner(id); }

     public Node patchNode(Node node, UnaryOperator patcher) {
         return patchNodes(List.of(node), patcher).get(0);
@@ -170,12 +170,12 @@ public class ProvisioningTester {
     }

     public List prepare(ApplicationId application, ClusterSpec cluster, Capacity capacity) {
-        Set reservedBefore = toHostNames(nodeRepository.nodes().list(application, Node.State.reserved));
-        Set inactiveBefore = toHostNames(nodeRepository.nodes().list(application, Node.State.inactive));
+        Set reservedBefore = toHostNames(nodeRepository.nodes().list(Node.State.reserved).owner(application));
+        Set inactiveBefore = toHostNames(nodeRepository.nodes().list(Node.State.inactive).owner(application));
         List hosts1 = provisioner.prepare(application, cluster, capacity, provisionLogger);
         List hosts2 = provisioner.prepare(application, cluster, capacity, provisionLogger);
         assertEquals("Prepare is idempotent", hosts1, hosts2);
-        Set newlyActivated = toHostNames(nodeRepository.nodes().list(application, Node.State.reserved));
+        Set newlyActivated = toHostNames(nodeRepository.nodes().list(Node.State.reserved).owner(application));
         newlyActivated.removeAll(reservedBefore);
         newlyActivated.removeAll(inactiveBefore);
         return hosts1;
@@ -213,7 +213,7 @@ public class ProvisioningTester {
             provisioner.activate(hosts, new ActivationContext(0), new ApplicationTransaction(lock, transaction));
             transaction.commit();
         }
-        assertEquals(toHostNames(hosts), toHostNames(nodeRepository.nodes().list(application, Node.State.active)));
+        assertEquals(toHostNames(hosts), toHostNames(nodeRepository.nodes().list(Node.State.active).owner(application)));
         return hosts;
     }
@@ -259,7 +259,7 @@ public class ProvisioningTester {
      * number of matches to the given filters
      */
     public void assertRestartCount(ApplicationId application, HostFilter... filters) {
-        for (Node node : nodeRepository.nodes().list(application, Node.State.active)) {
+        for (Node node : nodeRepository.nodes().list(Node.State.active).owner(application)) {
             int expectedRestarts = 0;
             for (HostFilter filter : filters)
                 if (NodeHostFilter.from(filter).matches(node))
@@ -464,7 +464,7 @@ public class ProvisioningTester {
                                          application.getClusterSpecWithVersion(configServersVersion),
                                          application.getCapacity());
         activate(application.getApplicationId(), new HashSet<>(hosts));
-        return nodeRepository.nodes().list(application.getApplicationId(), Node.State.active);
+        return nodeRepository.nodes().list(Node.State.active).owner(application.getApplicationId());
     }

     public List makeReadyNodes(int n, String flavor, NodeType type, int ipAddressPoolSize) {
@@ -560,7 +560,7 @@ public class ProvisioningTester {
     }

     public void assertAllocatedOn(String explanation, String hostFlavor, ApplicationId app) {
-        for (Node node : nodeRepository.nodes().list(app)) {
+        for (Node node : nodeRepository.nodes().list().owner(app)) {
             Node parent = nodeRepository.nodes().node(node.parentHostname().get()).get();
             assertEquals(node + ": " + explanation, hostFlavor, parent.flavor().name());
         }
@@ -594,7 +594,7 @@ public class ProvisioningTester {
     }

     public int hostFlavorCount(String hostFlavor, ApplicationId app) {
-        return (int)nodeRepository().nodes().list(app).stream()
+        return (int)nodeRepository().nodes().list().owner(app).stream()
                                    .map(n -> nodeRepository().nodes().node(n.parentHostname().get()).get())
                                    .filter(p -> p.flavor().name().equals(hostFlavor))
                                    .count();
--
cgit v1.2.3
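
For reference, the call-site pattern this commit applies throughout, sketched as standalone Java. The wrapper class and method names below are hypothetical and not part of the commit; NodeRepository, Nodes.list(...) and the NodeList filters owner(...) and retired() are taken from the diff above.

import com.yahoo.config.provision.ApplicationId;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;

// Hypothetical helper illustrating the migration; not code from this patch.
public class NodeListingExample {

    // Before this commit, getNodes(application, Node.State.active) filtered by owner
    // and state in a single repository call. After it, state filtering stays in
    // list(...) while ownership becomes a chained NodeList filter:
    static NodeList activeNodesOf(NodeRepository nodeRepository, ApplicationId application) {
        return nodeRepository.nodes().list(Node.State.active).owner(application);
    }

    // The returned NodeList composes further without another repository read,
    // e.g. narrowing to the retired subset as many of the updated tests do:
    static NodeList retiredActiveNodesOf(NodeRepository nodeRepository, ApplicationId application) {
        return activeNodesOf(nodeRepository, application).retired();
    }
}

One consequence of returning NodeList everywhere is that call sites can compare whole listings directly, as the "Allocation unchanged" assertions in DynamicDockerAllocationTest do with assertEquals on two NodeList values.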