diff options
6 files changed, 42 insertions, 32 deletions
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java index 0ceec0ab00b..75b0519032e 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java @@ -80,6 +80,7 @@ import java.util.stream.Collectors; import static com.yahoo.config.model.api.container.ContainerServiceType.CLUSTERCONTROLLER_CONTAINER; import static com.yahoo.config.model.api.container.ContainerServiceType.CONTAINER; import static com.yahoo.config.model.api.container.ContainerServiceType.LOGSERVER_CONTAINER; +import static com.yahoo.vespa.config.server.tenant.TenantRepository.HOSTED_VESPA_TENANT; import static java.nio.file.Files.readAttributes; /** @@ -605,7 +606,7 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye return tenantRepository.getAllTenantNames().stream() .filter(tenantName -> activeApplications(tenantName).isEmpty()) .filter(tenantName -> !tenantName.equals(TenantName.defaultName())) // Not allowed to remove 'default' tenant - .filter(tenantName -> !tenantName.equals(TenantRepository.HOSTED_VESPA_TENANT)) // Not allowed to remove 'hosted-vespa' tenant + .filter(tenantName -> !tenantName.equals(HOSTED_VESPA_TENANT)) // Not allowed to remove 'hosted-vespa' tenant .filter(tenantName -> tenantRepository.getTenant(tenantName).getCreatedTime().isBefore(now.minus(ttlForUnusedTenant))) .peek(tenantRepository::deleteTenant) .collect(Collectors.toSet()); @@ -777,7 +778,7 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye static Version decideVersion(ApplicationId application, Environment environment, Version sessionVersion, boolean bootstrap) { if ( environment.isManuallyDeployed() && sessionVersion.getMajor() == Vtag.currentVersion.getMajor() - && ! 
"hosted-vespa".equals(application.tenant().value()) // Never change version of system applications + && ! HOSTED_VESPA_TENANT.equals(application.tenant()) // Never change version of system applications && ! application.instance().isTester() // Never upgrade tester containers && ! bootstrap) { // Do not use current version when bootstrapping config server return Vtag.currentVersion; diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveExpirer.java index 013fd169f45..9efde8cf673 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveExpirer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveExpirer.java @@ -1,6 +1,7 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.provision.maintenance; +import com.yahoo.config.provision.NodeType; import com.yahoo.vespa.hosted.provision.Node; import com.yahoo.vespa.hosted.provision.NodeRepository; import com.yahoo.vespa.hosted.provision.node.Agent; @@ -50,7 +51,8 @@ public class InactiveExpirer extends Expirer { @Override protected boolean isExpired(Node node) { return super.isExpired(node) - || node.allocation().get().owner().instance().isTester(); + || node.allocation().get().owner().instance().isTester() + || node.type() == NodeType.host; // TODO: Remove after removing tenant hosts from zone-app } } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java index 3e42a0efad7..f8dfb6e3e39 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java +++ 
b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java @@ -3,19 +3,16 @@ package com.yahoo.vespa.hosted.provision.maintenance; import com.yahoo.component.Version; import com.yahoo.config.provision.ApplicationId; -import com.yahoo.config.provision.ApplicationName; import com.yahoo.config.provision.Capacity; import com.yahoo.config.provision.ClusterSpec; import com.yahoo.config.provision.DockerImage; import com.yahoo.config.provision.Environment; import com.yahoo.config.provision.Flavor; import com.yahoo.config.provision.HostSpec; -import com.yahoo.config.provision.InstanceName; import com.yahoo.config.provision.NodeFlavors; import com.yahoo.config.provision.NodeResources; import com.yahoo.config.provision.NodeType; import com.yahoo.config.provision.RegionName; -import com.yahoo.config.provision.TenantName; import com.yahoo.config.provision.Zone; import com.yahoo.test.ManualClock; import com.yahoo.transaction.NestedTransaction; @@ -40,10 +37,6 @@ import com.yahoo.vespa.orchestrator.Orchestrator; import java.time.Clock; import java.time.Duration; import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; @@ -59,9 +52,9 @@ public class NodeFailTester { public static final NodeResources nodeResources = new NodeResources(2, 8, 50); // Immutable components - public static final ApplicationId nodeAdminApp = ApplicationId.from(TenantName.from("hosted-vespa"), ApplicationName.from("routing"), InstanceName.from("default")); - public static final ApplicationId app1 = ApplicationId.from(TenantName.from("foo1"), ApplicationName.from("bar"), InstanceName.from("fuz")); - public static final ApplicationId app2 = ApplicationId.from(TenantName.from("foo2"), ApplicationName.from("bar"), InstanceName.from("fuz")); + public static final ApplicationId tenantHostApp = 
ApplicationId.from("hosted-vespa", "tenant-host", "default"); + public static final ApplicationId app1 = ApplicationId.from("foo1", "bar", "fuz"); + public static final ApplicationId app2 = ApplicationId.from("foo2", "bar", "fuz"); public static final NodeFlavors nodeFlavors = FlavorConfigBuilder.createDummies("default", "docker"); private static final Zone zone = new Zone(Environment.prod, RegionName.from("us-east")); private static final Duration downtimeLimitOneHour = Duration.ofMinutes(60); @@ -95,8 +88,8 @@ public class NodeFailTester { tester.createHostNodes(3); // Create applications - ClusterSpec clusterApp1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false, Collections.emptySet()); - ClusterSpec clusterApp2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false, Collections.emptySet()); + ClusterSpec clusterApp1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false, Set.of()); + ClusterSpec clusterApp2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.42"), false, Set.of()); int wantedNodesApp1 = 5; int wantedNodesApp2 = 7; tester.activate(app1, clusterApp1, wantedNodesApp1); @@ -104,9 +97,9 @@ public class NodeFailTester { assertEquals(wantedNodesApp1, tester.nodeRepository.getNodes(app1, Node.State.active).size()); assertEquals(wantedNodesApp2, tester.nodeRepository.getNodes(app2, Node.State.active).size()); - Map<ApplicationId, MockDeployer.ApplicationContext> apps = new HashMap<>(); - apps.put(app1, new MockDeployer.ApplicationContext(app1, clusterApp1, Capacity.fromCount(wantedNodesApp1, nodeResources, false, true), 1)); - apps.put(app2, new MockDeployer.ApplicationContext(app2, clusterApp2, Capacity.fromCount(wantedNodesApp2, nodeResources, false, true), 1)); + Map<ApplicationId, MockDeployer.ApplicationContext> apps = Map.of( 
+ app1, new MockDeployer.ApplicationContext(app1, clusterApp1, Capacity.fromCount(wantedNodesApp1, nodeResources, false, true), 1), + app2, new MockDeployer.ApplicationContext(app2, clusterApp2, Capacity.fromCount(wantedNodesApp2, nodeResources, false, true), 1)); tester.deployer = new MockDeployer(tester.provisioner, tester.clock(), apps); tester.serviceMonitor = new ServiceMonitorStub(apps, tester.nodeRepository); tester.metric = new MetricsReporterTest.TestMetric(); @@ -125,24 +118,24 @@ public class NodeFailTester { } // Create applications - ClusterSpec clusterNodeAdminApp = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("node-admin"), Version.fromString("6.42"), false, Collections.emptySet()); - ClusterSpec clusterApp1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test"), Version.fromString("6.75.0"), false, Collections.emptySet()); - ClusterSpec clusterApp2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.75.0"), false, Collections.emptySet()); + ClusterSpec clusterNodeAdminApp = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("node-admin"), Version.fromString("6.42"), false, Set.of()); + ClusterSpec clusterApp1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test"), Version.fromString("6.75.0"), false, Set.of()); + ClusterSpec clusterApp2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Version.fromString("6.75.0"), false, Set.of()); Capacity allHosts = Capacity.fromRequiredNodeType(NodeType.host); Capacity capacity1 = Capacity.fromCount(3, new NodeResources(1, 1, 1), false, true); Capacity capacity2 = Capacity.fromCount(5, new NodeResources(1, 1, 1), false, true); - tester.activate(nodeAdminApp, clusterNodeAdminApp, allHosts); + tester.activate(tenantHostApp, clusterNodeAdminApp, allHosts); tester.activate(app1, clusterApp1, capacity1); tester.activate(app2, clusterApp2, capacity2); - 
assertEquals(new HashSet<>(tester.nodeRepository.getNodes(NodeType.host)), - new HashSet<>(tester.nodeRepository.getNodes(nodeAdminApp, Node.State.active))); + assertEquals(Set.copyOf(tester.nodeRepository.getNodes(NodeType.host)), + Set.copyOf(tester.nodeRepository.getNodes(tenantHostApp, Node.State.active))); assertEquals(capacity1.nodeCount(), tester.nodeRepository.getNodes(app1, Node.State.active).size()); assertEquals(capacity2.nodeCount(), tester.nodeRepository.getNodes(app2, Node.State.active).size()); - Map<ApplicationId, MockDeployer.ApplicationContext> apps = new HashMap<>(); - apps.put(nodeAdminApp, new MockDeployer.ApplicationContext(nodeAdminApp, clusterNodeAdminApp, allHosts, 1)); - apps.put(app1, new MockDeployer.ApplicationContext(app1, clusterApp1, capacity1, 1)); - apps.put(app2, new MockDeployer.ApplicationContext(app2, clusterApp2, capacity2, 1)); + Map<ApplicationId, MockDeployer.ApplicationContext> apps = Map.of( + tenantHostApp, new MockDeployer.ApplicationContext(tenantHostApp, clusterNodeAdminApp, allHosts, 1), + app1, new MockDeployer.ApplicationContext(app1, clusterApp1, capacity1, 1), + app2, new MockDeployer.ApplicationContext(app2, clusterApp2, capacity2, 1)); tester.deployer = new MockDeployer(tester.provisioner, tester.clock(), apps); tester.serviceMonitor = new ServiceMonitorStub(apps, tester.nodeRepository); tester.metric = new MetricsReporterTest.TestMetric(); @@ -174,8 +167,8 @@ public class NodeFailTester { public static NodeFailTester withNoApplications() { NodeFailTester tester = new NodeFailTester(); - tester.deployer = new MockDeployer(tester.provisioner, tester.clock(), Collections.emptyMap()); - tester.serviceMonitor = new ServiceMonitorStub(Collections.emptyMap(), tester.nodeRepository); + tester.deployer = new MockDeployer(tester.provisioner, tester.clock(), Map.of()); + tester.serviceMonitor = new ServiceMonitorStub(Map.of(), tester.nodeRepository); tester.metric = new MetricsReporterTest.TestMetric(); tester.failer = 
tester.createFailer(); return tester; @@ -202,7 +195,7 @@ public class NodeFailTester { } public void allNodesMakeAConfigRequestExcept(Node ... deadNodeArray) { - allNodesMakeAConfigRequestExcept(Arrays.asList(deadNodeArray)); + allNodesMakeAConfigRequestExcept(List.of(deadNodeArray)); } public void allNodesMakeAConfigRequestExcept(List<Node> deadNodes) { diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/VespaModelUtil.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/VespaModelUtil.java index 61fe871ccc8..71865753cd8 100644 --- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/VespaModelUtil.java +++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/VespaModelUtil.java @@ -32,9 +32,11 @@ import static com.yahoo.collections.CollectionUtil.first; public class VespaModelUtil { private static final Logger log = Logger.getLogger(VespaModelUtil.class.getName()); + public static final ApplicationId TENANT_HOST_APPLICATION_ID = + ApplicationId.from("hosted-vespa", "tenant-host", "default"); + public static final ApplicationId ZONE_APPLICATION_ID = ApplicationId.from("hosted-vespa", "routing", "default"); - public static final ClusterId ADMIN_CLUSTER_ID = new ClusterId("admin"); public static final ClusterId NODE_ADMIN_CLUSTER_ID = new ClusterId("node-admin"); diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicy.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicy.java index 2451ebe6fd9..e31e015e2c8 100644 --- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicy.java +++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicy.java @@ -80,6 +80,10 @@ public class HostedVespaClusterPolicy implements ClusterPolicy { return ConcurrentSuspensionLimitForCluster.ALL_NODES; } + if 
(clusterApi.getApplication().applicationId().equals(VespaModelUtil.TENANT_HOST_APPLICATION_ID)) { + return ConcurrentSuspensionLimitForCluster.TWENTY_PERCENT; + } + if (clusterApi.getApplication().applicationId().equals(VespaModelUtil.ZONE_APPLICATION_ID) && clusterApi.clusterId().equals(VespaModelUtil.NODE_ADMIN_CLUSTER_ID)) { return ConcurrentSuspensionLimitForCluster.TWENTY_PERCENT; diff --git a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicyTest.java b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicyTest.java index c316f79c3d2..4cf39671271 100644 --- a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicyTest.java +++ b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaClusterPolicyTest.java @@ -62,6 +62,14 @@ public class HostedVespaClusterPolicyTest { } @Test + public void testTenantHostSuspensionLimit() { + when(applicationApi.applicationId()).thenReturn(VespaModelUtil.TENANT_HOST_APPLICATION_ID); + when(clusterApi.isStorageCluster()).thenReturn(false); + assertEquals(ConcurrentSuspensionLimitForCluster.TWENTY_PERCENT, + policy.getConcurrentSuspensionLimit(clusterApi)); + } + + @Test public void testDefaultSuspensionLimit() { when(applicationApi.applicationId()).thenReturn(ApplicationId.fromSerializedForm("a:b:c")); when(clusterApi.clusterId()).thenReturn(new ClusterId("some-cluster-id")); |