author     Håkon Hallingstad <hakon.hallingstad@gmail.com>   2023-11-06 14:26:12 +0100
committer  GitHub <noreply@github.com>                       2023-11-06 14:26:12 +0100
commit     a6bc8ff27b15e0041dcde208abb1b4ff9d0fe278 (patch)
tree       db89c6f549a231c0407f081668066ef7648b5d01
parent     93eea4d5a70d05c5bf0cf716f1cd51a2e92c6c25 (diff)
parent     f35b21d145e8a085138a4f06060d3511d48be86f (diff)
Merge pull request #29228 from vespa-engine/hakonhall/evaluate-flags-referenced-multiple-times-at-the-start-of-prepare
Evaluate flags referenced multiple times at the start of prepare
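
The essence of this merge is the pattern below: instead of re-reading feature flags at every use site during prepare, each flag is evaluated exactly once into an immutable record that is then threaded through the allocation code. A minimal, self-contained sketch (Params and the flag suppliers are illustrative stand-ins, not Vespa types):

    import java.util.function.Supplier;

    record Params(boolean makeExclusive, boolean sharedHosts) {
        static Params from(Supplier<Boolean> makeExclusiveFlag, Supplier<Boolean> sharedHostsFlag) {
            return new Params(makeExclusiveFlag.get(), sharedHostsFlag.get()); // each flag read exactly once
        }
    }

    class PrepareSketch {
        public static void main(String[] args) {
            Params params = Params.from(() -> true, () -> false); // snapshot at the start of prepare
            for (int step = 0; step < 3; step++) {
                // every step sees the same values, even if the flag source changes mid-prepare
                System.out.println("step " + step + ": " + params);
            }
        }
    }
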
 flags/src/main/java/com/yahoo/vespa/flags/custom/SharedHost.java                                          |  3
 node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java                        | 27
 node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableResources.java        | 31
 node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java         | 15
 node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java                  | 12
 node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java                | 50
 node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Limits.java                      |  9
 node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java    | 92
 node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgrader.java        |  6
 node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationParams.java         | 89
 node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java         | 46
 node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java          |  6
 node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java           | 16
 node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java            | 23
 node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java          | 40
 node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java| 70
 node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java       | 26
 node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java                 | 68
 node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java         | 11
 node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java             |  2
 node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java         |  9
 node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java             |  2
 node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java            |  8
 node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java                     |  6
 node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTester.java|  2
25 files changed, 367 insertions(+), 302 deletions(-)
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/custom/SharedHost.java b/flags/src/main/java/com/yahoo/vespa/flags/custom/SharedHost.java
index 66356d979a4..0c0a510d96e 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/custom/SharedHost.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/custom/SharedHost.java
@@ -36,6 +36,9 @@ public class SharedHost {
this.resources = resourcesOrNull == null ? List.of() : List.copyOf(resourcesOrNull);
}
+ @JsonIgnore
+ public SharedHost() { this(null); }
+
@JsonGetter("resources")
public List<HostResources> getResourcesOrNull() {
return resources.isEmpty() ? null : resources;
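
The new no-arg constructor mirrors a common convention for flag value classes. A minimal sketch of why it is convenient (assumes jackson-databind on the classpath; FlagValue is a hypothetical stand-in for SharedHost): callers that need a default value when the flag is unset can use the no-arg constructor, while @JsonIgnore keeps Jackson from mistaking it for a competing creator during deserialization.

    import com.fasterxml.jackson.annotation.JsonCreator;
    import com.fasterxml.jackson.annotation.JsonIgnore;
    import com.fasterxml.jackson.annotation.JsonProperty;
    import java.util.List;

    class FlagValue {
        private final List<String> resources;

        @JsonCreator
        public FlagValue(@JsonProperty("resources") List<String> resourcesOrNull) {
            this.resources = resourcesOrNull == null ? List.of() : List.copyOf(resourcesOrNull);
        }

        @JsonIgnore                        // not a creator: Jackson keeps using the one above
        public FlagValue() { this(null); } // default value when no flag data exists

        public List<String> resources() { return resources; }
    }
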
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
index dfbe41e31d7..25919d6a81d 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
@@ -12,9 +12,6 @@ import com.yahoo.config.provision.Zone;
import com.yahoo.config.provisioning.NodeRepositoryConfig;
import com.yahoo.vespa.curator.Curator;
import com.yahoo.vespa.flags.FlagSource;
-import com.yahoo.vespa.flags.JacksonFlag;
-import com.yahoo.vespa.flags.PermanentFlags;
-import com.yahoo.vespa.flags.custom.SharedHost;
import com.yahoo.vespa.hosted.provision.Node.State;
import com.yahoo.vespa.hosted.provision.applications.Applications;
import com.yahoo.vespa.hosted.provision.archive.ArchiveUriManager;
@@ -29,6 +26,7 @@ import com.yahoo.vespa.hosted.provision.persistence.CuratorDb;
import com.yahoo.vespa.hosted.provision.persistence.DnsNameResolver;
import com.yahoo.vespa.hosted.provision.persistence.JobControlFlags;
import com.yahoo.vespa.hosted.provision.persistence.NameResolver;
+import com.yahoo.vespa.hosted.provision.provisioning.AllocationParams;
import com.yahoo.vespa.hosted.provision.provisioning.ContainerImages;
import com.yahoo.vespa.hosted.provision.provisioning.FirmwareChecks;
import com.yahoo.vespa.hosted.provision.provisioning.HostResourcesCalculator;
@@ -67,7 +65,6 @@ public class NodeRepository extends AbstractComponent {
private final MetricsDb metricsDb;
private final Orchestrator orchestrator;
private final int spareCount;
- private final JacksonFlag<SharedHost> sharedHosts;
/**
* Creates a node repository from a zookeeper provider.
@@ -141,7 +138,6 @@ public class NodeRepository extends AbstractComponent {
this.metricsDb = metricsDb;
this.orchestrator = orchestrator;
this.spareCount = spareCount;
- this.sharedHosts = PermanentFlags.SHARED_HOST.bindTo(flagSource());
nodes.rewrite();
}
@@ -201,27 +197,6 @@ public class NodeRepository extends AbstractComponent {
/** The number of nodes we should ensure has free capacity for node failures whenever possible */
public int spareCount() { return spareCount; }
- /** Returns whether nodes must be allocated to hosts that are exclusive to the cluster type. */
- public boolean exclusiveClusterType(ClusterSpec cluster) {
- return sharedHosts.value().hasClusterType(cluster.type().name());
- }
-
- /**
- * Returns whether nodes are allocated exclusively in this instance given this cluster spec.
- * Exclusive allocation requires that the wanted node resources matches the advertised resources of the node
- * perfectly.
- */
- public boolean exclusiveAllocation(ClusterSpec clusterSpec) {
- return clusterSpec.isExclusive() ||
- ( clusterSpec.type().isContainer() && zone.system().isPublic() && !zone.environment().isTest() ) ||
- ( !zone().cloud().allowHostSharing() && !sharedHosts.value().supportsClusterType(clusterSpec.type().name()));
- }
-
- /** Whether the nodes of this cluster must be running on hosts that are specifically provisioned for the application. */
- public boolean exclusiveProvisioning(ClusterSpec clusterSpec) {
- return !zone.cloud().allowHostSharing() && clusterSpec.isExclusive();
- }
-
/**
* Returns ACLs for the children of the given host.
*
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableResources.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableResources.java
index 544436dc902..4f116630af8 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableResources.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableResources.java
@@ -9,6 +9,7 @@ import com.yahoo.config.provision.NodeResources;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
+import com.yahoo.vespa.hosted.provision.provisioning.AllocationParams;
import java.time.Duration;
import java.util.List;
@@ -33,14 +34,12 @@ public class AllocatableResources {
private final double fulfilment;
/** Fake allocatable resources from requested capacity */
- public AllocatableResources(ClusterResources requested,
- ClusterSpec clusterSpec,
- NodeRepository nodeRepository) {
+ public AllocatableResources(AllocationParams params, ClusterResources requested) {
this.nodes = requested.nodes();
this.groups = requested.groups();
- this.realResources = nodeRepository.resourcesCalculator().requestToReal(requested.nodeResources(), nodeRepository.exclusiveAllocation(clusterSpec), false);
+ this.realResources = params.nodeRepository().resourcesCalculator().requestToReal(requested.nodeResources(), params.exclusiveAllocation(), false);
this.advertisedResources = requested.nodeResources();
- this.clusterSpec = clusterSpec;
+ this.clusterSpec = params.cluster();
this.fulfilment = 1;
}
@@ -167,7 +166,8 @@ public class AllocatableResources {
.withBandwidthGbps(sum.bandwidthGbps() / nodes.size());
}
- public static Optional<AllocatableResources> from(ClusterResources wantedResources,
+ public static Optional<AllocatableResources> from(AllocationParams params,
+ ClusterResources wantedResources,
ApplicationId applicationId,
ClusterSpec clusterSpec,
Limits applicationLimits,
@@ -175,10 +175,11 @@ public class AllocatableResources {
ClusterModel model,
NodeRepository nodeRepository) {
var systemLimits = nodeRepository.nodeResourceLimits();
- boolean exclusive = nodeRepository.exclusiveAllocation(clusterSpec);
+ boolean exclusive = params.exclusiveAllocation();
if (! exclusive) {
// We decide resources: Add overhead to what we'll request (advertised) to make sure real becomes (at least) cappedNodeResources
- var allocatableResources = calculateAllocatableResources(wantedResources,
+ var allocatableResources = calculateAllocatableResources(params,
+ wantedResources,
nodeRepository,
applicationId,
clusterSpec,
@@ -189,8 +190,9 @@ public class AllocatableResources {
var worstCaseRealResources = nodeRepository.resourcesCalculator().requestToReal(allocatableResources.advertisedResources,
exclusive,
false);
- if ( ! systemLimits.isWithinRealLimits(worstCaseRealResources, applicationId, clusterSpec)) {
- allocatableResources = calculateAllocatableResources(wantedResources,
+ if ( ! systemLimits.isWithinRealLimits(params, worstCaseRealResources, applicationId, clusterSpec)) {
+ allocatableResources = calculateAllocatableResources(params,
+ wantedResources,
nodeRepository,
applicationId,
clusterSpec,
@@ -199,7 +201,7 @@ public class AllocatableResources {
false);
}
- if ( ! systemLimits.isWithinRealLimits(allocatableResources.realResources, applicationId, clusterSpec))
+ if ( ! systemLimits.isWithinRealLimits(params, allocatableResources.realResources, applicationId, clusterSpec))
return Optional.empty();
if ( ! anySatisfies(allocatableResources.realResources, availableRealHostResources))
return Optional.empty();
@@ -228,7 +230,7 @@ public class AllocatableResources {
}
if ( ! between(applicationLimits.min().nodeResources(), applicationLimits.max().nodeResources(), advertisedResources)) continue;
- if ( ! systemLimits.isWithinRealLimits(realResources, applicationId, clusterSpec)) continue;
+ if ( ! systemLimits.isWithinRealLimits(params, realResources, applicationId, clusterSpec)) continue;
var candidate = new AllocatableResources(wantedResources.with(realResources),
advertisedResources,
@@ -251,7 +253,8 @@ public class AllocatableResources {
}
}
- private static AllocatableResources calculateAllocatableResources(ClusterResources wantedResources,
+ private static AllocatableResources calculateAllocatableResources(AllocationParams params,
+ ClusterResources wantedResources,
NodeRepository nodeRepository,
ApplicationId applicationId,
ClusterSpec clusterSpec,
@@ -263,7 +266,7 @@ public class AllocatableResources {
advertisedResources = systemLimits.enlargeToLegal(advertisedResources, applicationId, clusterSpec, exclusive, true); // Ask for something legal
advertisedResources = applicationLimits.cap(advertisedResources); // Overrides other conditions, even if it will then fail
var realResources = nodeRepository.resourcesCalculator().requestToReal(advertisedResources, exclusive, bestCase); // What we'll really get
- if ( ! systemLimits.isWithinRealLimits(realResources, applicationId, clusterSpec)
+ if ( ! systemLimits.isWithinRealLimits(params, realResources, applicationId, clusterSpec)
&& advertisedResources.storageType() == NodeResources.StorageType.any) {
// Since local disk reserves some of the storage, try to constrain to remote disk
advertisedResources = advertisedResources.with(NodeResources.StorageType.remote);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
index ff30f9d6163..f8cfcaa1ce3 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
@@ -5,6 +5,7 @@ import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.IntRange;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.vespa.hosted.provision.NodeRepository;
+import com.yahoo.vespa.hosted.provision.provisioning.AllocationParams;
import java.util.Optional;
@@ -34,7 +35,8 @@ public class AllocationOptimizer {
* @return the best allocation, if there are any possible legal allocations, fulfilling the target
* fully or partially, within the limits
*/
- public Optional<AllocatableResources> findBestAllocation(Load loadAdjustment,
+ public Optional<AllocatableResources> findBestAllocation(AllocationParams params,
+ Load loadAdjustment,
ClusterModel model,
Limits limits) {
if (limits.isEmpty())
@@ -42,12 +44,12 @@ public class AllocationOptimizer {
new ClusterResources(maximumNodes, maximumNodes, NodeResources.unspecified()),
IntRange.empty());
else
- limits = atLeast(minimumNodes, limits).fullySpecified(model.current().clusterSpec(), nodeRepository, model.application().id());
+ limits = atLeast(minimumNodes, limits).fullySpecified(params);
Optional<AllocatableResources> bestAllocation = Optional.empty();
var availableRealHostResources = nodeRepository.zone().cloud().dynamicProvisioning()
? nodeRepository.flavors().getFlavors().stream().map(flavor -> flavor.resources()).toList()
: nodeRepository.nodes().list().hosts().stream().map(host -> host.flavor().resources())
- .map(hostResources -> maxResourcesOf(hostResources, model))
+ .map(hostResources -> maxResourcesOf(params, hostResources, model))
.toList();
for (int groups = limits.min().groups(); groups <= limits.max().groups(); groups++) {
for (int nodes = limits.min().nodes(); nodes <= limits.max().nodes(); nodes++) {
@@ -57,7 +59,8 @@ public class AllocationOptimizer {
groups,
nodeResourcesWith(nodes, groups,
limits, loadAdjustment, model));
- var allocatableResources = AllocatableResources.from(resources,
+ var allocatableResources = AllocatableResources.from(params,
+ resources,
model.application().id(),
model.current().clusterSpec(),
limits,
@@ -73,8 +76,8 @@ public class AllocationOptimizer {
}
/** Returns the max resources of a host one node may allocate. */
- private NodeResources maxResourcesOf(NodeResources hostResources, ClusterModel model) {
- if (nodeRepository.exclusiveAllocation(model.clusterSpec())) return hostResources;
+ private NodeResources maxResourcesOf(AllocationParams params, NodeResources hostResources, ClusterModel model) {
+ if (params.exclusiveAllocation()) return hostResources;
// static, shared hosts: Allocate at most half of the host cpu to simplify management
return hostResources.withVcpu(hostResources.vcpu() / 2);
}
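
For reference, the search findBestAllocation performs is a plain exhaustive scan over the group and node counts permitted by the limits, keeping the best-scoring candidate. A self-contained sketch under assumed types (Candidate and the fulfilment metric are made up, and the divisibility check is an assumption about equal-size groups):

    record Candidate(int groups, int nodes, double fulfilment) {}

    class GridSearchSketch {
        static Candidate findBest(int minGroups, int maxGroups, int minNodes, int maxNodes) {
            Candidate best = null;
            for (int groups = minGroups; groups <= maxGroups; groups++) {
                for (int nodes = minNodes; nodes <= maxNodes; nodes++) {
                    if (nodes % groups != 0) continue;              // assumed: only equal-size groups
                    Candidate candidate = new Candidate(groups, nodes, fulfilment(groups, nodes));
                    if (best == null || candidate.fulfilment() > best.fulfilment()) best = candidate;
                }
            }
            return best;
        }

        static double fulfilment(int groups, int nodes) { return nodes - 0.1 * groups; } // stand-in metric

        public static void main(String[] args) {
            System.out.println(findBest(1, 3, 2, 12)); // Candidate[groups=1, nodes=12, fulfilment=11.9]
        }
    }
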
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
index 738abddc31a..59dfc167d6a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
@@ -2,11 +2,13 @@
package com.yahoo.vespa.hosted.provision.autoscale;
import com.yahoo.config.provision.ClusterResources;
+import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Application;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.autoscale.Autoscaling.Status;
+import com.yahoo.vespa.hosted.provision.provisioning.AllocationParams;
import java.time.Duration;
@@ -54,12 +56,14 @@ public class Autoscaler {
}
private Autoscaling autoscale(Application application, Cluster cluster, NodeList clusterNodes, Limits limits) {
- var model = new ClusterModel(nodeRepository,
+ NodeList notRetired = clusterNodes.not().retired();
+ ClusterSpec clusterSpec = notRetired.clusterSpec();
+ AllocationParams params = AllocationParams.from(nodeRepository, application.id(), clusterSpec, clusterNodes.clusterSpec().vespaVersion());
+ var model = new ClusterModel(params,
application,
- clusterNodes.not().retired().clusterSpec(),
cluster,
clusterNodes,
- new AllocatableResources(clusterNodes.not().retired(), nodeRepository),
+ new AllocatableResources(notRetired, nodeRepository),
nodeRepository.metricsDb(),
nodeRepository.clock());
if (model.isEmpty()) return Autoscaling.empty();
@@ -73,7 +77,7 @@ public class Autoscaler {
var loadAdjustment = model.loadAdjustment();
// Ensure we only scale down if we'll have enough headroom to not scale up again given a small load increase
- var target = allocationOptimizer.findBestAllocation(loadAdjustment, model, limits);
+ var target = allocationOptimizer.findBestAllocation(params, loadAdjustment, model, limits);
if (target.isEmpty())
return Autoscaling.dontScale(Status.insufficient, "No allocations are possible within configured limits", model);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
index 4c5ace3d51a..de132d53d63 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
@@ -8,6 +8,7 @@ import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Application;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.provisioning.CapacityPolicies;
+import com.yahoo.vespa.hosted.provision.provisioning.AllocationParams;
import java.time.Clock;
import java.time.Duration;
@@ -46,9 +47,8 @@ public class ClusterModel {
// TODO: Measure this, and only take it into account with queries
private static final double fixedCpuCostFraction = 0.1;
- private final NodeRepository nodeRepository;
+ private final AllocationParams params;
private final Application application;
- private final ClusterSpec clusterSpec;
private final Cluster cluster;
private final AllocatableResources current;
@@ -74,31 +74,28 @@ public class ClusterModel {
private Double maxQueryGrowthRate = null;
private OptionalDouble averageQueryRate = null;
- public ClusterModel(NodeRepository nodeRepository,
+ public ClusterModel(AllocationParams params,
Application application,
- ClusterSpec clusterSpec,
Cluster cluster,
NodeList clusterNodes,
AllocatableResources current,
MetricsDb metricsDb,
Clock clock) {
- this.nodeRepository = nodeRepository;
+ this.params = params;
this.application = application;
- this.clusterSpec = clusterSpec;
this.cluster = cluster;
this.nodes = clusterNodes;
this.current = current;
this.clock = clock;
- this.scalingDuration = cluster.scalingDuration(clusterSpec);
- this.allocationDuration = cluster.allocationDuration(clusterSpec);
+ this.scalingDuration = cluster.scalingDuration(params.cluster());
+ this.allocationDuration = cluster.allocationDuration(params.cluster());
this.clusterTimeseries = metricsDb.getClusterTimeseries(application.id(), cluster.id());
this.nodeTimeseries = new ClusterNodesTimeseries(scalingDuration(), cluster, nodes, metricsDb);
this.at = clock.instant();
}
- ClusterModel(NodeRepository nodeRepository,
+ ClusterModel(AllocationParams params,
Application application,
- ClusterSpec clusterSpec,
Cluster cluster,
AllocatableResources current,
Clock clock,
@@ -106,9 +103,8 @@ public class ClusterModel {
Duration allocationDuration,
ClusterTimeseries clusterTimeseries,
ClusterNodesTimeseries nodeTimeseries) {
- this.nodeRepository = nodeRepository;
+ this.params = params;
this.application = application;
- this.clusterSpec = clusterSpec;
this.cluster = cluster;
this.nodes = NodeList.of();
this.current = current;
@@ -122,7 +118,7 @@ public class ClusterModel {
}
public Application application() { return application; }
- public ClusterSpec clusterSpec() { return clusterSpec; }
+ public ClusterSpec clusterSpec() { return params.cluster(); }
public AllocatableResources current() { return current; }
private ClusterNodesTimeseries nodeTimeseries() { return nodeTimeseries; }
private ClusterTimeseries clusterTimeseries() { return clusterTimeseries; }
@@ -144,7 +140,7 @@ public class ClusterModel {
public Duration allocationDuration() { return allocationDuration; }
public boolean isContent() {
- return clusterSpec.type().isContent();
+ return params.cluster().type().isContent();
}
/** Returns the predicted duration of data redistribution in this cluster. */
@@ -169,7 +165,7 @@ public class ClusterModel {
}
public boolean isExclusive() {
- return nodeRepository.exclusiveAllocation(clusterSpec);
+ return params.exclusiveAllocation();
}
/** Returns the relative load adjustment that should be made to this cluster given available measurements. */
@@ -277,7 +273,7 @@ public class ClusterModel {
* cluster.bcpGroupInfo().growthRateHeadroom() * trafficShiftHeadroom();
double neededTotalVcpuPerGroup = cluster.bcpGroupInfo().cpuCostPerQuery() * targetQueryRateToHandle / groupCount() +
( 1 - cpu.queryFraction()) * cpu.idealLoad() *
- (clusterSpec.type().isContainer() ? 1 : groupSize());
+ (params.cluster().type().isContainer() ? 1 : groupSize());
// Max 1: Only use bcp group info if it indicates that we need to scale *up*
double cpuAdjustment = Math.max(1.0, neededTotalVcpuPerGroup / currentClusterTotalVcpuPerGroup);
return ideal.withCpu(ideal.cpu() / cpuAdjustment);
@@ -341,7 +337,7 @@ public class ClusterModel {
/** Returns the headroom for growth during organic traffic growth as a multiple of current resources. */
private double growthRateHeadroom() {
- if ( ! nodeRepository.zone().environment().isProduction()) return 1;
+ if ( ! params.nodeRepository().zone().environment().isProduction()) return 1;
double growthRateHeadroom = 1 + maxQueryGrowthRate() * scalingDuration().toMinutes();
// Cap headroom at 10% above the historical observed peak
if (queryFractionOfMax() != 0)
@@ -355,7 +351,7 @@ public class ClusterModel {
* as a multiple of current resources.
*/
private double trafficShiftHeadroom() {
- if ( ! nodeRepository.zone().environment().isProduction()) return 1;
+ if ( ! params.nodeRepository().zone().environment().isProduction()) return 1;
if (canRescaleWithinBcpDeadline()) return 1;
double trafficShiftHeadroom;
if (application.status().maxReadShare() == 0) // No traffic fraction data
@@ -391,7 +387,7 @@ public class ClusterModel {
OptionalDouble costPerQuery() {
if (averageQueryRate().isEmpty() || averageQueryRate().getAsDouble() == 0.0) return OptionalDouble.empty();
// TODO: Query rate should generally be sampled at the time where we see the peak resource usage
- int fanOut = clusterSpec.type().isContainer() ? 1 : groupSize();
+ int fanOut = params.cluster().type().isContainer() ? 1 : groupSize();
return OptionalDouble.of(peakLoad().cpu() * cpu.queryFraction() * fanOut * nodes.not().retired().first().get().resources().vcpu()
/ averageQueryRate().getAsDouble() / groupCount());
}
@@ -414,8 +410,8 @@ public class ClusterModel {
private class MemoryModel {
double idealLoad() {
- if (clusterSpec.type().isContainer()) return idealContainerMemoryLoad;
- if (clusterSpec.type() == ClusterSpec.Type.admin) return idealContainerMemoryLoad; // Not autoscaled, but ideal shown in console
+ if (params.cluster().type().isContainer()) return idealContainerMemoryLoad;
+ if (params.cluster().type() == ClusterSpec.Type.admin) return idealContainerMemoryLoad; // Not autoscaled, but ideal shown in console
return idealContentMemoryLoad;
}
@@ -432,16 +428,12 @@ public class ClusterModel {
double averageReal() {
if (nodes.isEmpty()) { // we're estimating
- var initialResources = new CapacityPolicies(nodeRepository).specifyFully(cluster.minResources().nodeResources(),
- clusterSpec,
- application.id());
- return nodeRepository.resourcesCalculator().requestToReal(initialResources,
- nodeRepository.exclusiveAllocation(clusterSpec),
- false).memoryGb();
+ var initialResources = new CapacityPolicies(params.nodeRepository()).specifyFully(params, cluster.minResources().nodeResources());
+ return params.nodeRepository().resourcesCalculator().requestToReal(initialResources, params.exclusiveAllocation(), false).memoryGb();
}
else {
return nodes.stream()
- .mapToDouble(node -> nodeRepository.resourcesCalculator().realResourcesOf(node, nodeRepository).memoryGb())
+ .mapToDouble(node -> params.nodeRepository().resourcesCalculator().realResourcesOf(node, params.nodeRepository()).memoryGb())
.average()
.getAsDouble();
}
@@ -454,7 +446,7 @@ public class ClusterModel {
double idealLoad() {
// Stateless clusters are not expected to consume more disk over time -
// if they do it is due to logs which will be rotated away right before the disk is full
- return clusterSpec.isStateful() ? idealContentDiskLoad : idealContainerDiskLoad;
+ return params.cluster().isStateful() ? idealContentDiskLoad : idealContainerDiskLoad;
}
}
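
The growth headroom computed in growthRateHeadroom above is simple arithmetic; a worked sketch with made-up numbers:

    class HeadroomSketch {
        public static void main(String[] args) {
            double maxQueryGrowthRate = 0.001;   // assumed: query rate grows 0.1% of current per minute
            long scalingDurationMinutes = 45;    // assumed: a rescale takes 45 minutes to complete
            double headroom = 1 + maxQueryGrowthRate * scalingDurationMinutes;
            System.out.println(headroom);        // 1.045: keep ~4.5% spare capacity for organic growth
        }
    }
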
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Limits.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Limits.java
index ab93e585c88..827744a6e56 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Limits.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Limits.java
@@ -10,6 +10,7 @@ import com.yahoo.config.provision.NodeResources;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.provisioning.CapacityPolicies;
+import com.yahoo.vespa.hosted.provision.provisioning.AllocationParams;
import java.util.Objects;
@@ -65,12 +66,12 @@ public class Limits {
return resources;
}
- public Limits fullySpecified(ClusterSpec clusterSpec, NodeRepository nodeRepository, ApplicationId applicationId) {
+ public Limits fullySpecified(AllocationParams params) {
if (this.isEmpty()) throw new IllegalStateException("Unspecified limits can not be made fully specified");
- var capacityPolicies = new CapacityPolicies(nodeRepository);
- return new Limits(capacityPolicies.specifyFully(min, clusterSpec, applicationId),
- capacityPolicies.specifyFully(max, clusterSpec, applicationId),
+ var capacityPolicies = new CapacityPolicies(params.nodeRepository());
+ return new Limits(capacityPolicies.specifyFully(params, min),
+ capacityPolicies.specifyFully(params, max),
groupSize);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java
index 108f8d77837..156be954c61 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java
@@ -13,10 +13,7 @@ import com.yahoo.config.provision.NodeType;
import com.yahoo.jdisc.Metric;
import com.yahoo.lang.MutableInteger;
import com.yahoo.transaction.Mutex;
-import com.yahoo.vespa.flags.BooleanFlag;
-import com.yahoo.vespa.flags.FetchVector;
import com.yahoo.vespa.flags.FlagSource;
-import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.flags.ListFlag;
import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.flags.custom.ClusterCapacity;
@@ -28,6 +25,7 @@ import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.History;
import com.yahoo.vespa.hosted.provision.node.IP;
+import com.yahoo.vespa.hosted.provision.provisioning.AllocationParams;
import com.yahoo.vespa.hosted.provision.provisioning.HostProvisionRequest;
import com.yahoo.vespa.hosted.provision.provisioning.HostProvisioner;
import com.yahoo.vespa.hosted.provision.provisioning.HostProvisioner.HostSharing;
@@ -47,6 +45,7 @@ import java.util.Set;
import java.util.logging.Level;
import java.util.logging.Logger;
+import static com.yahoo.vespa.hosted.provision.provisioning.NodeCandidate.ExclusivityViolation.YES;
import static java.util.Comparator.comparing;
import static java.util.Comparator.naturalOrder;
import static java.util.stream.Collectors.groupingBy;
@@ -62,7 +61,6 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
private final HostProvisioner hostProvisioner;
private final ListFlag<ClusterCapacity> preprovisionCapacityFlag;
- private final BooleanFlag makeExclusiveFlag;
private final ProvisioningThrottler throttler;
HostCapacityMaintainer(NodeRepository nodeRepository,
@@ -73,7 +71,6 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
super(nodeRepository, interval, metric);
this.hostProvisioner = hostProvisioner;
this.preprovisionCapacityFlag = PermanentFlags.PREPROVISION_CAPACITY.bindTo(flagSource);
- this.makeExclusiveFlag = Flags.MAKE_EXCLUSIVE.bindTo(flagSource);
this.throttler = new ProvisioningThrottler(nodeRepository, metric);
}
@@ -159,12 +156,13 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
}
private List<Node> provision(NodeList nodeList) {
- return provisionUntilNoDeficit(nodeList).stream()
- .sorted(comparing(node -> node.history().events().stream()
- .map(History.Event::at)
- .min(naturalOrder())
- .orElse(Instant.MIN)))
- .toList();
+ return provisionUntilNoDeficit(nodeList)
+ .stream()
+ .sorted(comparing(node -> node.history().events().stream()
+ .map(History.Event::at)
+ .min(naturalOrder())
+ .orElse(Instant.MIN)))
+ .toList();
}
private static boolean canRemoveHost(Node host) {
@@ -191,12 +189,9 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
* should be sufficient (avoid no stack trace).
*/
private List<Node> provisionUntilNoDeficit(NodeList nodeList) {
+        // Ensure flags for allocation params are evaluated before the provision loop. Use a dummy cluster spec.
+ var params = AllocationParams.from(nodeRepository(), ApplicationId.defaultId(), asClusterSpec(Optional.empty(), 0), Vtag.currentVersion);
List<ClusterCapacity> preprovisionCapacity = preprovisionCapacityFlag.value();
- ApplicationId application = ApplicationId.defaultId();
- boolean makeExclusive = makeExclusiveFlag.with(FetchVector.Dimension.TENANT_ID, application.tenant().value())
- .with(FetchVector.Dimension.INSTANCE_ID, application.serializedForm())
- .with(FetchVector.Dimension.VESPA_VERSION, Vtag.currentVersion.toFullString())
- .value();
// Worst-case each ClusterCapacity in preprovisionCapacity will require an allocation.
int maxProvisions = preprovisionCapacity.size();
@@ -204,7 +199,7 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
var nodesPlusProvisioned = new ArrayList<>(nodeList.asList());
for (int numProvisions = 0;; ++numProvisions) {
var nodesPlusProvisionedPlusAllocated = new ArrayList<>(nodesPlusProvisioned);
- Optional<ClusterCapacity> deficit = allocatePreprovisionCapacity(application, preprovisionCapacity, nodesPlusProvisionedPlusAllocated, makeExclusive);
+ Optional<ClusterCapacity> deficit = allocatePreprovisionCapacity(params, preprovisionCapacity, nodesPlusProvisionedPlusAllocated);
if (deficit.isEmpty()) {
return nodesPlusProvisionedPlusAllocated;
}
@@ -214,21 +209,22 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
}
ClusterCapacity clusterCapacityDeficit = deficit.get();
- nodesPlusProvisioned.addAll(provisionHosts(clusterCapacityDeficit.count(),
+ nodesPlusProvisioned.addAll(provisionHosts(params,
+ clusterCapacityDeficit.count(),
toNodeResources(clusterCapacityDeficit),
Optional.ofNullable(clusterCapacityDeficit.clusterType()),
nodeList));
}
}
- private List<Node> provisionHosts(int count, NodeResources nodeResources, Optional<String> clusterType, NodeList allNodes) {
+ private List<Node> provisionHosts(AllocationParams params, int count, NodeResources nodeResources, Optional<String> clusterType, NodeList allNodes) {
try {
if (throttler.throttle(allNodes, Agent.HostCapacityMaintainer)) {
throw new NodeAllocationException("Host provisioning is being throttled", true);
}
Version osVersion = nodeRepository().osVersions().targetFor(NodeType.host).orElse(Version.emptyVersion);
List<Integer> provisionIndices = nodeRepository().database().readProvisionIndices(count);
- HostSharing sharingMode = nodeRepository().exclusiveAllocation(asSpec(clusterType, 0)) ? HostSharing.exclusive : HostSharing.shared;
+ HostSharing sharingMode = params.exclusiveAllocation() ? HostSharing.exclusive : HostSharing.shared;
HostProvisionRequest request = new HostProvisionRequest(provisionIndices, NodeType.host, nodeResources,
ApplicationId.defaultId(), osVersion,
sharingMode, clusterType.map(ClusterSpec.Type::valueOf), Optional.empty(),
@@ -236,15 +232,16 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
List<Node> hosts = new ArrayList<>();
Runnable waiter;
try (var lock = nodeRepository().nodes().lockUnallocated()) {
- waiter = hostProvisioner.provisionHosts(request,
- resources -> true,
- provisionedHosts -> {
- hosts.addAll(provisionedHosts.stream()
- .map(host -> host.generateHost(Duration.ZERO))
- .map(host -> host.withExclusiveToApplicationId(null))
- .toList());
- nodeRepository().nodes().addNodes(hosts, Agent.HostCapacityMaintainer);
- });
+ waiter = hostProvisioner.provisionHosts(params.sharedHost(),
+ request,
+ resources -> true,
+ provisionedHosts -> {
+ hosts.addAll(provisionedHosts.stream()
+ .map(host -> host.generateHost(Duration.ZERO))
+ .map(host -> host.withExclusiveToApplicationId(null))
+ .toList());
+ nodeRepository().nodes().addNodes(hosts, Agent.HostCapacityMaintainer);
+ });
}
waiter.run();
return hosts;
@@ -263,14 +260,18 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
* they are added to {@code mutableNodes}
* @return the part of a cluster capacity it was unable to allocate, if any
*/
- private Optional<ClusterCapacity> allocatePreprovisionCapacity(ApplicationId application,
+ private Optional<ClusterCapacity> allocatePreprovisionCapacity(AllocationParams params,
List<ClusterCapacity> preprovisionCapacity,
- ArrayList<Node> mutableNodes,
- boolean makeExclusive) {
+ ArrayList<Node> mutableNodes) {
for (int clusterIndex = 0; clusterIndex < preprovisionCapacity.size(); ++clusterIndex) {
ClusterCapacity clusterCapacity = preprovisionCapacity.get(clusterIndex);
+
+ params = params.with(asClusterSpec(Optional.ofNullable(clusterCapacity.clusterType()), clusterIndex));
+ if (params.exclusiveProvisioning())
+ throw new IllegalStateException("Preprovision cluster requires exclusive provisioning: " + clusterCapacity);
+
LockedNodeList allNodes = new LockedNodeList(mutableNodes, () -> {});
- List<Node> candidates = findCandidates(application, clusterCapacity, clusterIndex, allNodes, makeExclusive);
+ List<Node> candidates = findCandidates(params, clusterCapacity, allNodes);
int deficit = Math.max(0, clusterCapacity.count() - candidates.size());
if (deficit > 0) {
return Optional.of(clusterCapacity.withCount(deficit));
@@ -283,45 +284,34 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
return Optional.empty();
}
- private List<Node> findCandidates(ApplicationId application, ClusterCapacity clusterCapacity, int clusterIndex, LockedNodeList allNodes, boolean makeExclusive) {
+ private List<Node> findCandidates(AllocationParams params, ClusterCapacity clusterCapacity, LockedNodeList allNodes) {
NodeResources nodeResources = toNodeResources(clusterCapacity);
// We'll allocate each ClusterCapacity as a unique cluster in a dummy application
- ClusterSpec cluster = asSpec(Optional.ofNullable(clusterCapacity.clusterType()), clusterIndex);
NodeSpec nodeSpec = NodeSpec.from(clusterCapacity.count(), 1, nodeResources, false, true,
nodeRepository().zone().cloud().account(), Duration.ZERO);
var allocationContext = IP.Allocation.Context.from(nodeRepository().zone().cloud().name(),
nodeSpec.cloudAccount().isExclave(nodeRepository().zone()),
nodeRepository().nameResolver());
- NodePrioritizer prioritizer = new NodePrioritizer(allNodes, application, cluster, nodeSpec,
- true, false, allocationContext, nodeRepository().nodes(),
- nodeRepository().resourcesCalculator(), nodeRepository().spareCount(),
- nodeRepository().exclusiveAllocation(cluster), makeExclusive);
+ NodePrioritizer prioritizer = new NodePrioritizer(params, allNodes, nodeSpec, true, false, allocationContext, nodeRepository().nodes(),
+ nodeRepository().resourcesCalculator(), nodeRepository().spareCount());
List<NodeCandidate> nodeCandidates = prioritizer.collect()
.stream()
- .filter(node -> node.violatesExclusivity(cluster,
- application,
- nodeRepository().exclusiveClusterType(cluster),
- nodeRepository().exclusiveAllocation(cluster),
- false,
- nodeRepository().zone().cloud().allowHostSharing(),
- allNodes,
- makeExclusive)
- != NodeCandidate.ExclusivityViolation.YES)
+ .filter(node -> node.violatesExclusivity(params, allNodes) != YES)
.toList();
MutableInteger index = new MutableInteger(0);
return nodeCandidates
.stream()
.limit(clusterCapacity.count())
.map(candidate -> candidate.toNode()
- .allocate(application,
- ClusterMembership.from(cluster, index.next()),
+ .allocate(params.application(),
+ ClusterMembership.from(params.cluster(), index.next()),
nodeResources,
nodeRepository().clock().instant()))
.toList();
}
- private static ClusterSpec asSpec(Optional<String> clusterType, int index) {
+ private static ClusterSpec asClusterSpec(Optional<String> clusterType, int index) {
return ClusterSpec.request(clusterType.map(ClusterSpec.Type::from).orElse(ClusterSpec.Type.content),
ClusterSpec.Id.from(String.valueOf(index)))
.vespaVersion(Vtag.currentVersion) // Needed, but should not be used here.
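
The control flow of provisionUntilNoDeficit earlier in this file reduces to a bounded retry loop: allocate, provision whatever deficit remains, retry, and give up after a worst-case number of rounds. A self-contained sketch (the types and the allocate stand-in are illustrative):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Optional;

    class DeficitLoopSketch {
        /** Returns the unsatisfied host count, if any (stand-in for allocatePreprovisionCapacity). */
        static Optional<Integer> allocate(List<String> hosts, int wanted) {
            return hosts.size() >= wanted ? Optional.empty() : Optional.of(wanted - hosts.size());
        }

        public static void main(String[] args) {
            int wanted = 3;
            int maxProvisions = 1;                      // worst case: one round per capacity entry
            List<String> hosts = new ArrayList<>();
            for (int provisions = 0; ; provisions++) {
                Optional<Integer> deficit = allocate(hosts, wanted);
                if (deficit.isEmpty()) break;           // no deficit left: done
                if (provisions >= maxProvisions)
                    throw new IllegalStateException("Deficit remains after " + maxProvisions + " rounds");
                for (int i = 0; i < deficit.get(); i++) hosts.add("host" + i); // "provision" the deficit
            }
            System.out.println("satisfied with " + hosts.size() + " hosts");
        }
    }
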
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgrader.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgrader.java
index b6897d5b1c9..e7b86c32618 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgrader.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgrader.java
@@ -1,6 +1,7 @@
// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.maintenance;
+import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Deployer;
import com.yahoo.config.provision.NodeAllocationException;
import com.yahoo.config.provision.NodeResources;
@@ -12,6 +13,7 @@ import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.Allocation;
import com.yahoo.vespa.hosted.provision.provisioning.HostProvisioner;
+import com.yahoo.vespa.hosted.provision.provisioning.AllocationParams;
import java.time.Duration;
import java.util.HashSet;
@@ -71,7 +73,9 @@ public class HostFlavorUpgrader extends NodeRepositoryMaintainer {
if (parent.isEmpty()) continue;
if (exhaustedFlavors.contains(parent.get().flavor().name())) continue;
Allocation allocation = node.allocation().get();
- Predicate<NodeResources> realHostResourcesWithinLimits = resources -> nodeRepository().nodeResourceLimits().isWithinRealLimits(resources, allocation.owner(), allocation.membership().cluster());
+ ClusterSpec cluster = allocation.membership().cluster();
+ var params = AllocationParams.from(nodeRepository(), allocation.owner(), cluster, cluster.vespaVersion());
+ Predicate<NodeResources> realHostResourcesWithinLimits = resources -> nodeRepository().nodeResourceLimits().isWithinRealLimits(params, resources, allocation.owner(), cluster);
if (!hostProvisioner.canUpgradeFlavor(parent.get(), node, realHostResourcesWithinLimits)) continue;
if (parent.get().status().wantToUpgradeFlavor() && allocation.membership().retired()) continue; // Already upgrading
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationParams.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationParams.java
new file mode 100644
index 00000000000..59a1f7b025a
--- /dev/null
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationParams.java
@@ -0,0 +1,89 @@
+// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.provision.provisioning;
+
+import com.yahoo.component.Version;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.Zone;
+import com.yahoo.vespa.flags.FetchVector;
+import com.yahoo.vespa.flags.Flags;
+import com.yahoo.vespa.flags.PermanentFlags;
+import com.yahoo.vespa.flags.custom.SharedHost;
+import com.yahoo.vespa.hosted.provision.NodeRepository;
+
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Miscellaneous parameters for the preparation of an allocation of a cluster.
+ *
+ * <p>Ideal for feature flags that guard new code paths in various parts of the allocation code.</p>
+ *
+ * @param exclusiveClusterType whether nodes must be allocated to hosts that are exclusive to the cluster type
+ * @param exclusiveAllocation whether nodes are allocated exclusively in this instance given this cluster spec.
+ * Exclusive allocation requires that the wanted node resources matches the advertised
+ * resources of the node perfectly
+ * @param exclusiveProvisioning whether the nodes of this cluster must be running on hosts that are specifically provisioned for the application
+ * @param sharedHost snapshot of shared-host flag
+ * @param makeExclusive snapshot of make-exclusive flag
+ * @author hakonhall
+ */
+public record AllocationParams(NodeRepository nodeRepository,
+ ApplicationId application,
+ ClusterSpec cluster,
+ boolean exclusiveClusterType,
+ boolean exclusiveAllocation,
+ boolean exclusiveProvisioning,
+ SharedHost sharedHost,
+ boolean makeExclusive) {
+
+ public AllocationParams {
+ requireNonNull(nodeRepository, "nodeRepository cannot be null");
+ requireNonNull(application, "application cannot be null");
+ requireNonNull(cluster, "cluster cannot be null");
+ requireNonNull(sharedHost, "sharedHost cannot be null");
+ }
+
+ /** The canonical way of constructing an instance: ensures consistencies between the various parameters. */
+ public static AllocationParams from(NodeRepository nodeRepository, ApplicationId application, ClusterSpec cluster, Version version) {
+ return from(nodeRepository,
+ application,
+ cluster,
+ PermanentFlags.SHARED_HOST.bindTo(nodeRepository.flagSource()).value(),
+ Flags.MAKE_EXCLUSIVE.bindTo(nodeRepository.flagSource())
+ .with(FetchVector.Dimension.TENANT_ID, application.tenant().value())
+ .with(FetchVector.Dimension.INSTANCE_ID, application.serializedForm())
+ .with(FetchVector.Dimension.VESPA_VERSION, version.toFullString())
+ .value());
+ }
+
+ /**
+ * Returns the same allocation parameters, but as-if it was built with the given cluster. Flags are NOT re-evaluated,
+ * but exclusivity may change.
+ */
+ public AllocationParams with(ClusterSpec cluster) { return from(nodeRepository, application, cluster, sharedHost, makeExclusive); }
+
+ private static AllocationParams from(NodeRepository nodeRepository, ApplicationId application, ClusterSpec cluster, SharedHost sharedHost, boolean makeExclusive) {
+ return new AllocationParams(nodeRepository,
+ application,
+ cluster,
+ exclusiveClusterType(cluster, sharedHost),
+ exclusiveAllocation(nodeRepository.zone(), cluster, sharedHost),
+ exclusiveProvisioning(nodeRepository.zone(), cluster),
+ sharedHost,
+ makeExclusive);
+ }
+
+ private static boolean exclusiveClusterType(ClusterSpec cluster, SharedHost sharedHost) {
+ return sharedHost.hasClusterType(cluster.type().name());
+ }
+
+ private static boolean exclusiveAllocation(Zone zone, ClusterSpec cluster, SharedHost sharedHost) {
+ return cluster.isExclusive() ||
+ ( cluster.type().isContainer() && zone.system().isPublic() && !zone.environment().isTest() ) ||
+ ( !zone.cloud().allowHostSharing() && !sharedHost.supportsClusterType(cluster.type().name()));
+ }
+
+ private static boolean exclusiveProvisioning(Zone zone, ClusterSpec clusterSpec) {
+ return !zone.cloud().allowHostSharing() && clusterSpec.isExclusive();
+ }
+}
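
A hedged usage sketch of the new record (assumes the node-repository module on the classpath; the repo, app, cluster, and admin arguments are stand-ins supplied by the caller):

    import com.yahoo.config.provision.ApplicationId;
    import com.yahoo.config.provision.ClusterSpec;
    import com.yahoo.vespa.hosted.provision.NodeRepository;
    import com.yahoo.vespa.hosted.provision.provisioning.AllocationParams;

    class AllocationParamsSketch {
        static void example(NodeRepository repo, ApplicationId app, ClusterSpec cluster, ClusterSpec admin) {
            // SHARED_HOST and MAKE_EXCLUSIVE are evaluated here, exactly once
            AllocationParams params = AllocationParams.from(repo, app, cluster, cluster.vespaVersion());
            boolean exclusive = params.exclusiveAllocation();   // consistent for the whole prepare
            AllocationParams adminParams = params.with(admin);  // same flag snapshot, exclusivity re-derived
        }
    }
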
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
index 1e9adea4e95..1d1be35aee8 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
@@ -10,6 +10,7 @@ import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeResources.DiskSpeed;
+import com.yahoo.config.provision.NodeType;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.flags.StringFlag;
@@ -86,47 +87,47 @@ public class CapacityPolicies {
return target;
}
- public ClusterResources specifyFully(ClusterResources resources, ClusterSpec clusterSpec, ApplicationId applicationId) {
- return resources.with(specifyFully(resources.nodeResources(), clusterSpec, applicationId));
+ public ClusterResources specifyFully(AllocationParams params, ClusterResources resources) {
+ return resources.with(specifyFully(params, resources.nodeResources()));
}
- public NodeResources specifyFully(NodeResources resources, ClusterSpec clusterSpec, ApplicationId applicationId) {
- NodeResources amended = resources.withUnspecifiedFieldsFrom(defaultResources(clusterSpec, applicationId).with(DiskSpeed.any));
+ public NodeResources specifyFully(AllocationParams params, NodeResources resources) {
+ NodeResources amended = resources.withUnspecifiedFieldsFrom(defaultResources(params).with(DiskSpeed.any));
// TODO jonmv: remove this after all apps are 8.248.8 or above; architecture for admin nodes was not picked up before this.
- if (clusterSpec.vespaVersion().isBefore(Version.fromString("8.248.8"))) amended = amended.with(resources.architecture());
+ if (params.cluster().vespaVersion().isBefore(Version.fromString("8.248.8"))) amended = amended.with(resources.architecture());
return amended;
}
- private NodeResources defaultResources(ClusterSpec clusterSpec, ApplicationId applicationId) {
- if (clusterSpec.type() == ClusterSpec.Type.admin) {
- Architecture architecture = adminClusterArchitecture(applicationId);
+ private NodeResources defaultResources(AllocationParams params) {
+ if (params.cluster().type() == ClusterSpec.Type.admin) {
+ Architecture architecture = adminClusterArchitecture(params.application());
- if (nodeRepository.exclusiveAllocation(clusterSpec)) {
+ if (params.exclusiveAllocation()) {
return smallestExclusiveResources().with(architecture);
}
- if (clusterSpec.id().value().equals("cluster-controllers")) {
- return clusterControllerResources(clusterSpec, architecture).with(architecture);
+ if (params.cluster().id().value().equals("cluster-controllers")) {
+ return clusterControllerResources(params.cluster(), architecture).with(architecture);
}
- if (clusterSpec.id().value().equals("logserver")) {
+ if (params.cluster().id().value().equals("logserver")) {
return logserverResources(architecture).with(architecture);
}
- return versioned(clusterSpec, Map.of(new Version(0), smallestSharedResources())).with(architecture);
+ return versioned(params.cluster(), Map.of(new Version(0), smallestSharedResources())).with(architecture);
}
- if (clusterSpec.type() == ClusterSpec.Type.content) {
+ if (params.cluster().type() == ClusterSpec.Type.content) {
// When changing defaults here update cloud.vespa.ai/en/reference/services
return zone.cloud().dynamicProvisioning()
- ? versioned(clusterSpec, Map.of(new Version(0), new NodeResources(2, 16, 300, 0.3)))
- : versioned(clusterSpec, Map.of(new Version(0), new NodeResources(1.5, 8, 50, 0.3)));
+ ? versioned(params.cluster(), Map.of(new Version(0), new NodeResources(2, 16, 300, 0.3)))
+ : versioned(params.cluster(), Map.of(new Version(0), new NodeResources(1.5, 8, 50, 0.3)));
}
else {
// When changing defaults here update cloud.vespa.ai/en/reference/services
return zone.cloud().dynamicProvisioning()
- ? versioned(clusterSpec, Map.of(new Version(0), new NodeResources(2.0, 8, 50, 0.3)))
- : versioned(clusterSpec, Map.of(new Version(0), new NodeResources(1.5, 8, 50, 0.3)));
+ ? versioned(params.cluster(), Map.of(new Version(0), new NodeResources(2.0, 8, 50, 0.3)))
+ : versioned(params.cluster(), Map.of(new Version(0), new NodeResources(1.5, 8, 50, 0.3)));
}
}
@@ -177,10 +178,11 @@ public class CapacityPolicies {
}
/** Returns whether the nodes requested can share physical host with other applications */
- public ClusterSpec decideExclusivity(Capacity capacity, ClusterSpec requestedCluster) {
- if (capacity.cloudAccount().isPresent()) return requestedCluster.withExclusivity(true); // Implicit exclusive
- boolean exclusive = requestedCluster.isExclusive() && (capacity.isRequired() || zone.environment() == Environment.prod);
- return requestedCluster.withExclusivity(exclusive);
+ public boolean decideExclusivity(Capacity capacity, ClusterSpec requestedCluster) {
+ if (capacity.type() != NodeType.tenant) return true;
+ if (capacity.cloudAccount().isPresent()) return true;
+ if (!requestedCluster.isExclusive()) return false;
+ return capacity.isRequired() || zone.environment() == Environment.prod;
}
}
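
The new boolean decideExclusivity can be restated as a self-contained truth function (plain booleans standing in for Capacity, ClusterSpec, and Zone state):

    class ExclusivityDecisionSketch {
        static boolean decideExclusivity(boolean tenantNodes, boolean customCloudAccount,
                                         boolean requestedExclusive, boolean required, boolean prodZone) {
            if (!tenantNodes) return true;          // non-tenant node types are always exclusive
            if (customCloudAccount) return true;    // a cloud account present implies exclusivity
            if (!requestedExclusive) return false;  // nothing requested, nothing granted
            return required || prodZone;            // requested exclusivity binds only when required or in prod
        }

        public static void main(String[] args) {
            // e.g. a non-required exclusive request in a dev zone is downgraded to shared:
            System.out.println(decideExclusivity(true, false, true, false, false)); // false
        }
    }
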
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java
index 38cbfa7fe5f..ebb7081e366 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java
@@ -5,6 +5,7 @@ import com.yahoo.config.provision.CloudAccount;
import com.yahoo.config.provision.HostEvent;
import com.yahoo.config.provision.NodeAllocationException;
import com.yahoo.config.provision.NodeResources;
+import com.yahoo.vespa.flags.custom.SharedHost;
import com.yahoo.vespa.hosted.provision.Node;
import java.util.Collection;
@@ -53,7 +54,10 @@ public interface HostProvisioner {
* @return a runnable that waits for the provisioning request to finish. It can be run without holding any locks,
* but may fail with an exception that should be propagated to the user initiating prepare()
*/
- Runnable provisionHosts(HostProvisionRequest request, Predicate<NodeResources> realHostResourcesWithinLimits, Consumer<List<ProvisionedHost>> whenProvisioned) throws NodeAllocationException;
+ Runnable provisionHosts(SharedHost sharedHost,
+ HostProvisionRequest request,
+ Predicate<NodeResources> realHostResourcesWithinLimits,
+ Consumer<List<ProvisionedHost>> whenProvisioned) throws NodeAllocationException;
/**
* Continue provisioning of given list of Nodes.
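
The contract documented on provisionHosts (return a waiter, run it without holding locks, as HostCapacityMaintainer does above) reduces to a generic shape; a self-contained sketch, with CompletableFuture standing in for the real provisioning request:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.locks.ReentrantLock;

    class ProvisionWaiterSketch {
        private static final ReentrantLock unallocatedLock = new ReentrantLock();

        public static void main(String[] args) {
            Runnable waiter;
            unallocatedLock.lock();
            try {
                CompletableFuture<Void> request =
                        CompletableFuture.runAsync(() -> System.out.println("provisioning hosts"));
                waiter = request::join;   // capture the wait; do not perform it under the lock
            } finally {
                unallocatedLock.unlock();
            }
            waiter.run();                 // may block or throw, but no lock is held here
        }
    }
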
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java
index 21340baf273..e22c039196c 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java
@@ -84,10 +84,10 @@ class NodeAllocation {
private final NodeRepository nodeRepository;
private final Optional<String> requiredHostFlavor;
- private final boolean makeExclusive;
+ private final AllocationParams params;
NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requested,
- Supplier<Integer> nextIndex, NodeRepository nodeRepository, boolean makeExclusive) {
+ Supplier<Integer> nextIndex, NodeRepository nodeRepository, AllocationParams params) {
this.allNodes = allNodes;
this.application = application;
this.cluster = cluster;
@@ -100,7 +100,7 @@ class NodeAllocation {
.with(FetchVector.Dimension.CLUSTER_ID, cluster.id().value())
.value())
.filter(s -> !s.isBlank());
- this.makeExclusive = makeExclusive;
+ this.params = params;
}
/**
@@ -133,7 +133,7 @@ class NodeAllocation {
}
}
else if ( ! saturated() && hasCompatibleResources(candidate)) {
- if ( ! nodeRepository.nodeResourceLimits().isWithinRealLimits(candidate, application, cluster)) {
+ if ( ! nodeRepository.nodeResourceLimits().isWithinRealLimits(params, candidate, application, cluster)) {
++rejectedDueToInsufficientRealResources;
continue;
}
@@ -169,7 +169,7 @@ class NodeAllocation {
boolean alreadyRetired = candidate.allocation().map(a -> a.membership().retired()).orElse(false);
return alreadyRetired ? Retirement.alreadyRetired : Retirement.none;
}
- if ( ! nodeRepository.nodeResourceLimits().isWithinRealLimits(candidate, application, cluster)) return Retirement.outsideRealLimits;
+ if ( ! nodeRepository.nodeResourceLimits().isWithinRealLimits(params, candidate, application, cluster)) return Retirement.outsideRealLimits;
if (violatesParentHostPolicy(candidate)) return Retirement.violatesParentHostPolicy;
if ( ! hasCompatibleResources(candidate)) return Retirement.incompatibleResources;
if (candidate.parent.map(node -> node.status().wantToUpgradeFlavor()).orElse(false)) return Retirement.violatesHostFlavorGeneration;
@@ -197,11 +197,7 @@ class NodeAllocation {
}
private NodeCandidate.ExclusivityViolation violatesExclusivity(NodeCandidate candidate) {
- return candidate.violatesExclusivity(cluster, application,
- nodeRepository.exclusiveClusterType(cluster),
- nodeRepository.exclusiveAllocation(cluster),
- nodeRepository.exclusiveProvisioning(cluster),
- nodeRepository.zone().cloud().allowHostSharing(), allNodes, makeExclusive);
+ return candidate.violatesExclusivity(params, allNodes);
}
/**
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java
index 8c29b40bc26..76c8445eb8c 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java
@@ -594,9 +594,8 @@ public abstract class NodeCandidate implements Nodelike, Comparable<NodeCandidat
PARENT_HOST_NOT_EXCLUSIVE
}
- public ExclusivityViolation violatesExclusivity(ClusterSpec cluster, ApplicationId application,
- boolean exclusiveClusterType, boolean exclusiveAllocation, boolean exclusiveProvisioning,
- boolean hostSharing, NodeList allNodes, boolean makeExclusive) {
+ public ExclusivityViolation violatesExclusivity(AllocationParams params, NodeList allNodes) {
+ boolean hostSharing = params.nodeRepository().zone().cloud().allowHostSharing();
if (parentHostname().isEmpty()) return ExclusivityViolation.NONE;
if (type() != NodeType.tenant) return ExclusivityViolation.NONE;
@@ -605,34 +604,34 @@ public abstract class NodeCandidate implements Nodelike, Comparable<NodeCandidat
// then all the nodes on the host must have the same owner.
for (Node nodeOnHost : allNodes.childrenOf(parentHostname().get())) {
if (nodeOnHost.allocation().isEmpty()) continue;
- if (exclusiveAllocation || nodeOnHost.allocation().get().membership().cluster().isExclusive()) {
- if ( ! nodeOnHost.allocation().get().owner().equals(application)) return ExclusivityViolation.YES;
+ if (params.exclusiveAllocation() || nodeOnHost.allocation().get().membership().cluster().isExclusive()) {
+ if ( ! nodeOnHost.allocation().get().owner().equals(params.application())) return ExclusivityViolation.YES;
}
}
} else {
// the parent is exclusive to another cluster type
- if ( ! emptyOrEqual(parent.flatMap(Node::exclusiveToClusterType), cluster.type()))
+ if ( ! emptyOrEqual(parent.flatMap(Node::exclusiveToClusterType), params.cluster().type()))
return ExclusivityViolation.YES;
// this cluster requires a parent that was provisioned exclusively for this cluster type
- if (exclusiveClusterType && parent.flatMap(Node::exclusiveToClusterType).isEmpty() && makeExclusive)
+ if (params.exclusiveClusterType() && parent.flatMap(Node::exclusiveToClusterType).isEmpty() && params.makeExclusive())
return ExclusivityViolation.YES;
// the parent is provisioned for another application
- if ( ! emptyOrEqual(parent.flatMap(Node::provisionedForApplicationId), application))
+ if ( ! emptyOrEqual(parent.flatMap(Node::provisionedForApplicationId), params.application()))
return ExclusivityViolation.YES;
// this cluster requires a parent that was provisioned for this application
- if (exclusiveProvisioning && parent.flatMap(Node::provisionedForApplicationId).isEmpty())
+ if (params.exclusiveProvisioning() && parent.flatMap(Node::provisionedForApplicationId).isEmpty())
return ExclusivityViolation.YES;
// the parent is exclusive to another application
- if ( ! emptyOrEqual(parent.flatMap(Node::exclusiveToApplicationId), application))
+ if ( ! emptyOrEqual(parent.flatMap(Node::exclusiveToApplicationId), params.application()))
return ExclusivityViolation.YES;
// this cluster requires exclusivity, but the parent is not exclusive
- if (exclusiveAllocation && parent.flatMap(Node::exclusiveToApplicationId).isEmpty())
- return canMakeHostExclusive(makeExclusive, type(), hostSharing) ?
+ if (params.exclusiveAllocation() && parent.flatMap(Node::exclusiveToApplicationId).isEmpty())
+ return canMakeHostExclusive(params.makeExclusive(), type(), hostSharing) ?
ExclusivityViolation.PARENT_HOST_NOT_EXCLUSIVE :
ExclusivityViolation.YES;
}
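The static canMakeHostExclusive helper used above (and from Preparer.parentLockOrNull further down) is not shown in this excerpt; only its call sites are. A plausible reading of the predicate, with the body an assumption inferred from those call sites:

    /**
     * Whether this allocation may convert a host into an exclusive host: only behind the
     * makeExclusive flag, only for tenant nodes, and only in clouds that do not share hosts.
     */
    public static boolean canMakeHostExclusive(boolean makeExclusive, NodeType type, boolean allowHostSharing) {
        return makeExclusive && type == NodeType.tenant && !allowHostSharing;
    }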
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
index b92d6fb6d18..dc6c0c7afa0 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
@@ -34,39 +34,33 @@ public class NodePrioritizer {
private final HostCapacity capacity;
private final HostResourcesCalculator calculator;
private final NodeSpec requested;
- private final ApplicationId application;
- private final ClusterSpec clusterSpec;
private final IP.Allocation.Context ipAllocationContext;
private final Nodes nodes;
private final boolean dynamicProvisioning;
private final boolean allowHostSharing;
- private final boolean exclusiveAllocation;
- private final boolean makeExclusive;
+ private final AllocationParams params;
private final boolean canAllocateToSpareHosts;
private final boolean topologyChange;
private final int currentClusterSize;
private final Set<Node> spareHosts;
- public NodePrioritizer(LockedNodeList allNodes, ApplicationId application, ClusterSpec clusterSpec, NodeSpec nodeSpec,
+ public NodePrioritizer(AllocationParams params, LockedNodeList allNodes, NodeSpec nodeSpec,
boolean dynamicProvisioning, boolean allowHostSharing, IP.Allocation.Context ipAllocationContext, Nodes nodes,
- HostResourcesCalculator hostResourcesCalculator, int spareCount, boolean exclusiveAllocation, boolean makeExclusive) {
+ HostResourcesCalculator hostResourcesCalculator, int spareCount) {
this.allNodes = allNodes;
this.calculator = hostResourcesCalculator;
this.capacity = new HostCapacity(this.allNodes, hostResourcesCalculator);
this.requested = nodeSpec;
- this.clusterSpec = clusterSpec;
- this.application = application;
this.dynamicProvisioning = dynamicProvisioning;
this.allowHostSharing = allowHostSharing;
- this.exclusiveAllocation = exclusiveAllocation;
- this.makeExclusive = makeExclusive;
+ this.params = params;
this.spareHosts = dynamicProvisioning ?
capacity.findSpareHostsInDynamicallyProvisionedZones(this.allNodes.asList()) :
capacity.findSpareHosts(this.allNodes.asList(), spareCount);
this.ipAllocationContext = ipAllocationContext;
this.nodes = nodes;
- NodeList nodesInCluster = this.allNodes.owner(application).type(clusterSpec.type()).cluster(clusterSpec.id());
+ NodeList nodesInCluster = this.allNodes.owner(params.application()).type(params.cluster().type()).cluster(params.cluster().id());
NodeList nonRetiredNodesInCluster = nodesInCluster.not().retired();
long currentGroups = nonRetiredNodesInCluster.state(Node.State.active).stream()
.flatMap(node -> node.allocation()
@@ -81,7 +75,7 @@ public class NodePrioritizer {
// In dynamically provisioned zones, we can always take spare hosts since we can provision new on-demand,
// NodeCandidate::compareTo will ensure that they will not be used until there is no room elsewhere.
// In non-dynamically provisioned zones, we only allow allocating to spare hosts to replace failed nodes.
- this.canAllocateToSpareHosts = dynamicProvisioning || isReplacement(nodesInCluster, clusterSpec.group());
+ this.canAllocateToSpareHosts = dynamicProvisioning || isReplacement(nodesInCluster, params.cluster().group());
}
/** Collects all node candidates for this application and returns them in the most-to-least preferred order */
@@ -125,19 +119,19 @@ public class NodePrioritizer {
for (Node host : allNodes) {
if ( ! nodes.canAllocateTenantNodeTo(host, dynamicProvisioning)) continue;
if (nodes.suspended(host)) continue; // Hosts that are suspended may be down for some time, e.g. for OS upgrade
- if (host.reservedTo().isPresent() && !host.reservedTo().get().equals(application.tenant())) continue;
- if (host.reservedTo().isPresent() && application.instance().isTester()) continue;
- if (makeExclusive) {
- if ( ! allowHostSharing && exclusiveAllocation && ! fitsPerfectly(host)) continue;
+ if (host.reservedTo().isPresent() && !host.reservedTo().get().equals(params.application().tenant())) continue;
+ if (host.reservedTo().isPresent() && params.application().instance().isTester()) continue;
+ if (params.makeExclusive()) {
+ if ( ! allowHostSharing && params.exclusiveAllocation() && ! fitsPerfectly(host)) continue;
} else {
if (host.exclusiveToApplicationId().isPresent() && ! fitsPerfectly(host)) continue;
}
- if ( ! host.provisionedForApplicationId().map(application::equals).orElse(true)) continue;
- if ( ! host.exclusiveToApplicationId().map(application::equals).orElse(true)) continue;
- if ( ! host.exclusiveToClusterType().map(clusterSpec.type()::equals).orElse(true)) continue;
+ if ( ! host.provisionedForApplicationId().map(params.application()::equals).orElse(true)) continue;
+ if ( ! host.exclusiveToApplicationId().map(params.application()::equals).orElse(true)) continue;
+ if ( ! host.exclusiveToClusterType().map(params.cluster().type()::equals).orElse(true)) continue;
if (spareHosts.contains(host) && !canAllocateToSpareHosts) continue;
if ( ! capacity.hasCapacity(host, requested.resources().get())) continue;
- if ( ! allNodes.childrenOf(host).owner(application).cluster(clusterSpec.id()).isEmpty()) continue;
+ if ( ! allNodes.childrenOf(host).owner(params.application()).cluster(params.cluster().id()).isEmpty()) continue;
if ( ! requested.cloudAccount().isUnspecified() && ! requested.cloudAccount().equals(host.cloudAccount())) continue;
candidates.add(NodeCandidate.createNewChild(requested.resources().get(),
@@ -160,8 +154,8 @@ public class NodePrioritizer {
.filter(node -> node.type() == requested.type())
.filter(node -> legalStates.contains(node.state()))
.filter(node -> node.allocation().isPresent())
- .filter(node -> node.allocation().get().owner().equals(application))
- .filter(node -> node.allocation().get().membership().cluster().id().equals(clusterSpec.id()))
+ .filter(node -> node.allocation().get().owner().equals(params.application()))
+ .filter(node -> node.allocation().get().membership().cluster().id().equals(params.cluster().id()))
.filter(node -> node.state() == Node.State.active || canStillAllocate(node))
.map(node -> candidateFrom(node, false))
.forEach(candidates::add);
@@ -191,7 +185,7 @@ public class NodePrioritizer {
parent.exclusiveToApplicationId().isEmpty()
&& requested.canResize(node.resources(),
capacity.unusedCapacityOf(parent),
- clusterSpec.type(),
+ params.cluster().type(),
topologyChange,
currentClusterSize));
} else {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
index e2f1b7358cf..23065055228 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
@@ -85,41 +85,42 @@ public class NodeRepositoryProvisioner implements Provisioner {
public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity requested, ProvisionLogger logger) {
log.log(Level.FINE, "Received deploy prepare request for " + requested +
" for application " + application + ", cluster " + cluster);
- validate(application, cluster, requested, logger);
+ var params = AllocationParams.from(nodeRepository, application, cluster, cluster.vespaVersion());
+ validate(params, requested, logger);
+
+ params = params.with(cluster.withExclusivity(capacityPolicies.decideExclusivity(requested, cluster)));
NodeResources resources;
NodeSpec nodeSpec;
if (requested.type() == NodeType.tenant) {
- cluster = capacityPolicies.decideExclusivity(requested, cluster);
Capacity actual = capacityPolicies.applyOn(requested, application, cluster.isExclusive());
- ClusterResources target = decideTargetResources(application, cluster, actual);
+ ClusterResources target = decideTargetResources(params, actual);
validate(actual, target, cluster, application);
logIfDownscaled(requested.minResources().nodes(), actual.minResources().nodes(), cluster, logger);
- resources = getNodeResources(cluster, target.nodeResources(), application);
+ resources = getNodeResources(params, cluster, target.nodeResources(), application);
nodeSpec = NodeSpec.from(target.nodes(), target.groups(), resources, cluster.isExclusive(), actual.canFail(),
requested.cloudAccount().orElse(nodeRepository.zone().cloud().account()),
requested.clusterInfo().hostTTL());
}
else {
- cluster = cluster.withExclusivity(true);
- resources = getNodeResources(cluster, requested.minResources().nodeResources(), application);
+ resources = getNodeResources(params, cluster, requested.minResources().nodeResources(), application);
nodeSpec = NodeSpec.from(requested.type(), nodeRepository.zone().cloud().account());
}
- return asSortedHosts(preparer.prepare(application, cluster, nodeSpec),
+ return asSortedHosts(preparer.prepare(params, application, cluster, nodeSpec),
requireCompatibleResources(resources, cluster));
}
- private void validate(ApplicationId application, ClusterSpec cluster, Capacity requested, ProvisionLogger logger) {
- if (cluster.group().isPresent()) throw new IllegalArgumentException("Node requests cannot specify a group");
+ private void validate(AllocationParams params, Capacity requested, ProvisionLogger logger) {
+ if (params.cluster().group().isPresent()) throw new IllegalArgumentException("Node requests cannot specify a group");
- nodeRepository.nodeResourceLimits().ensureWithinAdvertisedLimits("Min", requested.minResources().nodeResources(), application, cluster);
- nodeRepository.nodeResourceLimits().ensureWithinAdvertisedLimits("Max", requested.maxResources().nodeResources(), application, cluster);
+ nodeRepository.nodeResourceLimits().ensureWithinAdvertisedLimits(params, "Min", requested.minResources().nodeResources());
+ nodeRepository.nodeResourceLimits().ensureWithinAdvertisedLimits(params, "Max", requested.maxResources().nodeResources());
if (!requested.minResources().nodeResources().gpuResources().equals(requested.maxResources().nodeResources().gpuResources()))
throw new IllegalArgumentException(requested + " is invalid: GPU capacity cannot have ranges");
- logInsufficientDiskResources(cluster, requested, logger);
+ logInsufficientDiskResources(params.cluster(), requested, logger);
}
private void logInsufficientDiskResources(ClusterSpec cluster, Capacity requested, ProvisionLogger logger) {
@@ -132,8 +133,8 @@ public class NodeRepositoryProvisioner implements Provisioner {
}
}
- private NodeResources getNodeResources(ClusterSpec cluster, NodeResources nodeResources, ApplicationId applicationId) {
- return capacityPolicies.specifyFully(nodeResources, cluster, applicationId);
+ private NodeResources getNodeResources(AllocationParams params, ClusterSpec cluster, NodeResources nodeResources, ApplicationId applicationId) {
+ return capacityPolicies.specifyFully(params, nodeResources);
}
@Override
@@ -165,41 +166,39 @@ public class NodeRepositoryProvisioner implements Provisioner {
* Returns the target cluster resources, a value between the min and max in the requested capacity,
* and updates the application store with the received min and max.
*/
- private ClusterResources decideTargetResources(ApplicationId applicationId, ClusterSpec clusterSpec, Capacity requested) {
- try (Mutex lock = nodeRepository.applications().lock(applicationId)) {
- var application = nodeRepository.applications().get(applicationId).orElse(Application.empty(applicationId))
- .withCluster(clusterSpec.id(), clusterSpec.isExclusive(), requested);
+ private ClusterResources decideTargetResources(AllocationParams params, Capacity requested) {
+ try (Mutex lock = nodeRepository.applications().lock(params.application())) {
+ var application = nodeRepository.applications().get(params.application()).orElse(Application.empty(params.application()))
+ .withCluster(params.cluster().id(), params.cluster().isExclusive(), requested);
nodeRepository.applications().put(application, lock);
- var cluster = application.cluster(clusterSpec.id()).get();
- return cluster.target().resources().orElseGet(() -> currentResources(application, clusterSpec, cluster, requested));
+ var cluster = application.cluster(params.cluster().id()).get();
+ return cluster.target().resources().orElseGet(() -> currentResources(params, application, cluster, requested));
}
}
/** Returns the current resources of this cluster, or requested min if none */
- private ClusterResources currentResources(Application application,
- ClusterSpec clusterSpec,
- Cluster cluster,
- Capacity requested) {
+ private ClusterResources currentResources(AllocationParams params, Application application, Cluster cluster, Capacity requested) {
NodeList nodes = nodeRepository.nodes().list(Node.State.active).owner(application.id())
- .cluster(clusterSpec.id())
+ .cluster(params.cluster().id())
.not().retired()
.not().removable();
boolean firstDeployment = nodes.isEmpty();
var current =
firstDeployment // start at min, preserve current resources otherwise
- ? new AllocatableResources(initialResourcesFrom(requested, clusterSpec, application.id()), clusterSpec, nodeRepository)
+ ? new AllocatableResources(params, initialResourcesFrom(params, requested))
: new AllocatableResources(nodes, nodeRepository);
- var model = new ClusterModel(nodeRepository, application, clusterSpec, cluster, nodes, current, nodeRepository.metricsDb(), nodeRepository.clock());
- return within(Limits.of(requested), model, firstDeployment);
+ var model = new ClusterModel(params, application, cluster, nodes, current, nodeRepository.metricsDb(), nodeRepository.clock());
+ return within(params, Limits.of(requested), model, firstDeployment);
}
- private ClusterResources initialResourcesFrom(Capacity requested, ClusterSpec clusterSpec, ApplicationId applicationId) {
- return capacityPolicies.specifyFully(requested.minResources(), clusterSpec, applicationId);
+ private ClusterResources initialResourcesFrom(AllocationParams params, Capacity requested) {
+ return capacityPolicies.specifyFully(params, requested.minResources());
}
/** Make the minimal adjustments needed to the current resources to stay within the limits */
- private ClusterResources within(Limits limits,
+ private ClusterResources within(AllocationParams params,
+ Limits limits,
ClusterModel model,
boolean firstDeployment) {
if (limits.min().equals(limits.max())) return limits.min();
@@ -209,10 +208,11 @@ public class NodeRepositoryProvisioner implements Provisioner {
return model.current().advertisedResources();
// Otherwise, find an allocation that preserves the current resources as well as possible
- return allocationOptimizer.findBestAllocation(Load.one(),
+ return allocationOptimizer.findBestAllocation(params,
+ Load.one(),
model,
limits)
- .orElseThrow(() -> newNoAllocationPossible(model.current().clusterSpec(), limits))
+ .orElseThrow(() -> newNoAllocationPossible(params, model.current().clusterSpec(), limits))
.advertisedResources();
}
@@ -277,10 +277,10 @@ public class NodeRepositoryProvisioner implements Provisioner {
return nodeResources;
}
- private IllegalArgumentException newNoAllocationPossible(ClusterSpec spec, Limits limits) {
+ private IllegalArgumentException newNoAllocationPossible(AllocationParams params, ClusterSpec spec, Limits limits) {
StringBuilder message = new StringBuilder("No allocation possible within ").append(limits);
- if (nodeRepository.exclusiveAllocation(spec) && findNearestNodeResources(limits).isPresent())
+ if (params.exclusiveAllocation() && findNearestNodeResources(limits).isPresent())
message.append(". Nearest allowed node resources: ").append(findNearestNodeResources(limits).get());
return new IllegalArgumentException(message.toString());
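Condensed, the new prepare() flow in this file now reads roughly as follows (abbreviated from the hunks above, not the verbatim method): the params snapshot is built once, validated, updated with the exclusivity decision, and then passed unchanged through the rest of preparation.

    var params = AllocationParams.from(nodeRepository, application, cluster, cluster.vespaVersion());
    validate(params, requested, logger);
    params = params.with(cluster.withExclusivity(capacityPolicies.decideExclusivity(requested, cluster)));
    // ... decide target resources and build the NodeSpec against the same params snapshot ...
    return asSortedHosts(preparer.prepare(params, application, cluster, nodeSpec),
                         requireCompatibleResources(resources, cluster));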
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
index cd331cbd5fa..f592851f45e 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
@@ -31,14 +31,14 @@ public class NodeResourceLimits {
}
/** Validates the resources applications ask for (which are in "advertised" resource space) */
- public void ensureWithinAdvertisedLimits(String type, NodeResources requested, ApplicationId applicationId, ClusterSpec cluster) {
- boolean exclusive = nodeRepository.exclusiveAllocation(cluster);
- if (! requested.vcpuIsUnspecified() && requested.vcpu() < minAdvertisedVcpu(applicationId, cluster, exclusive))
- illegal(type, "vcpu", "", cluster, requested.vcpu(), minAdvertisedVcpu(applicationId, cluster, exclusive));
- if (! requested.memoryGbIsUnspecified() && requested.memoryGb() < minAdvertisedMemoryGb(applicationId, cluster, exclusive))
- illegal(type, "memoryGb", "Gb", cluster, requested.memoryGb(), minAdvertisedMemoryGb(applicationId, cluster, exclusive));
+ public void ensureWithinAdvertisedLimits(AllocationParams params, String type, NodeResources requested) {
+ boolean exclusive = params.exclusiveAllocation();
+ if (! requested.vcpuIsUnspecified() && requested.vcpu() < minAdvertisedVcpu(params.application(), params.cluster(), exclusive))
+ illegal(type, "vcpu", "", params.cluster(), requested.vcpu(), minAdvertisedVcpu(params.application(), params.cluster(), exclusive));
+ if (! requested.memoryGbIsUnspecified() && requested.memoryGb() < minAdvertisedMemoryGb(params.application(), params.cluster(), exclusive))
+ illegal(type, "memoryGb", "Gb", params.cluster(), requested.memoryGb(), minAdvertisedMemoryGb(params.application(), params.cluster(), exclusive));
if (! requested.diskGbIsUnspecified() && requested.diskGb() < minAdvertisedDiskGb(requested, exclusive))
- illegal(type, "diskGb", "Gb", cluster, requested.diskGb(), minAdvertisedDiskGb(requested, exclusive));
+ illegal(type, "diskGb", "Gb", params.cluster(), requested.diskGb(), minAdvertisedDiskGb(requested, exclusive));
}
// TODO: Remove this when we are ready to fail, not just warn on this.
@@ -48,17 +48,17 @@ public class NodeResourceLimits {
}
/** Returns whether the real resources we'll end up with on a given tenant node are within limits */
- public boolean isWithinRealLimits(NodeCandidate candidateNode, ApplicationId applicationId, ClusterSpec cluster) {
+ public boolean isWithinRealLimits(AllocationParams params, NodeCandidate candidateNode, ApplicationId applicationId, ClusterSpec cluster) {
if (candidateNode.type() != NodeType.tenant) return true; // Resource limits only apply to tenant nodes
- return isWithinRealLimits(nodeRepository.resourcesCalculator().realResourcesOf(candidateNode, nodeRepository),
+ return isWithinRealLimits(params, nodeRepository.resourcesCalculator().realResourcesOf(candidateNode, nodeRepository),
applicationId, cluster);
}
/** Returns whether the real resources we'll end up with on a given tenant node are within limits */
- public boolean isWithinRealLimits(NodeResources realResources, ApplicationId applicationId, ClusterSpec cluster) {
+ public boolean isWithinRealLimits(AllocationParams params, NodeResources realResources, ApplicationId applicationId, ClusterSpec cluster) {
if (realResources.isUnspecified()) return true;
- if (realResources.vcpu() < minRealVcpu(applicationId, cluster)) return false;
+ if (realResources.vcpu() < minRealVcpu(params)) return false;
if (realResources.memoryGb() < minRealMemoryGb(cluster)) return false;
if (realResources.diskGb() < minRealDiskGb()) return false;
return true;
@@ -115,8 +115,8 @@ public class NodeResourceLimits {
return 4;
}
- private double minRealVcpu(ApplicationId applicationId, ClusterSpec cluster) {
- return minAdvertisedVcpu(applicationId, cluster, nodeRepository.exclusiveAllocation(cluster));
+ private double minRealVcpu(AllocationParams params) {
+ return minAdvertisedVcpu(params.application(), params.cluster(), params.exclusiveAllocation());
}
private static double minRealMemoryGb(ClusterSpec cluster) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
index 978a69978b8..2f1f39c3301 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
@@ -13,9 +13,6 @@ import com.yahoo.jdisc.Metric;
import com.yahoo.text.internal.SnippetGenerator;
import com.yahoo.transaction.Mutex;
import com.yahoo.vespa.applicationmodel.InfrastructureApplication;
-import com.yahoo.vespa.flags.BooleanFlag;
-import com.yahoo.vespa.flags.FetchVector;
-import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.hosted.provision.LockedNodeList;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
@@ -50,42 +47,37 @@ public class Preparer {
private final Optional<HostProvisioner> hostProvisioner;
private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
private final ProvisioningThrottler throttler;
- private final BooleanFlag makeExclusiveFlag;
public Preparer(NodeRepository nodeRepository, Optional<HostProvisioner> hostProvisioner, Optional<LoadBalancerProvisioner> loadBalancerProvisioner, Metric metric) {
this.nodeRepository = nodeRepository;
this.hostProvisioner = hostProvisioner;
this.loadBalancerProvisioner = loadBalancerProvisioner;
this.throttler = new ProvisioningThrottler(nodeRepository, metric);
- this.makeExclusiveFlag = Flags.MAKE_EXCLUSIVE.bindTo(nodeRepository.flagSource());
}
/**
* Ensure sufficient nodes are reserved or active for the given application, group and cluster
*
- * @param application the application we are allocating to
- * @param cluster the cluster and group we are allocating to
- * @param requested a specification of the requested nodes
+     * @param params allocation parameters computed once and held constant during preparation
+ * @param application the application we are allocating to
+ * @param cluster the cluster and group we are allocating to
+ * @param requested a specification of the requested nodes
* @return the list of nodes this cluster group will have allocated if activated
*/
// Note: This operation may make persisted changes to the set of reserved and inactive nodes,
// but it may not change the set of active nodes, as the active nodes must stay in sync with the
// active config model which is changed on activate
- public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requested) {
+ public List<Node> prepare(AllocationParams params, ApplicationId application, ClusterSpec cluster, NodeSpec requested) {
log.log(Level.FINE, () -> "Preparing " + cluster.type().name() + " " + cluster.id() + " with requested resources " +
requested.resources().orElse(NodeResources.unspecified()));
loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requested));
- boolean makeExclusive = makeExclusiveFlag.with(FetchVector.Dimension.TENANT_ID, application.tenant().value())
- .with(FetchVector.Dimension.INSTANCE_ID, application.serializedForm())
- .with(FetchVector.Dimension.VESPA_VERSION, cluster.vespaVersion().toFullString())
- .value();
// Try preparing in memory without global unallocated lock. Most of the time there should be no changes,
// and we can return nodes previously allocated.
LockedNodeList allNodes = nodeRepository.nodes().list(PROBE_LOCK);
NodeIndices indices = new NodeIndices(cluster.id(), allNodes);
- NodeAllocation probeAllocation = prepareAllocation(application, cluster, requested, indices::probeNext, allNodes, makeExclusive);
+ NodeAllocation probeAllocation = prepareAllocation(application, cluster, requested, indices::probeNext, allNodes, params);
if (probeAllocation.fulfilledAndNoChanges()) {
List<Node> acceptedNodes = probeAllocation.finalNodes();
indices.commitProbe();
@@ -93,28 +85,28 @@ public class Preparer {
} else {
// There were some changes, so re-do the allocation with locks
indices.resetProbe();
- return prepareWithLocks(application, cluster, requested, indices, makeExclusive);
+ return prepareWithLocks(application, cluster, requested, indices, params);
}
}
- private ApplicationMutex parentLockOrNull(boolean makeExclusive, NodeType type) {
- return NodeCandidate.canMakeHostExclusive(makeExclusive, type, nodeRepository.zone().cloud().allowHostSharing()) ?
+ private ApplicationMutex parentLockOrNull(AllocationParams params, NodeType type) {
+ return NodeCandidate.canMakeHostExclusive(params.makeExclusive(), type, nodeRepository.zone().cloud().allowHostSharing()) ?
nodeRepository.applications().lock(InfrastructureApplication.withNodeType(type.parentNodeType()).id()) :
null;
}
/// Note that this will write to the node repo.
- private List<Node> prepareWithLocks(ApplicationId application, ClusterSpec cluster, NodeSpec requested, NodeIndices indices, boolean makeExclusive) {
+ private List<Node> prepareWithLocks(ApplicationId application, ClusterSpec cluster, NodeSpec requested, NodeIndices indices, AllocationParams params) {
Runnable waiter = null;
List<Node> acceptedNodes;
try (Mutex lock = nodeRepository.applications().lock(application);
- ApplicationMutex parentLockOrNull = parentLockOrNull(makeExclusive, requested.type());
+ ApplicationMutex parentLockOrNull = parentLockOrNull(params, requested.type());
Mutex allocationLock = nodeRepository.nodes().lockUnallocated()) {
LockedNodeList allNodes = nodeRepository.nodes().list(allocationLock);
- NodeAllocation allocation = prepareAllocation(application, cluster, requested, indices::next, allNodes, makeExclusive);
+ NodeAllocation allocation = prepareAllocation(application, cluster, requested, indices::next, allNodes, params);
NodeType hostType = allocation.nodeType().hostType();
if (canProvisionDynamically(hostType) && allocation.hostDeficit().isPresent()) {
- HostSharing sharing = hostSharing(cluster, hostType);
+ HostSharing sharing = hostSharing(params, hostType);
Version osVersion = nodeRepository.osVersions().targetFor(hostType).orElse(Version.emptyVersion);
NodeAllocation.HostDeficit deficit = allocation.hostDeficit().get();
Set<Node> hosts = new LinkedHashSet<>();
@@ -147,8 +139,9 @@ public class Preparer {
Optional.of(cluster.id()),
requested.cloudAccount(),
deficit.dueToFlavorUpgrade());
- Predicate<NodeResources> realHostResourcesWithinLimits = resources -> nodeRepository.nodeResourceLimits().isWithinRealLimits(resources, application, cluster);
- waiter = hostProvisioner.get().provisionHosts(request, realHostResourcesWithinLimits, whenProvisioned);
+ Predicate<NodeResources> realHostResourcesWithinLimits = resources ->
+ nodeRepository.nodeResourceLimits().isWithinRealLimits(params, resources, application, cluster);
+ waiter = hostProvisioner.get().provisionHosts(params.sharedHost(), request, realHostResourcesWithinLimits, whenProvisioned);
} catch (NodeAllocationException e) {
// Mark the nodes that were written to ZK in the consumer for deprovisioning. While these hosts do
// not exist, we cannot remove them from ZK here because other nodes may already have been
@@ -162,7 +155,7 @@ public class Preparer {
// Non-dynamically provisioned zone with a deficit because we just now retired some nodes.
// Try again, but without retiring
indices.resetProbe();
- List<Node> accepted = prepareWithLocks(application, cluster, cns.withoutRetiring(), indices, makeExclusive);
+ List<Node> accepted = prepareWithLocks(application, cluster, cns.withoutRetiring(), indices, params);
log.warning("Prepared " + application + " " + cluster.id() + " without retirement due to lack of capacity");
return accepted;
}
@@ -194,24 +187,22 @@ public class Preparer {
}
private NodeAllocation prepareAllocation(ApplicationId application, ClusterSpec cluster, NodeSpec requested,
- Supplier<Integer> nextIndex, LockedNodeList allNodes, boolean makeExclusive) {
+ Supplier<Integer> nextIndex, LockedNodeList allNodes, AllocationParams params) {
validateAccount(requested.cloudAccount(), application, allNodes);
- NodeAllocation allocation = new NodeAllocation(allNodes, application, cluster, requested, nextIndex, nodeRepository, makeExclusive);
+ NodeAllocation allocation = new NodeAllocation(allNodes, application, cluster, requested, nextIndex, nodeRepository, params);
var allocationContext = IP.Allocation.Context.from(nodeRepository.zone().cloud().name(),
requested.cloudAccount().isExclave(nodeRepository.zone()),
nodeRepository.nameResolver());
- NodePrioritizer prioritizer = new NodePrioritizer(allNodes,
- application,
- cluster,
+ NodePrioritizer prioritizer = new NodePrioritizer(params,
+ allNodes,
requested,
nodeRepository.zone().cloud().dynamicProvisioning(),
nodeRepository.zone().cloud().allowHostSharing(),
allocationContext,
nodeRepository.nodes(),
nodeRepository.resourcesCalculator(),
- nodeRepository.spareCount(),
- nodeRepository.exclusiveAllocation(cluster),
- makeExclusive);
+ nodeRepository.spareCount()
+ );
allocation.offer(prioritizer.collect());
return allocation;
}
@@ -238,13 +229,12 @@ public class Preparer {
(hostType == NodeType.host || hostType.isConfigServerHostLike());
}
- private HostSharing hostSharing(ClusterSpec cluster, NodeType hostType) {
- if ( hostType.isSharable())
- return nodeRepository.exclusiveProvisioning(cluster) ? HostSharing.provision :
- nodeRepository.exclusiveAllocation(cluster) ? HostSharing.exclusive :
- HostSharing.any;
- else
- return HostSharing.any;
+ private HostSharing hostSharing(AllocationParams params, NodeType hostType) {
+ if (hostType.isSharable()) {
+ if (params.exclusiveProvisioning()) return HostSharing.provision;
+ if (params.exclusiveAllocation()) return HostSharing.exclusive;
+ }
+ return HostSharing.any;
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java
index 225eb3e4e8d..ded0aaf513e 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java
@@ -1,6 +1,8 @@
// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.restapi;
+import com.yahoo.component.Vtag;
+import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.IntRange;
import com.yahoo.config.provision.ClusterResources;
import com.yahoo.slime.Cursor;
@@ -13,6 +15,7 @@ import com.yahoo.vespa.hosted.provision.applications.ScalingEvent;
import com.yahoo.vespa.hosted.provision.autoscale.Autoscaling;
import com.yahoo.vespa.hosted.provision.autoscale.Limits;
import com.yahoo.vespa.hosted.provision.autoscale.Load;
+import com.yahoo.vespa.hosted.provision.provisioning.AllocationParams;
import java.net.URI;
import java.util.List;
@@ -56,11 +59,13 @@ public class ApplicationSerializer {
NodeRepository nodeRepository,
Cursor clustersObject) {
NodeList nodes = applicationNodes.not().retired().cluster(cluster.id());
if (nodes.isEmpty()) return;
+        ClusterSpec clusterSpec = nodes.clusterSpec();
ClusterResources currentResources = nodes.toResources();
Cursor clusterObject = clustersObject.setObject(cluster.id().value());
- clusterObject.setString("type", nodes.clusterSpec().type().name());
- Limits limits = Limits.of(cluster).fullySpecified(nodes.clusterSpec(), nodeRepository, application.id());
+ clusterObject.setString("type", clusterSpec.type().name());
+ var params = AllocationParams.from(nodeRepository, application.id(), clusterSpec, clusterSpec.vespaVersion());
+ Limits limits = Limits.of(cluster).fullySpecified(params);
toSlime(limits.min(), clusterObject.setObject("min"));
toSlime(limits.max(), clusterObject.setObject("max"));
if ( ! cluster.groupSize().isEmpty())
@@ -70,7 +75,7 @@ public class ApplicationSerializer {
toSlime(cluster.suggested(), clusterObject.setObject("suggested"));
toSlime(cluster.target(), clusterObject.setObject("target"));
scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
- clusterObject.setLong("scalingDuration", cluster.scalingDuration(nodes.clusterSpec()).toMillis());
+ clusterObject.setLong("scalingDuration", cluster.scalingDuration(clusterSpec).toMillis());
}
private static void toSlime(Autoscaling autoscaling, Cursor autoscalingObject) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java
index 9080030f026..eddb7fd40ec 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.hosted.provision.restapi;
import com.yahoo.component.Version;
+import com.yahoo.component.Vtag;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ApplicationLockException;
import com.yahoo.config.provision.CloudAccount;
@@ -46,6 +47,7 @@ import com.yahoo.vespa.hosted.provision.node.filter.NodeOsVersionFilter;
import com.yahoo.vespa.hosted.provision.node.filter.NodeTypeFilter;
import com.yahoo.vespa.hosted.provision.node.filter.ParentHostFilter;
import com.yahoo.vespa.hosted.provision.maintenance.InfraApplicationRedeployer;
+import com.yahoo.vespa.hosted.provision.provisioning.AllocationParams;
import com.yahoo.vespa.hosted.provision.restapi.NodesResponse.ResponseType;
import com.yahoo.vespa.orchestrator.Orchestrator;
import com.yahoo.yolean.Exceptions;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java
index b5bb91af71a..40734cc25c4 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java
@@ -9,9 +9,11 @@ import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.NodeAllocationException;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeType;
+import com.yahoo.vespa.flags.custom.SharedHost;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.IP;
+import com.yahoo.vespa.hosted.provision.provisioning.AllocationParams;
import com.yahoo.vespa.hosted.provision.provisioning.FatalProvisioningException;
import com.yahoo.vespa.hosted.provision.provisioning.HostIpConfig;
import com.yahoo.vespa.hosted.provision.provisioning.HostProvisionRequest;
@@ -73,7 +75,10 @@ public class MockHostProvisioner implements HostProvisioner {
}
@Override
- public Runnable provisionHosts(HostProvisionRequest request, Predicate<NodeResources> realHostResourcesWithinLimits, Consumer<List<ProvisionedHost>> whenProvisioned) throws NodeAllocationException {
+ public Runnable provisionHosts(SharedHost sharedHost,
+ HostProvisionRequest request,
+ Predicate<NodeResources> realHostResourcesWithinLimits,
+ Consumer<List<ProvisionedHost>> whenProvisioned) throws NodeAllocationException {
if (behaviour(Behaviour.failProvisionRequest)) throw new NodeAllocationException("No capacity for provision request", true);
Flavor hostFlavor = hostFlavors.get(request.clusterType().orElse(ClusterSpec.Type.content));
if (hostFlavor == null)
@@ -264,7 +269,7 @@ public class MockHostProvisioner implements HostProvisioner {
/** Fail call to {@link MockHostProvisioner#provision(com.yahoo.vespa.hosted.provision.Node)} */
failProvisioning,
- /** Fail call to {@link MockHostProvisioner#provisionHosts(HostProvisionRequest, Predicate, Consumer)} */
+ /** Fail call to {@link MockHostProvisioner#provisionHosts(SharedHost, HostProvisionRequest, Predicate, Consumer)} */
failProvisionRequest,
/** Fail call to {@link MockHostProvisioner#deprovision(com.yahoo.vespa.hosted.provision.Node)} */
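The corresponding change to the HostProvisioner interface itself is not shown in this excerpt, but the updated method can be read off this mock. A sketch of the declaration, with the parameter names and throws clause taken from the override above:

    Runnable provisionHosts(SharedHost sharedHost,
                            HostProvisionRequest request,
                            Predicate<NodeResources> realHostResourcesWithinLimits,
                            Consumer<List<ProvisionedHost>> whenProvisioned) throws NodeAllocationException;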
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
index d4d34ab66e5..613a0434054 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
@@ -389,7 +389,7 @@ public class AutoscalingTest {
.build();
NodeResources defaultResources =
- new CapacityPolicies(fixture.tester().nodeRepository()).specifyFully(NodeResources.unspecified(), fixture.clusterSpec, fixture.applicationId);
+ new CapacityPolicies(fixture.tester().nodeRepository()).specifyFully(fixture.allocationParams, NodeResources.unspecified());
fixture.tester().assertResources("Min number of nodes and default resources",
2, 1, defaultResources,
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
index 5e4dfdc974d..6f3b348c5b0 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
@@ -10,6 +10,7 @@ import com.yahoo.test.ManualClock;
import com.yahoo.vespa.hosted.provision.applications.Application;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.applications.Status;
+import com.yahoo.vespa.hosted.provision.provisioning.AllocationParams;
import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
import org.junit.Test;
@@ -89,10 +90,11 @@ public class ClusterModelTest {
Cluster cluster = cluster();
application = application.with(cluster);
var nodeRepository = new ProvisioningTester.Builder().build().nodeRepository();
- return new ClusterModel(nodeRepository,
+ var params = AllocationParams.from(nodeRepository, application.id(), clusterSpec, clusterSpec.vespaVersion());
+ return new ClusterModel(params,
application.with(status),
- clusterSpec, cluster,
- new AllocatableResources(clusterResources(), clusterSpec, nodeRepository),
+ cluster,
+ new AllocatableResources(params, clusterResources()),
clock, Duration.ofMinutes(10), Duration.ofMinutes(5),
timeseries(cluster,100, queryRate, writeRate, clock),
ClusterNodesTimeseries.empty());
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java
index 1ecd5736e17..4c34dd1f7c8 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java
@@ -26,6 +26,7 @@ import com.yahoo.vespa.hosted.provision.autoscale.awsnodes.AwsHostResourcesCalcu
import com.yahoo.vespa.hosted.provision.autoscale.awsnodes.AwsNodeTypes;
import com.yahoo.vespa.hosted.provision.provisioning.DynamicProvisioningTester;
import com.yahoo.vespa.hosted.provision.provisioning.HostResourcesCalculator;
+import com.yahoo.vespa.hosted.provision.provisioning.AllocationParams;
import java.time.Duration;
import java.util.Arrays;
@@ -40,6 +41,7 @@ import java.util.Optional;
public class Fixture {
final DynamicProvisioningTester tester;
+ final AllocationParams allocationParams;
final ApplicationId applicationId;
final ClusterSpec clusterSpec;
final Capacity capacity;
@@ -52,6 +54,7 @@ public class Fixture {
clusterSpec = builder.cluster;
capacity = builder.capacity;
tester = new DynamicProvisioningTester(builder.zone, builder.resourceCalculator, builder.hostFlavors, builder.flagSource, hostCount);
+ allocationParams = AllocationParams.from(tester.nodeRepository(), builder.application, builder.cluster, builder.cluster.vespaVersion());
var deployCapacity = initialResources.isPresent() ? Capacity.from(initialResources.get()) : capacity;
tester.deploy(builder.application, builder.cluster, deployCapacity);
this.loader = new Loader(this);
@@ -80,9 +83,8 @@ public class Fixture {
public Capacity capacity() { return capacity; }
public ClusterModel clusterModel() {
- return new ClusterModel(tester.nodeRepository(),
+ return new ClusterModel(allocationParams,
application(),
- clusterSpec,
cluster(),
nodes(),
new AllocatableResources(nodes(), tester.nodeRepository()),
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTester.java
index be2b2ca896a..50696583b88 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTester.java
@@ -155,7 +155,7 @@ public class DynamicProvisioningTester {
}
public Autoscaling autoscale(ApplicationId applicationId, ClusterSpec cluster, Capacity capacity) {
- capacity = capacityPolicies.applyOn(capacity, applicationId, capacityPolicies.decideExclusivity(capacity, cluster).isExclusive());
+ capacity = capacityPolicies.applyOn(capacity, applicationId, capacityPolicies.decideExclusivity(capacity, cluster));
Application application = nodeRepository().applications().get(applicationId).orElse(Application.empty(applicationId))
.withCluster(cluster.id(), false, capacity);
try (Mutex lock = nodeRepository().applications().lock(applicationId)) {