author     Håkon Hallingstad <hakon@yahooinc.com>  2023-11-04 21:47:19 +0100
committer  Håkon Hallingstad <hakon@yahooinc.com>  2023-11-04 21:47:19 +0100
commit     a0c08e57b4fabeb6cfb4b8272cd9d69308a6856e (patch)
tree       172fcf3a96fd38816672ac53bbea0af5550c89d5
parent     6e06c8d9cb0a8a05009be93f9d31005827443783 (diff)
Rename to ClusterAllocationParams
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java | 10
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableResources.java | 24
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java | 14
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java | 8
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java | 18
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Limits.java | 8
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java | 32
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgrader.java | 6
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java | 12
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/ClusterAllocationFeatures.java | 29
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/ClusterAllocationParams.java | 29
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java | 2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java | 16
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java | 6
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java | 8
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java | 48
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java | 16
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java | 38
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java | 18
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java | 6
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java | 6
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java | 2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java | 8
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java | 8
24 files changed, 186 insertions, 186 deletions
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
index 9d597a63fec..5a7426a90a9 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
@@ -26,7 +26,7 @@ import com.yahoo.vespa.hosted.provision.persistence.CuratorDb;
import com.yahoo.vespa.hosted.provision.persistence.DnsNameResolver;
import com.yahoo.vespa.hosted.provision.persistence.JobControlFlags;
import com.yahoo.vespa.hosted.provision.persistence.NameResolver;
-import com.yahoo.vespa.hosted.provision.provisioning.ClusterAllocationFeatures;
+import com.yahoo.vespa.hosted.provision.provisioning.ClusterAllocationParams;
import com.yahoo.vespa.hosted.provision.provisioning.ContainerImages;
import com.yahoo.vespa.hosted.provision.provisioning.FirmwareChecks;
import com.yahoo.vespa.hosted.provision.provisioning.HostResourcesCalculator;
@@ -198,8 +198,8 @@ public class NodeRepository extends AbstractComponent {
public int spareCount() { return spareCount; }
/** Returns whether nodes must be allocated to hosts that are exclusive to the cluster type. */
- public boolean exclusiveClusterType(ClusterAllocationFeatures features, ClusterSpec cluster) {
- return features.sharedHost().hasClusterType(cluster.type().name());
+ public boolean exclusiveClusterType(ClusterAllocationParams params, ClusterSpec cluster) {
+ return params.sharedHost().hasClusterType(cluster.type().name());
}
/**
@@ -207,10 +207,10 @@ public class NodeRepository extends AbstractComponent {
* Exclusive allocation requires that the wanted node resources matches the advertised resources of the node
* perfectly.
*/
- public boolean exclusiveAllocation(ClusterAllocationFeatures features, ClusterSpec clusterSpec) {
+ public boolean exclusiveAllocation(ClusterAllocationParams params, ClusterSpec clusterSpec) {
return clusterSpec.isExclusive() ||
( clusterSpec.type().isContainer() && zone.system().isPublic() && !zone.environment().isTest() ) ||
- ( !zone().cloud().allowHostSharing() && !features.sharedHost().supportsClusterType(clusterSpec.type().name()));
+ ( !zone().cloud().allowHostSharing() && !params.sharedHost().supportsClusterType(clusterSpec.type().name()));
}
/** Whether the nodes of this cluster must be running on hosts that are specifically provisioned for the application. */
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableResources.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableResources.java
index 7c8501c05b9..433866a8635 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableResources.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableResources.java
@@ -9,7 +9,7 @@ import com.yahoo.config.provision.NodeResources;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
-import com.yahoo.vespa.hosted.provision.provisioning.ClusterAllocationFeatures;
+import com.yahoo.vespa.hosted.provision.provisioning.ClusterAllocationParams;
import java.time.Duration;
import java.util.List;
@@ -34,13 +34,13 @@ public class AllocatableResources {
private final double fulfilment;
/** Fake allocatable resources from requested capacity */
- public AllocatableResources(ClusterAllocationFeatures features,
+ public AllocatableResources(ClusterAllocationParams params,
ClusterResources requested,
ClusterSpec clusterSpec,
NodeRepository nodeRepository) {
this.nodes = requested.nodes();
this.groups = requested.groups();
- this.realResources = nodeRepository.resourcesCalculator().requestToReal(requested.nodeResources(), nodeRepository.exclusiveAllocation(features, clusterSpec), false);
+ this.realResources = nodeRepository.resourcesCalculator().requestToReal(requested.nodeResources(), nodeRepository.exclusiveAllocation(params, clusterSpec), false);
this.advertisedResources = requested.nodeResources();
this.clusterSpec = clusterSpec;
this.fulfilment = 1;
@@ -169,7 +169,7 @@ public class AllocatableResources {
.withBandwidthGbps(sum.bandwidthGbps() / nodes.size());
}
- public static Optional<AllocatableResources> from(ClusterAllocationFeatures features,
+ public static Optional<AllocatableResources> from(ClusterAllocationParams params,
ClusterResources wantedResources,
ApplicationId applicationId,
ClusterSpec clusterSpec,
@@ -178,10 +178,10 @@ public class AllocatableResources {
ClusterModel model,
NodeRepository nodeRepository) {
var systemLimits = nodeRepository.nodeResourceLimits();
- boolean exclusive = nodeRepository.exclusiveAllocation(features, clusterSpec);
+ boolean exclusive = nodeRepository.exclusiveAllocation(params, clusterSpec);
if (! exclusive) {
// We decide resources: Add overhead to what we'll request (advertised) to make sure real becomes (at least) cappedNodeResources
- var allocatableResources = calculateAllocatableResources(features,
+ var allocatableResources = calculateAllocatableResources(params,
wantedResources,
nodeRepository,
applicationId,
@@ -193,8 +193,8 @@ public class AllocatableResources {
var worstCaseRealResources = nodeRepository.resourcesCalculator().requestToReal(allocatableResources.advertisedResources,
exclusive,
false);
- if ( ! systemLimits.isWithinRealLimits(features, worstCaseRealResources, applicationId, clusterSpec)) {
- allocatableResources = calculateAllocatableResources(features,
+ if ( ! systemLimits.isWithinRealLimits(params, worstCaseRealResources, applicationId, clusterSpec)) {
+ allocatableResources = calculateAllocatableResources(params,
wantedResources,
nodeRepository,
applicationId,
@@ -204,7 +204,7 @@ public class AllocatableResources {
false);
}
- if ( ! systemLimits.isWithinRealLimits(features, allocatableResources.realResources, applicationId, clusterSpec))
+ if ( ! systemLimits.isWithinRealLimits(params, allocatableResources.realResources, applicationId, clusterSpec))
return Optional.empty();
if ( ! anySatisfies(allocatableResources.realResources, availableRealHostResources))
return Optional.empty();
@@ -233,7 +233,7 @@ public class AllocatableResources {
}
if ( ! between(applicationLimits.min().nodeResources(), applicationLimits.max().nodeResources(), advertisedResources)) continue;
- if ( ! systemLimits.isWithinRealLimits(features, realResources, applicationId, clusterSpec)) continue;
+ if ( ! systemLimits.isWithinRealLimits(params, realResources, applicationId, clusterSpec)) continue;
var candidate = new AllocatableResources(wantedResources.with(realResources),
advertisedResources,
@@ -256,7 +256,7 @@ public class AllocatableResources {
}
}
- private static AllocatableResources calculateAllocatableResources(ClusterAllocationFeatures features,
+ private static AllocatableResources calculateAllocatableResources(ClusterAllocationParams params,
ClusterResources wantedResources,
NodeRepository nodeRepository,
ApplicationId applicationId,
@@ -269,7 +269,7 @@ public class AllocatableResources {
advertisedResources = systemLimits.enlargeToLegal(advertisedResources, applicationId, clusterSpec, exclusive, true); // Ask for something legal
advertisedResources = applicationLimits.cap(advertisedResources); // Overrides other conditions, even if it will then fail
var realResources = nodeRepository.resourcesCalculator().requestToReal(advertisedResources, exclusive, bestCase); // What we'll really get
- if ( ! systemLimits.isWithinRealLimits(features, realResources, applicationId, clusterSpec)
+ if ( ! systemLimits.isWithinRealLimits(params, realResources, applicationId, clusterSpec)
&& advertisedResources.storageType() == NodeResources.StorageType.any) {
// Since local disk reserves some of the storage, try to constrain to remote disk
advertisedResources = advertisedResources.with(NodeResources.StorageType.remote);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
index 7c09e3fa041..400a231a2c8 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
@@ -5,7 +5,7 @@ import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.IntRange;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.vespa.hosted.provision.NodeRepository;
-import com.yahoo.vespa.hosted.provision.provisioning.ClusterAllocationFeatures;
+import com.yahoo.vespa.hosted.provision.provisioning.ClusterAllocationParams;
import java.util.Optional;
@@ -35,7 +35,7 @@ public class AllocationOptimizer {
* @return the best allocation, if there are any possible legal allocations, fulfilling the target
* fully or partially, within the limits
*/
- public Optional<AllocatableResources> findBestAllocation(ClusterAllocationFeatures features,
+ public Optional<AllocatableResources> findBestAllocation(ClusterAllocationParams params,
Load loadAdjustment,
ClusterModel model,
Limits limits) {
@@ -44,12 +44,12 @@ public class AllocationOptimizer {
new ClusterResources(maximumNodes, maximumNodes, NodeResources.unspecified()),
IntRange.empty());
else
- limits = atLeast(minimumNodes, limits).fullySpecified(features, model.current().clusterSpec(), nodeRepository, model.application().id());
+ limits = atLeast(minimumNodes, limits).fullySpecified(params, model.current().clusterSpec(), nodeRepository, model.application().id());
Optional<AllocatableResources> bestAllocation = Optional.empty();
var availableRealHostResources = nodeRepository.zone().cloud().dynamicProvisioning()
? nodeRepository.flavors().getFlavors().stream().map(flavor -> flavor.resources()).toList()
: nodeRepository.nodes().list().hosts().stream().map(host -> host.flavor().resources())
- .map(hostResources -> maxResourcesOf(features, hostResources, model))
+ .map(hostResources -> maxResourcesOf(params, hostResources, model))
.toList();
for (int groups = limits.min().groups(); groups <= limits.max().groups(); groups++) {
for (int nodes = limits.min().nodes(); nodes <= limits.max().nodes(); nodes++) {
@@ -59,7 +59,7 @@ public class AllocationOptimizer {
groups,
nodeResourcesWith(nodes, groups,
limits, loadAdjustment, model));
- var allocatableResources = AllocatableResources.from(features,
+ var allocatableResources = AllocatableResources.from(params,
resources,
model.application().id(),
model.current().clusterSpec(),
@@ -76,8 +76,8 @@ public class AllocationOptimizer {
}
/** Returns the max resources of a host one node may allocate. */
- private NodeResources maxResourcesOf(ClusterAllocationFeatures features, NodeResources hostResources, ClusterModel model) {
- if (nodeRepository.exclusiveAllocation(features, model.clusterSpec())) return hostResources;
+ private NodeResources maxResourcesOf(ClusterAllocationParams params, NodeResources hostResources, ClusterModel model) {
+ if (nodeRepository.exclusiveAllocation(params, model.clusterSpec())) return hostResources;
// static, shared hosts: Allocate at most half of the host cpu to simplify management
return hostResources.withVcpu(hostResources.vcpu() / 2);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
index 60fd75051fc..6aca5548f6e 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
@@ -7,7 +7,7 @@ import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Application;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.autoscale.Autoscaling.Status;
-import com.yahoo.vespa.hosted.provision.provisioning.ClusterAllocationFeatures;
+import com.yahoo.vespa.hosted.provision.provisioning.ClusterAllocationParams;
import java.time.Duration;
@@ -55,8 +55,8 @@ public class Autoscaler {
}
private Autoscaling autoscale(Application application, Cluster cluster, NodeList clusterNodes, Limits limits) {
- ClusterAllocationFeatures features = ClusterAllocationFeatures.from(nodeRepository.flagSource(), application.id(), clusterNodes.clusterSpec().vespaVersion());
- var model = new ClusterModel(features,
+ ClusterAllocationParams params = ClusterAllocationParams.from(nodeRepository.flagSource(), application.id(), clusterNodes.clusterSpec().vespaVersion());
+ var model = new ClusterModel(params,
nodeRepository,
application,
clusterNodes.not().retired().clusterSpec(),
@@ -76,7 +76,7 @@ public class Autoscaler {
var loadAdjustment = model.loadAdjustment();
// Ensure we only scale down if we'll have enough headroom to not scale up again given a small load increase
- var target = allocationOptimizer.findBestAllocation(features, loadAdjustment, model, limits);
+ var target = allocationOptimizer.findBestAllocation(params, loadAdjustment, model, limits);
if (target.isEmpty())
return Autoscaling.dontScale(Status.insufficient, "No allocations are possible within configured limits", model);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
index 891d90acc03..bcdead6b375 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
@@ -8,7 +8,7 @@ import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Application;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.provisioning.CapacityPolicies;
-import com.yahoo.vespa.hosted.provision.provisioning.ClusterAllocationFeatures;
+import com.yahoo.vespa.hosted.provision.provisioning.ClusterAllocationParams;
import java.time.Clock;
import java.time.Duration;
@@ -47,7 +47,7 @@ public class ClusterModel {
// TODO: Measure this, and only take it into account with queries
private static final double fixedCpuCostFraction = 0.1;
- private final ClusterAllocationFeatures features;
+ private final ClusterAllocationParams params;
private final NodeRepository nodeRepository;
private final Application application;
private final ClusterSpec clusterSpec;
@@ -76,7 +76,7 @@ public class ClusterModel {
private Double maxQueryGrowthRate = null;
private OptionalDouble averageQueryRate = null;
- public ClusterModel(ClusterAllocationFeatures features,
+ public ClusterModel(ClusterAllocationParams params,
NodeRepository nodeRepository,
Application application,
ClusterSpec clusterSpec,
@@ -85,7 +85,7 @@ public class ClusterModel {
AllocatableResources current,
MetricsDb metricsDb,
Clock clock) {
- this.features = features;
+ this.params = params;
this.nodeRepository = nodeRepository;
this.application = application;
this.clusterSpec = clusterSpec;
@@ -100,7 +100,7 @@ public class ClusterModel {
this.at = clock.instant();
}
- ClusterModel(ClusterAllocationFeatures features,
+ ClusterModel(ClusterAllocationParams params,
NodeRepository nodeRepository,
Application application,
ClusterSpec clusterSpec,
@@ -111,7 +111,7 @@ public class ClusterModel {
Duration allocationDuration,
ClusterTimeseries clusterTimeseries,
ClusterNodesTimeseries nodeTimeseries) {
- this.features = features;
+ this.params = params;
this.nodeRepository = nodeRepository;
this.application = application;
this.clusterSpec = clusterSpec;
@@ -175,7 +175,7 @@ public class ClusterModel {
}
public boolean isExclusive() {
- return nodeRepository.exclusiveAllocation(features, clusterSpec);
+ return nodeRepository.exclusiveAllocation(params, clusterSpec);
}
/** Returns the relative load adjustment that should be made to this cluster given available measurements. */
@@ -438,12 +438,12 @@ public class ClusterModel {
double averageReal() {
if (nodes.isEmpty()) { // we're estimating
- var initialResources = new CapacityPolicies(nodeRepository).specifyFully(features,
+ var initialResources = new CapacityPolicies(nodeRepository).specifyFully(params,
cluster.minResources().nodeResources(),
clusterSpec,
application.id());
return nodeRepository.resourcesCalculator().requestToReal(initialResources,
- nodeRepository.exclusiveAllocation(features, clusterSpec),
+ nodeRepository.exclusiveAllocation(params, clusterSpec),
false).memoryGb();
}
else {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Limits.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Limits.java
index d6c4e65c3f1..077c0f0d9c4 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Limits.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Limits.java
@@ -10,7 +10,7 @@ import com.yahoo.config.provision.NodeResources;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.provisioning.CapacityPolicies;
-import com.yahoo.vespa.hosted.provision.provisioning.ClusterAllocationFeatures;
+import com.yahoo.vespa.hosted.provision.provisioning.ClusterAllocationParams;
import java.util.Objects;
@@ -66,12 +66,12 @@ public class Limits {
return resources;
}
- public Limits fullySpecified(ClusterAllocationFeatures features, ClusterSpec clusterSpec, NodeRepository nodeRepository, ApplicationId applicationId) {
+ public Limits fullySpecified(ClusterAllocationParams params, ClusterSpec clusterSpec, NodeRepository nodeRepository, ApplicationId applicationId) {
if (this.isEmpty()) throw new IllegalStateException("Unspecified limits can not be made fully specified");
var capacityPolicies = new CapacityPolicies(nodeRepository);
- return new Limits(capacityPolicies.specifyFully(features, min, clusterSpec, applicationId),
- capacityPolicies.specifyFully(features, max, clusterSpec, applicationId),
+ return new Limits(capacityPolicies.specifyFully(params, min, clusterSpec, applicationId),
+ capacityPolicies.specifyFully(params, max, clusterSpec, applicationId),
groupSize);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java
index 0a44c1e2438..7512f08ac05 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java
@@ -25,7 +25,7 @@ import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.History;
import com.yahoo.vespa.hosted.provision.node.IP;
-import com.yahoo.vespa.hosted.provision.provisioning.ClusterAllocationFeatures;
+import com.yahoo.vespa.hosted.provision.provisioning.ClusterAllocationParams;
import com.yahoo.vespa.hosted.provision.provisioning.HostProvisionRequest;
import com.yahoo.vespa.hosted.provision.provisioning.HostProvisioner;
import com.yahoo.vespa.hosted.provision.provisioning.HostProvisioner.HostSharing;
@@ -158,9 +158,9 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
private List<Node> provision(NodeList nodeList) {
ApplicationId application = ApplicationId.defaultId();
- var features = ClusterAllocationFeatures.from(flagSource, application, Vtag.currentVersion);
+ var params = ClusterAllocationParams.from(flagSource, application, Vtag.currentVersion);
- return provisionUntilNoDeficit(features, application, nodeList).stream()
+ return provisionUntilNoDeficit(params, application, nodeList).stream()
.sorted(comparing(node -> node.history().events().stream()
.map(History.Event::at)
.min(naturalOrder())
@@ -191,7 +191,7 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
* @throws IllegalStateException if there was an algorithmic problem, and in case message
* should be sufficient (avoid no stack trace).
*/
- private List<Node> provisionUntilNoDeficit(ClusterAllocationFeatures features, ApplicationId application, NodeList nodeList) {
+ private List<Node> provisionUntilNoDeficit(ClusterAllocationParams params, ApplicationId application, NodeList nodeList) {
List<ClusterCapacity> preprovisionCapacity = preprovisionCapacityFlag.value();
// Worst-case each ClusterCapacity in preprovisionCapacity will require an allocation.
@@ -200,7 +200,7 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
var nodesPlusProvisioned = new ArrayList<>(nodeList.asList());
for (int numProvisions = 0;; ++numProvisions) {
var nodesPlusProvisionedPlusAllocated = new ArrayList<>(nodesPlusProvisioned);
- Optional<ClusterCapacity> deficit = allocatePreprovisionCapacity(application, preprovisionCapacity, nodesPlusProvisionedPlusAllocated, features);
+ Optional<ClusterCapacity> deficit = allocatePreprovisionCapacity(application, preprovisionCapacity, nodesPlusProvisionedPlusAllocated, params);
if (deficit.isEmpty()) {
return nodesPlusProvisionedPlusAllocated;
}
@@ -210,7 +210,7 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
}
ClusterCapacity clusterCapacityDeficit = deficit.get();
- nodesPlusProvisioned.addAll(provisionHosts(features,
+ nodesPlusProvisioned.addAll(provisionHosts(params,
clusterCapacityDeficit.count(),
toNodeResources(clusterCapacityDeficit),
Optional.ofNullable(clusterCapacityDeficit.clusterType()),
@@ -218,14 +218,14 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
}
}
- private List<Node> provisionHosts(ClusterAllocationFeatures features, int count, NodeResources nodeResources, Optional<String> clusterType, NodeList allNodes) {
+ private List<Node> provisionHosts(ClusterAllocationParams params, int count, NodeResources nodeResources, Optional<String> clusterType, NodeList allNodes) {
try {
if (throttler.throttle(allNodes, Agent.HostCapacityMaintainer)) {
throw new NodeAllocationException("Host provisioning is being throttled", true);
}
Version osVersion = nodeRepository().osVersions().targetFor(NodeType.host).orElse(Version.emptyVersion);
List<Integer> provisionIndices = nodeRepository().database().readProvisionIndices(count);
- HostSharing sharingMode = nodeRepository().exclusiveAllocation(features, asSpec(clusterType, 0)) ? HostSharing.exclusive : HostSharing.shared;
+ HostSharing sharingMode = nodeRepository().exclusiveAllocation(params, asSpec(clusterType, 0)) ? HostSharing.exclusive : HostSharing.shared;
HostProvisionRequest request = new HostProvisionRequest(provisionIndices, NodeType.host, nodeResources,
ApplicationId.defaultId(), osVersion,
sharingMode, clusterType.map(ClusterSpec.Type::valueOf), Optional.empty(),
@@ -233,7 +233,7 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
List<Node> hosts = new ArrayList<>();
Runnable waiter;
try (var lock = nodeRepository().nodes().lockUnallocated()) {
- waiter = hostProvisioner.provisionHosts(features,
+ waiter = hostProvisioner.provisionHosts(params,
request,
resources -> true,
provisionedHosts -> {
@@ -264,11 +264,11 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
private Optional<ClusterCapacity> allocatePreprovisionCapacity(ApplicationId application,
List<ClusterCapacity> preprovisionCapacity,
ArrayList<Node> mutableNodes,
- ClusterAllocationFeatures features) {
+ ClusterAllocationParams params) {
for (int clusterIndex = 0; clusterIndex < preprovisionCapacity.size(); ++clusterIndex) {
ClusterCapacity clusterCapacity = preprovisionCapacity.get(clusterIndex);
LockedNodeList allNodes = new LockedNodeList(mutableNodes, () -> {});
- List<Node> candidates = findCandidates(application, clusterCapacity, clusterIndex, allNodes, features);
+ List<Node> candidates = findCandidates(application, clusterCapacity, clusterIndex, allNodes, params);
int deficit = Math.max(0, clusterCapacity.count() - candidates.size());
if (deficit > 0) {
return Optional.of(clusterCapacity.withCount(deficit));
@@ -282,7 +282,7 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
}
private List<Node> findCandidates(ApplicationId application, ClusterCapacity clusterCapacity, int clusterIndex,
- LockedNodeList allNodes, ClusterAllocationFeatures features) {
+ LockedNodeList allNodes, ClusterAllocationParams params) {
NodeResources nodeResources = toNodeResources(clusterCapacity);
// We'll allocate each ClusterCapacity as a unique cluster in a dummy application
@@ -295,17 +295,17 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
NodePrioritizer prioritizer = new NodePrioritizer(allNodes, application, cluster, nodeSpec,
true, false, allocationContext, nodeRepository().nodes(),
nodeRepository().resourcesCalculator(), nodeRepository().spareCount(),
- nodeRepository().exclusiveAllocation(features, cluster), features);
+ nodeRepository().exclusiveAllocation(params, cluster), params);
List<NodeCandidate> nodeCandidates = prioritizer.collect()
.stream()
.filter(node -> node.violatesExclusivity(cluster,
application,
- nodeRepository().exclusiveClusterType(features, cluster),
- nodeRepository().exclusiveAllocation(features, cluster),
+ nodeRepository().exclusiveClusterType(params, cluster),
+ nodeRepository().exclusiveAllocation(params, cluster),
false,
nodeRepository().zone().cloud().allowHostSharing(),
allNodes,
- features)
+ params)
!= NodeCandidate.ExclusivityViolation.YES)
.toList();
MutableInteger index = new MutableInteger(0);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgrader.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgrader.java
index 795abd54771..a3094b382bf 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgrader.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgrader.java
@@ -12,7 +12,7 @@ import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.Allocation;
import com.yahoo.vespa.hosted.provision.provisioning.HostProvisioner;
-import com.yahoo.vespa.hosted.provision.provisioning.ClusterAllocationFeatures;
+import com.yahoo.vespa.hosted.provision.provisioning.ClusterAllocationParams;
import java.time.Duration;
import java.util.HashSet;
@@ -72,8 +72,8 @@ public class HostFlavorUpgrader extends NodeRepositoryMaintainer {
if (parent.isEmpty()) continue;
if (exhaustedFlavors.contains(parent.get().flavor().name())) continue;
Allocation allocation = node.allocation().get();
- var features = ClusterAllocationFeatures.from(nodeRepository().flagSource(), allocation.owner(), allocation.membership().cluster().vespaVersion());
- Predicate<NodeResources> realHostResourcesWithinLimits = resources -> nodeRepository().nodeResourceLimits().isWithinRealLimits(features, resources, allocation.owner(), allocation.membership().cluster());
+ var params = ClusterAllocationParams.from(nodeRepository().flagSource(), allocation.owner(), allocation.membership().cluster().vespaVersion());
+ Predicate<NodeResources> realHostResourcesWithinLimits = resources -> nodeRepository().nodeResourceLimits().isWithinRealLimits(params, resources, allocation.owner(), allocation.membership().cluster());
if (!hostProvisioner.canUpgradeFlavor(parent.get(), node, realHostResourcesWithinLimits)) continue;
if (parent.get().status().wantToUpgradeFlavor() && allocation.membership().retired()) continue; // Already upgrading
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
index f1fca4a71e9..d831917bff3 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
@@ -86,22 +86,22 @@ public class CapacityPolicies {
return target;
}
- public ClusterResources specifyFully(ClusterAllocationFeatures features, ClusterResources resources, ClusterSpec clusterSpec, ApplicationId applicationId) {
- return resources.with(specifyFully(features, resources.nodeResources(), clusterSpec, applicationId));
+ public ClusterResources specifyFully(ClusterAllocationParams params, ClusterResources resources, ClusterSpec clusterSpec, ApplicationId applicationId) {
+ return resources.with(specifyFully(params, resources.nodeResources(), clusterSpec, applicationId));
}
- public NodeResources specifyFully(ClusterAllocationFeatures features, NodeResources resources, ClusterSpec clusterSpec, ApplicationId applicationId) {
- NodeResources amended = resources.withUnspecifiedFieldsFrom(defaultResources(features, clusterSpec, applicationId).with(DiskSpeed.any));
+ public NodeResources specifyFully(ClusterAllocationParams params, NodeResources resources, ClusterSpec clusterSpec, ApplicationId applicationId) {
+ NodeResources amended = resources.withUnspecifiedFieldsFrom(defaultResources(params, clusterSpec, applicationId).with(DiskSpeed.any));
// TODO jonmv: remove this after all apps are 8.248.8 or above; architecture for admin nodes was not picked up before this.
if (clusterSpec.vespaVersion().isBefore(Version.fromString("8.248.8"))) amended = amended.with(resources.architecture());
return amended;
}
- private NodeResources defaultResources(ClusterAllocationFeatures features, ClusterSpec clusterSpec, ApplicationId applicationId) {
+ private NodeResources defaultResources(ClusterAllocationParams params, ClusterSpec clusterSpec, ApplicationId applicationId) {
if (clusterSpec.type() == ClusterSpec.Type.admin) {
Architecture architecture = adminClusterArchitecture(applicationId);
- if (nodeRepository.exclusiveAllocation(features, clusterSpec)) {
+ if (nodeRepository.exclusiveAllocation(params, clusterSpec)) {
return smallestExclusiveResources().with(architecture);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/ClusterAllocationFeatures.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/ClusterAllocationFeatures.java
deleted file mode 100644
index 7af176e8715..00000000000
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/ClusterAllocationFeatures.java
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.provision.provisioning;
-
-import com.yahoo.component.Version;
-import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.vespa.flags.FetchVector;
-import com.yahoo.vespa.flags.FlagSource;
-import com.yahoo.vespa.flags.Flags;
-import com.yahoo.vespa.flags.PermanentFlags;
-import com.yahoo.vespa.flags.custom.SharedHost;
-
-/**
- * Miscellaneous constants used while preparing an allocation for a cluster.
- *
- * <p>Typically used to access feature flags that was evaluated once and at the start of the preparation,
- * to avoid inconsistencies if evaluated multiple times during preparation.</p>
- *
- * @author hakonhall
- */
-public record ClusterAllocationFeatures(boolean makeExclusive, SharedHost sharedHost) {
- public static ClusterAllocationFeatures from(FlagSource flagSource, ApplicationId application, Version version) {
- return new ClusterAllocationFeatures(Flags.MAKE_EXCLUSIVE.bindTo(flagSource)
- .with(FetchVector.Dimension.TENANT_ID, application.tenant().value())
- .with(FetchVector.Dimension.INSTANCE_ID, application.serializedForm())
- .with(FetchVector.Dimension.VESPA_VERSION, version.toFullString())
- .value(),
- PermanentFlags.SHARED_HOST.bindTo(flagSource).value());
- }
-}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/ClusterAllocationParams.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/ClusterAllocationParams.java
new file mode 100644
index 00000000000..1e2350a65ac
--- /dev/null
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/ClusterAllocationParams.java
@@ -0,0 +1,29 @@
+// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.provision.provisioning;
+
+import com.yahoo.component.Version;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.vespa.flags.FetchVector;
+import com.yahoo.vespa.flags.FlagSource;
+import com.yahoo.vespa.flags.Flags;
+import com.yahoo.vespa.flags.PermanentFlags;
+import com.yahoo.vespa.flags.custom.SharedHost;
+
+/**
+ * Miscellaneous constants used while preparing an allocation for a cluster.
+ *
+ * <p>Typically used to access feature flags that was evaluated once and at the start of the preparation,
+ * to avoid inconsistencies if evaluated multiple times during preparation.</p>
+ *
+ * @author hakonhall
+ */
+public record ClusterAllocationParams(boolean makeExclusive, SharedHost sharedHost) {
+ public static ClusterAllocationParams from(FlagSource flagSource, ApplicationId application, Version version) {
+ return new ClusterAllocationParams(Flags.MAKE_EXCLUSIVE.bindTo(flagSource)
+ .with(FetchVector.Dimension.TENANT_ID, application.tenant().value())
+ .with(FetchVector.Dimension.INSTANCE_ID, application.serializedForm())
+ .with(FetchVector.Dimension.VESPA_VERSION, version.toFullString())
+ .value(),
+ PermanentFlags.SHARED_HOST.bindTo(flagSource).value());
+ }
+}
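
Editor's note, not part of the commit: the javadoc above explains that the record snapshots flag-backed values once, at the start of preparation, so that every later decision sees the same values. A minimal usage sketch, based on the call sites touched in this diff, makes that pattern concrete. It assumes a FlagSource, ApplicationId, ClusterSpec and NodeRepository are already in scope, as they are at those call sites.

// Illustrative sketch only (not part of this commit): capture the flag values once...
ClusterAllocationParams params =
        ClusterAllocationParams.from(flagSource, applicationId, clusterSpec.vespaVersion());

// ...then thread the same immutable snapshot through the rest of the preparation,
// so decisions cannot diverge if the underlying flags change mid-preparation.
boolean exclusiveAllocation  = nodeRepository.exclusiveAllocation(params, clusterSpec);
boolean exclusiveClusterType = nodeRepository.exclusiveClusterType(params, clusterSpec);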
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java
index 5b4d003788c..f407628399f 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java
@@ -53,7 +53,7 @@ public interface HostProvisioner {
* @return a runnable that waits for the provisioning request to finish. It can be run without holding any locks,
* but may fail with an exception that should be propagated to the user initiating prepare()
*/
- Runnable provisionHosts(ClusterAllocationFeatures features,
+ Runnable provisionHosts(ClusterAllocationParams params,
HostProvisionRequest request,
Predicate<NodeResources> realHostResourcesWithinLimits,
Consumer<List<ProvisionedHost>> whenProvisioned) throws NodeAllocationException;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java
index 3f106feb50f..0c830d55471 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeAllocation.java
@@ -84,10 +84,10 @@ class NodeAllocation {
private final NodeRepository nodeRepository;
private final Optional<String> requiredHostFlavor;
- private final ClusterAllocationFeatures features;
+ private final ClusterAllocationParams params;
NodeAllocation(NodeList allNodes, ApplicationId application, ClusterSpec cluster, NodeSpec requested,
- Supplier<Integer> nextIndex, NodeRepository nodeRepository, ClusterAllocationFeatures features) {
+ Supplier<Integer> nextIndex, NodeRepository nodeRepository, ClusterAllocationParams params) {
this.allNodes = allNodes;
this.application = application;
this.cluster = cluster;
@@ -100,7 +100,7 @@ class NodeAllocation {
.with(FetchVector.Dimension.CLUSTER_ID, cluster.id().value())
.value())
.filter(s -> !s.isBlank());
- this.features = features;
+ this.params = params;
}
/**
@@ -133,7 +133,7 @@ class NodeAllocation {
}
}
else if ( ! saturated() && hasCompatibleResources(candidate)) {
- if ( ! nodeRepository.nodeResourceLimits().isWithinRealLimits(features, candidate, application, cluster)) {
+ if ( ! nodeRepository.nodeResourceLimits().isWithinRealLimits(params, candidate, application, cluster)) {
++rejectedDueToInsufficientRealResources;
continue;
}
@@ -169,7 +169,7 @@ class NodeAllocation {
boolean alreadyRetired = candidate.allocation().map(a -> a.membership().retired()).orElse(false);
return alreadyRetired ? Retirement.alreadyRetired : Retirement.none;
}
- if ( ! nodeRepository.nodeResourceLimits().isWithinRealLimits(features, candidate, application, cluster)) return Retirement.outsideRealLimits;
+ if ( ! nodeRepository.nodeResourceLimits().isWithinRealLimits(params, candidate, application, cluster)) return Retirement.outsideRealLimits;
if (violatesParentHostPolicy(candidate)) return Retirement.violatesParentHostPolicy;
if ( ! hasCompatibleResources(candidate)) return Retirement.incompatibleResources;
if (candidate.parent.map(node -> node.status().wantToUpgradeFlavor()).orElse(false)) return Retirement.violatesHostFlavorGeneration;
@@ -198,10 +198,10 @@ class NodeAllocation {
private NodeCandidate.ExclusivityViolation violatesExclusivity(NodeCandidate candidate) {
return candidate.violatesExclusivity(cluster, application,
- nodeRepository.exclusiveClusterType(features, cluster),
- nodeRepository.exclusiveAllocation(features, cluster),
+ nodeRepository.exclusiveClusterType(params, cluster),
+ nodeRepository.exclusiveAllocation(params, cluster),
nodeRepository.exclusiveProvisioning(cluster),
- nodeRepository.zone().cloud().allowHostSharing(), allNodes, features);
+ nodeRepository.zone().cloud().allowHostSharing(), allNodes, params);
}
/**
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java
index 28cef679189..5993320e25f 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeCandidate.java
@@ -596,7 +596,7 @@ public abstract class NodeCandidate implements Nodelike, Comparable<NodeCandidat
public ExclusivityViolation violatesExclusivity(ClusterSpec cluster, ApplicationId application,
boolean exclusiveClusterType, boolean exclusiveAllocation, boolean exclusiveProvisioning,
- boolean hostSharing, NodeList allNodes, ClusterAllocationFeatures features) {
+ boolean hostSharing, NodeList allNodes, ClusterAllocationParams params) {
if (parentHostname().isEmpty()) return ExclusivityViolation.NONE;
if (type() != NodeType.tenant) return ExclusivityViolation.NONE;
@@ -615,7 +615,7 @@ public abstract class NodeCandidate implements Nodelike, Comparable<NodeCandidat
return ExclusivityViolation.YES;
// this cluster requires a parent that was provisioned exclusively for this cluster type
- if (exclusiveClusterType && parent.flatMap(Node::exclusiveToClusterType).isEmpty() && features.makeExclusive())
+ if (exclusiveClusterType && parent.flatMap(Node::exclusiveToClusterType).isEmpty() && params.makeExclusive())
return ExclusivityViolation.YES;
// the parent is provisioned for another application
@@ -632,7 +632,7 @@ public abstract class NodeCandidate implements Nodelike, Comparable<NodeCandidat
// this cluster requires exclusivity, but the parent is not exclusive
if (exclusiveAllocation && parent.flatMap(Node::exclusiveToApplicationId).isEmpty())
- return canMakeHostExclusive(features.makeExclusive(), type(), hostSharing) ?
+ return canMakeHostExclusive(params.makeExclusive(), type(), hostSharing) ?
ExclusivityViolation.PARENT_HOST_NOT_EXCLUSIVE :
ExclusivityViolation.YES;
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
index 3451caf9254..c5788051930 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
@@ -41,7 +41,7 @@ public class NodePrioritizer {
private final boolean dynamicProvisioning;
private final boolean allowHostSharing;
private final boolean exclusiveAllocation;
- private final ClusterAllocationFeatures features;
+ private final ClusterAllocationParams params;
private final boolean canAllocateToSpareHosts;
private final boolean topologyChange;
private final int currentClusterSize;
@@ -49,7 +49,7 @@ public class NodePrioritizer {
public NodePrioritizer(LockedNodeList allNodes, ApplicationId application, ClusterSpec clusterSpec, NodeSpec nodeSpec,
boolean dynamicProvisioning, boolean allowHostSharing, IP.Allocation.Context ipAllocationContext, Nodes nodes,
- HostResourcesCalculator hostResourcesCalculator, int spareCount, boolean exclusiveAllocation, ClusterAllocationFeatures features) {
+ HostResourcesCalculator hostResourcesCalculator, int spareCount, boolean exclusiveAllocation, ClusterAllocationParams params) {
this.allNodes = allNodes;
this.calculator = hostResourcesCalculator;
this.capacity = new HostCapacity(this.allNodes, hostResourcesCalculator);
@@ -59,7 +59,7 @@ public class NodePrioritizer {
this.dynamicProvisioning = dynamicProvisioning;
this.allowHostSharing = allowHostSharing;
this.exclusiveAllocation = exclusiveAllocation;
- this.features = features;
+ this.params = params;
this.spareHosts = dynamicProvisioning ?
capacity.findSpareHostsInDynamicallyProvisionedZones(this.allNodes.asList()) :
capacity.findSpareHosts(this.allNodes.asList(), spareCount);
@@ -127,7 +127,7 @@ public class NodePrioritizer {
if (nodes.suspended(host)) continue; // Hosts that are suspended may be down for some time, e.g. for OS upgrade
if (host.reservedTo().isPresent() && !host.reservedTo().get().equals(application.tenant())) continue;
if (host.reservedTo().isPresent() && application.instance().isTester()) continue;
- if (features.makeExclusive()) {
+ if (params.makeExclusive()) {
if ( ! allowHostSharing && exclusiveAllocation && ! fitsPerfectly(host)) continue;
} else {
if (host.exclusiveToApplicationId().isPresent() && ! fitsPerfectly(host)) continue;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
index 3093ebdcf99..931fe425598 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
@@ -85,37 +85,37 @@ public class NodeRepositoryProvisioner implements Provisioner {
public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity requested, ProvisionLogger logger) {
log.log(Level.FINE, "Received deploy prepare request for " + requested +
" for application " + application + ", cluster " + cluster);
- var features = ClusterAllocationFeatures.from(nodeRepository.flagSource(), application, cluster.vespaVersion());
- validate(features, application, cluster, requested, logger);
+ var params = ClusterAllocationParams.from(nodeRepository.flagSource(), application, cluster.vespaVersion());
+ validate(params, application, cluster, requested, logger);
NodeResources resources;
NodeSpec nodeSpec;
if (requested.type() == NodeType.tenant) {
cluster = capacityPolicies.decideExclusivity(requested, cluster);
Capacity actual = capacityPolicies.applyOn(requested, application, cluster.isExclusive());
- ClusterResources target = decideTargetResources(features, application, cluster, actual);
+ ClusterResources target = decideTargetResources(params, application, cluster, actual);
validate(actual, target, cluster, application);
logIfDownscaled(requested.minResources().nodes(), actual.minResources().nodes(), cluster, logger);
- resources = getNodeResources(features, cluster, target.nodeResources(), application);
+ resources = getNodeResources(params, cluster, target.nodeResources(), application);
nodeSpec = NodeSpec.from(target.nodes(), target.groups(), resources, cluster.isExclusive(), actual.canFail(),
requested.cloudAccount().orElse(nodeRepository.zone().cloud().account()),
requested.clusterInfo().hostTTL());
}
else {
cluster = cluster.withExclusivity(true);
- resources = getNodeResources(features, cluster, requested.minResources().nodeResources(), application);
+ resources = getNodeResources(params, cluster, requested.minResources().nodeResources(), application);
nodeSpec = NodeSpec.from(requested.type(), nodeRepository.zone().cloud().account());
}
- return asSortedHosts(preparer.prepare(features, application, cluster, nodeSpec),
+ return asSortedHosts(preparer.prepare(params, application, cluster, nodeSpec),
requireCompatibleResources(resources, cluster));
}
- private void validate(ClusterAllocationFeatures features, ApplicationId application, ClusterSpec cluster, Capacity requested, ProvisionLogger logger) {
+ private void validate(ClusterAllocationParams params, ApplicationId application, ClusterSpec cluster, Capacity requested, ProvisionLogger logger) {
if (cluster.group().isPresent()) throw new IllegalArgumentException("Node requests cannot specify a group");
- nodeRepository.nodeResourceLimits().ensureWithinAdvertisedLimits(features, "Min", requested.minResources().nodeResources(), application, cluster);
- nodeRepository.nodeResourceLimits().ensureWithinAdvertisedLimits(features, "Max", requested.maxResources().nodeResources(), application, cluster);
+ nodeRepository.nodeResourceLimits().ensureWithinAdvertisedLimits(params, "Min", requested.minResources().nodeResources(), application, cluster);
+ nodeRepository.nodeResourceLimits().ensureWithinAdvertisedLimits(params, "Max", requested.maxResources().nodeResources(), application, cluster);
if (!requested.minResources().nodeResources().gpuResources().equals(requested.maxResources().nodeResources().gpuResources()))
throw new IllegalArgumentException(requested + " is invalid: GPU capacity cannot have ranges");
@@ -133,8 +133,8 @@ public class NodeRepositoryProvisioner implements Provisioner {
}
}
- private NodeResources getNodeResources(ClusterAllocationFeatures features, ClusterSpec cluster, NodeResources nodeResources, ApplicationId applicationId) {
- return capacityPolicies.specifyFully(features, nodeResources, cluster, applicationId);
+ private NodeResources getNodeResources(ClusterAllocationParams params, ClusterSpec cluster, NodeResources nodeResources, ApplicationId applicationId) {
+ return capacityPolicies.specifyFully(params, nodeResources, cluster, applicationId);
}
@Override
@@ -166,18 +166,18 @@ public class NodeRepositoryProvisioner implements Provisioner {
* Returns the target cluster resources, a value between the min and max in the requested capacity,
* and updates the application store with the received min and max.
*/
- private ClusterResources decideTargetResources(ClusterAllocationFeatures features, ApplicationId applicationId, ClusterSpec clusterSpec, Capacity requested) {
+ private ClusterResources decideTargetResources(ClusterAllocationParams params, ApplicationId applicationId, ClusterSpec clusterSpec, Capacity requested) {
try (Mutex lock = nodeRepository.applications().lock(applicationId)) {
var application = nodeRepository.applications().get(applicationId).orElse(Application.empty(applicationId))
.withCluster(clusterSpec.id(), clusterSpec.isExclusive(), requested);
nodeRepository.applications().put(application, lock);
var cluster = application.cluster(clusterSpec.id()).get();
- return cluster.target().resources().orElseGet(() -> currentResources(features, application, clusterSpec, cluster, requested));
+ return cluster.target().resources().orElseGet(() -> currentResources(params, application, clusterSpec, cluster, requested));
}
}
/** Returns the current resources of this cluster, or requested min if none */
- private ClusterResources currentResources(ClusterAllocationFeatures features,
+ private ClusterResources currentResources(ClusterAllocationParams params,
Application application,
ClusterSpec clusterSpec,
Cluster cluster,
@@ -189,19 +189,19 @@ public class NodeRepositoryProvisioner implements Provisioner {
boolean firstDeployment = nodes.isEmpty();
var current =
firstDeployment // start at min, preserve current resources otherwise
- ? new AllocatableResources(features, initialResourcesFrom(features, requested, clusterSpec, application.id()), clusterSpec, nodeRepository)
+ ? new AllocatableResources(params, initialResourcesFrom(params, requested, clusterSpec, application.id()), clusterSpec, nodeRepository)
: new AllocatableResources(nodes, nodeRepository);
- var model = new ClusterModel(features, nodeRepository, application, clusterSpec, cluster, nodes, current, nodeRepository.metricsDb(), nodeRepository.clock());
- return within(features, Limits.of(requested), model, firstDeployment);
+ var model = new ClusterModel(params, nodeRepository, application, clusterSpec, cluster, nodes, current, nodeRepository.metricsDb(), nodeRepository.clock());
+ return within(params, Limits.of(requested), model, firstDeployment);
}
- private ClusterResources initialResourcesFrom(ClusterAllocationFeatures features, Capacity requested, ClusterSpec clusterSpec, ApplicationId applicationId) {
- return capacityPolicies.specifyFully(features, requested.minResources(), clusterSpec, applicationId);
+ private ClusterResources initialResourcesFrom(ClusterAllocationParams params, Capacity requested, ClusterSpec clusterSpec, ApplicationId applicationId) {
+ return capacityPolicies.specifyFully(params, requested.minResources(), clusterSpec, applicationId);
}
/** Make the minimal adjustments needed to the current resources to stay within the limits */
- private ClusterResources within(ClusterAllocationFeatures features,
+ private ClusterResources within(ClusterAllocationParams params,
Limits limits,
ClusterModel model,
boolean firstDeployment) {
@@ -212,11 +212,11 @@ public class NodeRepositoryProvisioner implements Provisioner {
return model.current().advertisedResources();
// Otherwise, find an allocation that preserves the current resources as well as possible
- return allocationOptimizer.findBestAllocation(features,
+ return allocationOptimizer.findBestAllocation(params,
Load.one(),
model,
limits)
- .orElseThrow(() -> newNoAllocationPossible(features, model.current().clusterSpec(), limits))
+ .orElseThrow(() -> newNoAllocationPossible(params, model.current().clusterSpec(), limits))
.advertisedResources();
}
@@ -281,10 +281,10 @@ public class NodeRepositoryProvisioner implements Provisioner {
return nodeResources;
}
- private IllegalArgumentException newNoAllocationPossible(ClusterAllocationFeatures features, ClusterSpec spec, Limits limits) {
+ private IllegalArgumentException newNoAllocationPossible(ClusterAllocationParams params, ClusterSpec spec, Limits limits) {
StringBuilder message = new StringBuilder("No allocation possible within ").append(limits);
- if (nodeRepository.exclusiveAllocation(features, spec) && findNearestNodeResources(limits).isPresent())
+ if (nodeRepository.exclusiveAllocation(params, spec) && findNearestNodeResources(limits).isPresent())
message.append(". Nearest allowed node resources: ").append(findNearestNodeResources(limits).get());
return new IllegalArgumentException(message.toString());
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
index f4e527c81c4..47b0a61d448 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
@@ -31,8 +31,8 @@ public class NodeResourceLimits {
}
/** Validates the resources applications ask for (which are in "advertised" resource space) */
- public void ensureWithinAdvertisedLimits(ClusterAllocationFeatures features, String type, NodeResources requested, ApplicationId applicationId, ClusterSpec cluster) {
- boolean exclusive = nodeRepository.exclusiveAllocation(features, cluster);
+ public void ensureWithinAdvertisedLimits(ClusterAllocationParams params, String type, NodeResources requested, ApplicationId applicationId, ClusterSpec cluster) {
+ boolean exclusive = nodeRepository.exclusiveAllocation(params, cluster);
if (! requested.vcpuIsUnspecified() && requested.vcpu() < minAdvertisedVcpu(applicationId, cluster, exclusive))
illegal(type, "vcpu", "", cluster, requested.vcpu(), minAdvertisedVcpu(applicationId, cluster, exclusive));
if (! requested.memoryGbIsUnspecified() && requested.memoryGb() < minAdvertisedMemoryGb(applicationId, cluster, exclusive))
@@ -48,17 +48,17 @@ public class NodeResourceLimits {
}
/** Returns whether the real resources we'll end up with on a given tenant node are within limits */
- public boolean isWithinRealLimits(ClusterAllocationFeatures features, NodeCandidate candidateNode, ApplicationId applicationId, ClusterSpec cluster) {
+ public boolean isWithinRealLimits(ClusterAllocationParams params, NodeCandidate candidateNode, ApplicationId applicationId, ClusterSpec cluster) {
if (candidateNode.type() != NodeType.tenant) return true; // Resource limits only apply to tenant nodes
- return isWithinRealLimits(features, nodeRepository.resourcesCalculator().realResourcesOf(candidateNode, nodeRepository),
+ return isWithinRealLimits(params, nodeRepository.resourcesCalculator().realResourcesOf(candidateNode, nodeRepository),
applicationId, cluster);
}
/** Returns whether the real resources we'll end up with on a given tenant node are within limits */
- public boolean isWithinRealLimits(ClusterAllocationFeatures features, NodeResources realResources, ApplicationId applicationId, ClusterSpec cluster) {
+ public boolean isWithinRealLimits(ClusterAllocationParams params, NodeResources realResources, ApplicationId applicationId, ClusterSpec cluster) {
if (realResources.isUnspecified()) return true;
- if (realResources.vcpu() < minRealVcpu(features, applicationId, cluster)) return false;
+ if (realResources.vcpu() < minRealVcpu(params, applicationId, cluster)) return false;
if (realResources.memoryGb() < minRealMemoryGb(cluster)) return false;
if (realResources.diskGb() < minRealDiskGb()) return false;
return true;
@@ -115,8 +115,8 @@ public class NodeResourceLimits {
return 4;
}
- private double minRealVcpu(ClusterAllocationFeatures features, ApplicationId applicationId, ClusterSpec cluster) {
- return minAdvertisedVcpu(applicationId, cluster, nodeRepository.exclusiveAllocation(features, cluster));
+ private double minRealVcpu(ClusterAllocationParams params, ApplicationId applicationId, ClusterSpec cluster) {
+ return minAdvertisedVcpu(applicationId, cluster, nodeRepository.exclusiveAllocation(params, cluster));
}
private static double minRealMemoryGb(ClusterSpec cluster) {
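
For reference, the isWithinRealLimits overload that Preparer (further down) wires into a Predicate<NodeResources> assembles from the hunks above as follows; a sketch taken from the + and context lines, not a copy of the file:

    /** Returns whether the real resources we'll end up with on a given tenant node are within limits */
    public boolean isWithinRealLimits(ClusterAllocationParams params, NodeResources realResources,
                                      ApplicationId applicationId, ClusterSpec cluster) {
        if (realResources.isUnspecified()) return true;
        if (realResources.vcpu() < minRealVcpu(params, applicationId, cluster)) return false; // minimum depends on exclusivity
        if (realResources.memoryGb() < minRealMemoryGb(cluster)) return false;
        if (realResources.diskGb() < minRealDiskGb()) return false;
        return true;
    }
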
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
index 94288fd44fa..3eaec67a89a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
@@ -58,7 +58,7 @@ public class Preparer {
/**
* Ensure sufficient nodes are reserved or active for the given application, group and cluster
*
- * @param features misc constants used in preparation
+ * @param params misc constants used in preparation
* @param application the application we are allocating to
* @param cluster the cluster and group we are allocating to
* @param requested a specification of the requested nodes
@@ -67,7 +67,7 @@ public class Preparer {
// Note: This operation may make persisted changes to the set of reserved and inactive nodes,
// but it may not change the set of active nodes, as the active nodes must stay in sync with the
// active config model which is changed on activate
- public List<Node> prepare(ClusterAllocationFeatures features, ApplicationId application, ClusterSpec cluster, NodeSpec requested) {
+ public List<Node> prepare(ClusterAllocationParams params, ApplicationId application, ClusterSpec cluster, NodeSpec requested) {
log.log(Level.FINE, () -> "Preparing " + cluster.type().name() + " " + cluster.id() + " with requested resources " +
requested.resources().orElse(NodeResources.unspecified()));
@@ -77,7 +77,7 @@ public class Preparer {
// and we can return nodes previously allocated.
LockedNodeList allNodes = nodeRepository.nodes().list(PROBE_LOCK);
NodeIndices indices = new NodeIndices(cluster.id(), allNodes);
- NodeAllocation probeAllocation = prepareAllocation(application, cluster, requested, indices::probeNext, allNodes, features);
+ NodeAllocation probeAllocation = prepareAllocation(application, cluster, requested, indices::probeNext, allNodes, params);
if (probeAllocation.fulfilledAndNoChanges()) {
List<Node> acceptedNodes = probeAllocation.finalNodes();
indices.commitProbe();
@@ -85,28 +85,28 @@ public class Preparer {
} else {
// There were some changes, so re-do the allocation with locks
indices.resetProbe();
- return prepareWithLocks(application, cluster, requested, indices, features);
+ return prepareWithLocks(application, cluster, requested, indices, params);
}
}
- private ApplicationMutex parentLockOrNull(ClusterAllocationFeatures features, NodeType type) {
- return NodeCandidate.canMakeHostExclusive(features.makeExclusive(), type, nodeRepository.zone().cloud().allowHostSharing()) ?
+ private ApplicationMutex parentLockOrNull(ClusterAllocationParams params, NodeType type) {
+ return NodeCandidate.canMakeHostExclusive(params.makeExclusive(), type, nodeRepository.zone().cloud().allowHostSharing()) ?
nodeRepository.applications().lock(InfrastructureApplication.withNodeType(type.parentNodeType()).id()) :
null;
}
/// Note that this will write to the node repo.
- private List<Node> prepareWithLocks(ApplicationId application, ClusterSpec cluster, NodeSpec requested, NodeIndices indices, ClusterAllocationFeatures features) {
+ private List<Node> prepareWithLocks(ApplicationId application, ClusterSpec cluster, NodeSpec requested, NodeIndices indices, ClusterAllocationParams params) {
Runnable waiter = null;
List<Node> acceptedNodes;
try (Mutex lock = nodeRepository.applications().lock(application);
- ApplicationMutex parentLockOrNull = parentLockOrNull(features, requested.type());
+ ApplicationMutex parentLockOrNull = parentLockOrNull(params, requested.type());
Mutex allocationLock = nodeRepository.nodes().lockUnallocated()) {
LockedNodeList allNodes = nodeRepository.nodes().list(allocationLock);
- NodeAllocation allocation = prepareAllocation(application, cluster, requested, indices::next, allNodes, features);
+ NodeAllocation allocation = prepareAllocation(application, cluster, requested, indices::next, allNodes, params);
NodeType hostType = allocation.nodeType().hostType();
if (canProvisionDynamically(hostType) && allocation.hostDeficit().isPresent()) {
- HostSharing sharing = hostSharing(features, cluster, hostType);
+ HostSharing sharing = hostSharing(params, cluster, hostType);
Version osVersion = nodeRepository.osVersions().targetFor(hostType).orElse(Version.emptyVersion);
NodeAllocation.HostDeficit deficit = allocation.hostDeficit().get();
Set<Node> hosts = new LinkedHashSet<>();
@@ -140,8 +140,8 @@ public class Preparer {
requested.cloudAccount(),
deficit.dueToFlavorUpgrade());
Predicate<NodeResources> realHostResourcesWithinLimits = resources ->
- nodeRepository.nodeResourceLimits().isWithinRealLimits(features, resources, application, cluster);
- waiter = hostProvisioner.get().provisionHosts(features, request, realHostResourcesWithinLimits, whenProvisioned);
+ nodeRepository.nodeResourceLimits().isWithinRealLimits(params, resources, application, cluster);
+ waiter = hostProvisioner.get().provisionHosts(params, request, realHostResourcesWithinLimits, whenProvisioned);
} catch (NodeAllocationException e) {
// Mark the nodes that were written to ZK in the consumer for deprovisioning. While these hosts do
// not exist, we cannot remove them from ZK here because other nodes may already have been
@@ -155,7 +155,7 @@ public class Preparer {
// Non-dynamically provisioned zone with a deficit because we just now retired some nodes.
// Try again, but without retiring
indices.resetProbe();
- List<Node> accepted = prepareWithLocks(application, cluster, cns.withoutRetiring(), indices, features);
+ List<Node> accepted = prepareWithLocks(application, cluster, cns.withoutRetiring(), indices, params);
log.warning("Prepared " + application + " " + cluster.id() + " without retirement due to lack of capacity");
return accepted;
}
@@ -187,9 +187,9 @@ public class Preparer {
}
private NodeAllocation prepareAllocation(ApplicationId application, ClusterSpec cluster, NodeSpec requested,
- Supplier<Integer> nextIndex, LockedNodeList allNodes, ClusterAllocationFeatures features) {
+ Supplier<Integer> nextIndex, LockedNodeList allNodes, ClusterAllocationParams params) {
validateAccount(requested.cloudAccount(), application, allNodes);
- NodeAllocation allocation = new NodeAllocation(allNodes, application, cluster, requested, nextIndex, nodeRepository, features);
+ NodeAllocation allocation = new NodeAllocation(allNodes, application, cluster, requested, nextIndex, nodeRepository, params);
var allocationContext = IP.Allocation.Context.from(nodeRepository.zone().cloud().name(),
requested.cloudAccount().isExclave(nodeRepository.zone()),
nodeRepository.nameResolver());
@@ -203,8 +203,8 @@ public class Preparer {
nodeRepository.nodes(),
nodeRepository.resourcesCalculator(),
nodeRepository.spareCount(),
- nodeRepository.exclusiveAllocation(features, cluster),
- features);
+ nodeRepository.exclusiveAllocation(params, cluster),
+ params);
allocation.offer(prioritizer.collect());
return allocation;
}
@@ -231,10 +231,10 @@ public class Preparer {
(hostType == NodeType.host || hostType.isConfigServerHostLike());
}
- private HostSharing hostSharing(ClusterAllocationFeatures features, ClusterSpec cluster, NodeType hostType) {
+ private HostSharing hostSharing(ClusterAllocationParams params, ClusterSpec cluster, NodeType hostType) {
if ( hostType.isSharable())
return nodeRepository.exclusiveProvisioning(cluster) ? HostSharing.provision :
- nodeRepository.exclusiveAllocation(features, cluster) ? HostSharing.exclusive :
+ nodeRepository.exclusiveAllocation(params, cluster) ? HostSharing.exclusive :
HostSharing.any;
else
return HostSharing.any;
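
The exclusivity decision that several Preparer hunks feed into is concentrated in hostSharing(...); reconstructed from the + and context lines above, the renamed method reads roughly:

    private HostSharing hostSharing(ClusterAllocationParams params, ClusterSpec cluster, NodeType hostType) {
        if (hostType.isSharable())
            return nodeRepository.exclusiveProvisioning(cluster)       ? HostSharing.provision :
                   nodeRepository.exclusiveAllocation(params, cluster) ? HostSharing.exclusive :
                                                                         HostSharing.any;
        else
            return HostSharing.any;
    }
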
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java
index f5d6028022a..5fca466d8ee 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java
@@ -13,7 +13,7 @@ import com.yahoo.vespa.hosted.provision.applications.ScalingEvent;
import com.yahoo.vespa.hosted.provision.autoscale.Autoscaling;
import com.yahoo.vespa.hosted.provision.autoscale.Limits;
import com.yahoo.vespa.hosted.provision.autoscale.Load;
-import com.yahoo.vespa.hosted.provision.provisioning.ClusterAllocationFeatures;
+import com.yahoo.vespa.hosted.provision.provisioning.ClusterAllocationParams;
import java.net.URI;
import java.util.List;
@@ -25,17 +25,17 @@ import java.util.List;
*/
public class ApplicationSerializer {
- public static Slime toSlime(ClusterAllocationFeatures features,
+ public static Slime toSlime(ClusterAllocationParams params,
Application application,
NodeList applicationNodes,
NodeRepository nodeRepository,
URI applicationUri) {
Slime slime = new Slime();
- toSlime(features, application, applicationNodes, nodeRepository, slime.setObject(), applicationUri);
+ toSlime(params, application, applicationNodes, nodeRepository, slime.setObject(), applicationUri);
return slime;
}
- private static void toSlime(ClusterAllocationFeatures features,
+ private static void toSlime(ClusterAllocationParams params,
Application application,
NodeList applicationNodes,
NodeRepository nodeRepository,
@@ -43,18 +43,18 @@ public class ApplicationSerializer {
URI applicationUri) {
object.setString("url", applicationUri.toString());
object.setString("id", application.id().toFullString());
- clustersToSlime(features, application, applicationNodes, nodeRepository, object.setObject("clusters"));
+ clustersToSlime(params, application, applicationNodes, nodeRepository, object.setObject("clusters"));
}
- private static void clustersToSlime(ClusterAllocationFeatures features,
+ private static void clustersToSlime(ClusterAllocationParams params,
Application application,
NodeList applicationNodes,
NodeRepository nodeRepository,
Cursor clustersObject) {
- application.clusters().values().forEach(cluster -> toSlime(features, application, cluster, applicationNodes, nodeRepository, clustersObject));
+ application.clusters().values().forEach(cluster -> toSlime(params, application, cluster, applicationNodes, nodeRepository, clustersObject));
}
- private static void toSlime(ClusterAllocationFeatures features,
+ private static void toSlime(ClusterAllocationParams params,
Application application,
Cluster cluster,
NodeList applicationNodes,
@@ -65,7 +65,7 @@ public class ApplicationSerializer {
ClusterResources currentResources = nodes.toResources();
Cursor clusterObject = clustersObject.setObject(cluster.id().value());
clusterObject.setString("type", nodes.clusterSpec().type().name());
- Limits limits = Limits.of(cluster).fullySpecified(features, nodes.clusterSpec(), nodeRepository, application.id());
+ Limits limits = Limits.of(cluster).fullySpecified(params, nodes.clusterSpec(), nodeRepository, application.id());
toSlime(limits.min(), clusterObject.setObject("min"));
toSlime(limits.max(), clusterObject.setObject("max"));
if ( ! cluster.groupSize().isEmpty())
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java
index 09eb111653d..5f96895b655 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java
@@ -47,7 +47,7 @@ import com.yahoo.vespa.hosted.provision.node.filter.NodeOsVersionFilter;
import com.yahoo.vespa.hosted.provision.node.filter.NodeTypeFilter;
import com.yahoo.vespa.hosted.provision.node.filter.ParentHostFilter;
import com.yahoo.vespa.hosted.provision.maintenance.InfraApplicationRedeployer;
-import com.yahoo.vespa.hosted.provision.provisioning.ClusterAllocationFeatures;
+import com.yahoo.vespa.hosted.provision.provisioning.ClusterAllocationParams;
import com.yahoo.vespa.hosted.provision.restapi.NodesResponse.ResponseType;
import com.yahoo.vespa.orchestrator.Orchestrator;
import com.yahoo.yolean.Exceptions;
@@ -465,8 +465,8 @@ public class NodesV2ApiHandler extends ThreadedHttpRequestHandler {
Optional<Application> application = nodeRepository.applications().get(id);
if (application.isEmpty())
return ErrorResponse.notFoundError("No application '" + id + "'");
- var features = ClusterAllocationFeatures.from(nodeRepository.flagSource(), id, Vtag.currentVersion);
- Slime slime = ApplicationSerializer.toSlime(features,
+ var params = ClusterAllocationParams.from(nodeRepository.flagSource(), id, Vtag.currentVersion);
+ Slime slime = ApplicationSerializer.toSlime(params,
application.get(),
nodeRepository.nodes().list(Node.State.active).owner(id),
nodeRepository,
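
At the REST API boundary the params object is built once from the flag source and handed straight to serialization. Consolidated from the hunk above, the call reads roughly as follows; applicationUri is a placeholder, since the trailing URI argument lies outside the hunk shown:

    var params = ClusterAllocationParams.from(nodeRepository.flagSource(), id, Vtag.currentVersion);
    Slime slime = ApplicationSerializer.toSlime(params,
                                                application.get(),
                                                nodeRepository.nodes().list(Node.State.active).owner(id),
                                                nodeRepository,
                                                applicationUri); // placeholder; the real argument is outside the hunk shown
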
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java
index f249d10c21d..7bf6aec60ca 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java
@@ -12,7 +12,7 @@ import com.yahoo.config.provision.NodeType;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.IP;
-import com.yahoo.vespa.hosted.provision.provisioning.ClusterAllocationFeatures;
+import com.yahoo.vespa.hosted.provision.provisioning.ClusterAllocationParams;
import com.yahoo.vespa.hosted.provision.provisioning.FatalProvisioningException;
import com.yahoo.vespa.hosted.provision.provisioning.HostIpConfig;
import com.yahoo.vespa.hosted.provision.provisioning.HostProvisionRequest;
@@ -74,7 +74,7 @@ public class MockHostProvisioner implements HostProvisioner {
}
@Override
- public Runnable provisionHosts(ClusterAllocationFeatures features,
+ public Runnable provisionHosts(ClusterAllocationParams params,
HostProvisionRequest request,
Predicate<NodeResources> realHostResourcesWithinLimits,
Consumer<List<ProvisionedHost>> whenProvisioned) throws NodeAllocationException {
@@ -268,7 +268,7 @@ public class MockHostProvisioner implements HostProvisioner {
/** Fail call to {@link MockHostProvisioner#provision(com.yahoo.vespa.hosted.provision.Node)} */
failProvisioning,
- /** Fail call to {@link MockHostProvisioner#provisionHosts(ClusterAllocationFeatures, HostProvisionRequest, Predicate, Consumer)} */
+ /** Fail call to {@link MockHostProvisioner#provisionHosts(ClusterAllocationParams, HostProvisionRequest, Predicate, Consumer)} */
failProvisionRequest,
/** Fail call to {@link MockHostProvisioner#deprovision(com.yahoo.vespa.hosted.provision.Node)} */
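
Implementations of HostProvisioner pick up the renamed first parameter without otherwise changing shape. A hypothetical minimal override under the signature shown above; this is not what MockHostProvisioner actually does, whose body lies outside the hunk:

    @Override
    public Runnable provisionHosts(ClusterAllocationParams params,
                                   HostProvisionRequest request,
                                   Predicate<NodeResources> realHostResourcesWithinLimits,
                                   Consumer<List<ProvisionedHost>> whenProvisioned) throws NodeAllocationException {
        whenProvisioned.accept(List.of()); // hypothetical no-op: report that no hosts were provisioned
        return () -> { };                  // nothing to wait for in this sketch
    }
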
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
index 6fa0b4beeba..921c059f3bf 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
@@ -389,7 +389,7 @@ public class AutoscalingTest {
.build();
NodeResources defaultResources =
- new CapacityPolicies(fixture.tester().nodeRepository()).specifyFully(fixture.features, NodeResources.unspecified(), fixture.clusterSpec, fixture.applicationId);
+ new CapacityPolicies(fixture.tester().nodeRepository()).specifyFully(fixture.params, NodeResources.unspecified(), fixture.clusterSpec, fixture.applicationId);
fixture.tester().assertResources("Min number of nodes and default resources",
2, 1, defaultResources,
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
index c02a9221485..561cc279669 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
@@ -10,7 +10,7 @@ import com.yahoo.test.ManualClock;
import com.yahoo.vespa.hosted.provision.applications.Application;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.applications.Status;
-import com.yahoo.vespa.hosted.provision.provisioning.ClusterAllocationFeatures;
+import com.yahoo.vespa.hosted.provision.provisioning.ClusterAllocationParams;
import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
import org.junit.Test;
@@ -90,12 +90,12 @@ public class ClusterModelTest {
Cluster cluster = cluster();
application = application.with(cluster);
var nodeRepository = new ProvisioningTester.Builder().build().nodeRepository();
- var features = ClusterAllocationFeatures.from(nodeRepository.flagSource(), application.id(), clusterSpec.vespaVersion());
- return new ClusterModel(features,
+ var params = ClusterAllocationParams.from(nodeRepository.flagSource(), application.id(), clusterSpec.vespaVersion());
+ return new ClusterModel(params,
nodeRepository,
application.with(status),
clusterSpec, cluster,
- new AllocatableResources(features, clusterResources(), clusterSpec, nodeRepository),
+ new AllocatableResources(params, clusterResources(), clusterSpec, nodeRepository),
clock, Duration.ofMinutes(10), Duration.ofMinutes(5),
timeseries(cluster,100, queryRate, writeRate, clock),
ClusterNodesTimeseries.empty());
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java
index 33de791f3bc..c79cbb06ad9 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java
@@ -26,7 +26,7 @@ import com.yahoo.vespa.hosted.provision.autoscale.awsnodes.AwsHostResourcesCalcu
import com.yahoo.vespa.hosted.provision.autoscale.awsnodes.AwsNodeTypes;
import com.yahoo.vespa.hosted.provision.provisioning.DynamicProvisioningTester;
import com.yahoo.vespa.hosted.provision.provisioning.HostResourcesCalculator;
-import com.yahoo.vespa.hosted.provision.provisioning.ClusterAllocationFeatures;
+import com.yahoo.vespa.hosted.provision.provisioning.ClusterAllocationParams;
import java.time.Duration;
import java.util.Arrays;
@@ -40,7 +40,7 @@ import java.util.Optional;
*/
public class Fixture {
- final ClusterAllocationFeatures features;
+ final ClusterAllocationParams params;
final DynamicProvisioningTester tester;
final ApplicationId applicationId;
final ClusterSpec clusterSpec;
@@ -50,7 +50,7 @@ public class Fixture {
Autoscaling lastAutoscaling = Autoscaling.empty();
public Fixture(Fixture.Builder builder, Optional<ClusterResources> initialResources, int hostCount) {
- features = ClusterAllocationFeatures.from(builder.flagSource, builder.application, builder.cluster.vespaVersion());
+ params = ClusterAllocationParams.from(builder.flagSource, builder.application, builder.cluster.vespaVersion());
applicationId = builder.application;
clusterSpec = builder.cluster;
capacity = builder.capacity;
@@ -83,7 +83,7 @@ public class Fixture {
public Capacity capacity() { return capacity; }
public ClusterModel clusterModel() {
- return new ClusterModel(features,
+ return new ClusterModel(params,
tester.nodeRepository(),
application(),
clusterSpec,