author    Håkon Hallingstad <hakon@yahooinc.com>  2023-11-05 16:49:09 +0100
committer Håkon Hallingstad <hakon@yahooinc.com>  2023-11-05 16:49:09 +0100
commit    93db1d721aeda0c1a5e4ac14d3b795a36d92788c (patch)
tree      598071e102a1fb1c5c6fab3204bbe853eb5d465a
parent    1daa89e21477af3830d95e895b9034949eee9d98 (diff)
Include NodeRepository, ApplicationId, and ClusterSpec as parameters
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableResources.java  11
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java  4
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java  9
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java  45
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Limits.java  6
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java  62
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgrader.java  6
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationParams.java  84
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java  46
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java  3
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java  50
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java  14
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java  2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java  28
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java  4
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java  5
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java  2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java  7
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java  8
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTester.java  2
20 files changed, 220 insertions, 178 deletions
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableResources.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableResources.java
index 119724ac154..4f116630af8 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableResources.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableResources.java
@@ -34,15 +34,12 @@ public class AllocatableResources {
private final double fulfilment;
/** Fake allocatable resources from requested capacity */
- public AllocatableResources(AllocationParams params,
- ClusterResources requested,
- ClusterSpec clusterSpec,
- NodeRepository nodeRepository) {
+ public AllocatableResources(AllocationParams params, ClusterResources requested) {
this.nodes = requested.nodes();
this.groups = requested.groups();
- this.realResources = nodeRepository.resourcesCalculator().requestToReal(requested.nodeResources(), nodeRepository.exclusiveAllocation(params, clusterSpec), false);
+ this.realResources = params.nodeRepository().resourcesCalculator().requestToReal(requested.nodeResources(), params.exclusiveAllocation(), false);
this.advertisedResources = requested.nodeResources();
- this.clusterSpec = clusterSpec;
+ this.clusterSpec = params.cluster();
this.fulfilment = 1;
}
@@ -178,7 +175,7 @@ public class AllocatableResources {
ClusterModel model,
NodeRepository nodeRepository) {
var systemLimits = nodeRepository.nodeResourceLimits();
- boolean exclusive = nodeRepository.exclusiveAllocation(params, clusterSpec);
+ boolean exclusive = params.exclusiveAllocation();
if (! exclusive) {
// We decide resources: Add overhead to what we'll request (advertised) to make sure real becomes (at least) cappedNodeResources
var allocatableResources = calculateAllocatableResources(params,
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
index c597dc60e70..da6636b9ebc 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
@@ -44,7 +44,7 @@ public class AllocationOptimizer {
new ClusterResources(maximumNodes, maximumNodes, NodeResources.unspecified()),
IntRange.empty());
else
- limits = atLeast(minimumNodes, limits).fullySpecified(params, model.current().clusterSpec(), nodeRepository, model.application().id());
+ limits = atLeast(minimumNodes, limits).fullySpecified(params, nodeRepository);
Optional<AllocatableResources> bestAllocation = Optional.empty();
var availableRealHostResources = nodeRepository.zone().cloud().dynamicProvisioning()
? nodeRepository.flavors().getFlavors().stream().map(flavor -> flavor.resources()).toList()
@@ -77,7 +77,7 @@ public class AllocationOptimizer {
/** Returns the max resources of a host one node may allocate. */
private NodeResources maxResourcesOf(AllocationParams params, NodeResources hostResources, ClusterModel model) {
- if (nodeRepository.exclusiveAllocation(params, model.clusterSpec())) return hostResources;
+ if (params.exclusiveAllocation()) return hostResources;
// static, shared hosts: Allocate at most half of the host cpu to simplify management
return hostResources.withVcpu(hostResources.vcpu() / 2);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
index e739b249696..59dfc167d6a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.hosted.provision.autoscale;
import com.yahoo.config.provision.ClusterResources;
+import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Application;
@@ -55,14 +56,14 @@ public class Autoscaler {
}
private Autoscaling autoscale(Application application, Cluster cluster, NodeList clusterNodes, Limits limits) {
- AllocationParams params = AllocationParams.from(nodeRepository.flagSource(), application.id(), clusterNodes.clusterSpec().vespaVersion());
+ NodeList notRetired = clusterNodes.not().retired();
+ ClusterSpec clusterSpec = notRetired.clusterSpec();
+ AllocationParams params = AllocationParams.from(nodeRepository, application.id(), clusterSpec, clusterNodes.clusterSpec().vespaVersion());
var model = new ClusterModel(params,
- nodeRepository,
application,
- clusterNodes.not().retired().clusterSpec(),
cluster,
clusterNodes,
- new AllocatableResources(clusterNodes.not().retired(), nodeRepository),
+ new AllocatableResources(notRetired, nodeRepository),
nodeRepository.metricsDb(),
nodeRepository.clock());
if (model.isEmpty()) return Autoscaling.empty();
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
index 4b82e631c98..de132d53d63 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
@@ -48,9 +48,7 @@ public class ClusterModel {
private static final double fixedCpuCostFraction = 0.1;
private final AllocationParams params;
- private final NodeRepository nodeRepository;
private final Application application;
- private final ClusterSpec clusterSpec;
private final Cluster cluster;
private final AllocatableResources current;
@@ -77,33 +75,27 @@ public class ClusterModel {
private OptionalDouble averageQueryRate = null;
public ClusterModel(AllocationParams params,
- NodeRepository nodeRepository,
Application application,
- ClusterSpec clusterSpec,
Cluster cluster,
NodeList clusterNodes,
AllocatableResources current,
MetricsDb metricsDb,
Clock clock) {
this.params = params;
- this.nodeRepository = nodeRepository;
this.application = application;
- this.clusterSpec = clusterSpec;
this.cluster = cluster;
this.nodes = clusterNodes;
this.current = current;
this.clock = clock;
- this.scalingDuration = cluster.scalingDuration(clusterSpec);
- this.allocationDuration = cluster.allocationDuration(clusterSpec);
+ this.scalingDuration = cluster.scalingDuration(params.cluster());
+ this.allocationDuration = cluster.allocationDuration(params.cluster());
this.clusterTimeseries = metricsDb.getClusterTimeseries(application.id(), cluster.id());
this.nodeTimeseries = new ClusterNodesTimeseries(scalingDuration(), cluster, nodes, metricsDb);
this.at = clock.instant();
}
ClusterModel(AllocationParams params,
- NodeRepository nodeRepository,
Application application,
- ClusterSpec clusterSpec,
Cluster cluster,
AllocatableResources current,
Clock clock,
@@ -112,9 +104,7 @@ public class ClusterModel {
ClusterTimeseries clusterTimeseries,
ClusterNodesTimeseries nodeTimeseries) {
this.params = params;
- this.nodeRepository = nodeRepository;
this.application = application;
- this.clusterSpec = clusterSpec;
this.cluster = cluster;
this.nodes = NodeList.of();
this.current = current;
@@ -128,7 +118,7 @@ public class ClusterModel {
}
public Application application() { return application; }
- public ClusterSpec clusterSpec() { return clusterSpec; }
+ public ClusterSpec clusterSpec() { return params.cluster(); }
public AllocatableResources current() { return current; }
private ClusterNodesTimeseries nodeTimeseries() { return nodeTimeseries; }
private ClusterTimeseries clusterTimeseries() { return clusterTimeseries; }
@@ -150,7 +140,7 @@ public class ClusterModel {
public Duration allocationDuration() { return allocationDuration; }
public boolean isContent() {
- return clusterSpec.type().isContent();
+ return params.cluster().type().isContent();
}
/** Returns the predicted duration of data redistribution in this cluster. */
@@ -175,7 +165,7 @@ public class ClusterModel {
}
public boolean isExclusive() {
- return nodeRepository.exclusiveAllocation(params, clusterSpec);
+ return params.exclusiveAllocation();
}
/** Returns the relative load adjustment that should be made to this cluster given available measurements. */
@@ -283,7 +273,7 @@ public class ClusterModel {
* cluster.bcpGroupInfo().growthRateHeadroom() * trafficShiftHeadroom();
double neededTotalVcpuPerGroup = cluster.bcpGroupInfo().cpuCostPerQuery() * targetQueryRateToHandle / groupCount() +
( 1 - cpu.queryFraction()) * cpu.idealLoad() *
- (clusterSpec.type().isContainer() ? 1 : groupSize());
+ (params.cluster().type().isContainer() ? 1 : groupSize());
// Max 1: Only use bcp group info if it indicates that we need to scale *up*
double cpuAdjustment = Math.max(1.0, neededTotalVcpuPerGroup / currentClusterTotalVcpuPerGroup);
return ideal.withCpu(ideal.cpu() / cpuAdjustment);
@@ -347,7 +337,7 @@ public class ClusterModel {
/** Returns the headroom for growth during organic traffic growth as a multiple of current resources. */
private double growthRateHeadroom() {
- if ( ! nodeRepository.zone().environment().isProduction()) return 1;
+ if ( ! params.nodeRepository().zone().environment().isProduction()) return 1;
double growthRateHeadroom = 1 + maxQueryGrowthRate() * scalingDuration().toMinutes();
// Cap headroom at 10% above the historical observed peak
if (queryFractionOfMax() != 0)
@@ -361,7 +351,7 @@ public class ClusterModel {
* as a multiple of current resources.
*/
private double trafficShiftHeadroom() {
- if ( ! nodeRepository.zone().environment().isProduction()) return 1;
+ if ( ! params.nodeRepository().zone().environment().isProduction()) return 1;
if (canRescaleWithinBcpDeadline()) return 1;
double trafficShiftHeadroom;
if (application.status().maxReadShare() == 0) // No traffic fraction data
@@ -397,7 +387,7 @@ public class ClusterModel {
OptionalDouble costPerQuery() {
if (averageQueryRate().isEmpty() || averageQueryRate().getAsDouble() == 0.0) return OptionalDouble.empty();
// TODO: Query rate should generally be sampled at the time where we see the peak resource usage
- int fanOut = clusterSpec.type().isContainer() ? 1 : groupSize();
+ int fanOut = params.cluster().type().isContainer() ? 1 : groupSize();
return OptionalDouble.of(peakLoad().cpu() * cpu.queryFraction() * fanOut * nodes.not().retired().first().get().resources().vcpu()
/ averageQueryRate().getAsDouble() / groupCount());
}
@@ -420,8 +410,8 @@ public class ClusterModel {
private class MemoryModel {
double idealLoad() {
- if (clusterSpec.type().isContainer()) return idealContainerMemoryLoad;
- if (clusterSpec.type() == ClusterSpec.Type.admin) return idealContainerMemoryLoad; // Not autoscaled, but ideal shown in console
+ if (params.cluster().type().isContainer()) return idealContainerMemoryLoad;
+ if (params.cluster().type() == ClusterSpec.Type.admin) return idealContainerMemoryLoad; // Not autoscaled, but ideal shown in console
return idealContentMemoryLoad;
}
@@ -438,17 +428,12 @@ public class ClusterModel {
double averageReal() {
if (nodes.isEmpty()) { // we're estimating
- var initialResources = new CapacityPolicies(nodeRepository).specifyFully(params,
- cluster.minResources().nodeResources(),
- clusterSpec,
- application.id());
- return nodeRepository.resourcesCalculator().requestToReal(initialResources,
- nodeRepository.exclusiveAllocation(params, clusterSpec),
- false).memoryGb();
+ var initialResources = new CapacityPolicies(params.nodeRepository()).specifyFully(params, cluster.minResources().nodeResources());
+ return params.nodeRepository().resourcesCalculator().requestToReal(initialResources, params.exclusiveAllocation(), false).memoryGb();
}
else {
return nodes.stream()
- .mapToDouble(node -> nodeRepository.resourcesCalculator().realResourcesOf(node, nodeRepository).memoryGb())
+ .mapToDouble(node -> params.nodeRepository().resourcesCalculator().realResourcesOf(node, params.nodeRepository()).memoryGb())
.average()
.getAsDouble();
}
@@ -461,7 +446,7 @@ public class ClusterModel {
double idealLoad() {
// Stateless clusters are not expected to consume more disk over time -
// if they do it is due to logs which will be rotated away right before the disk is full
- return clusterSpec.isStateful() ? idealContentDiskLoad : idealContainerDiskLoad;
+ return params.cluster().isStateful() ? idealContentDiskLoad : idealContainerDiskLoad;
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Limits.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Limits.java
index 8f52f8c5c9f..54e4704987a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Limits.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Limits.java
@@ -66,12 +66,12 @@ public class Limits {
return resources;
}
- public Limits fullySpecified(AllocationParams params, ClusterSpec clusterSpec, NodeRepository nodeRepository, ApplicationId applicationId) {
+ public Limits fullySpecified(AllocationParams params, NodeRepository nodeRepository) {
if (this.isEmpty()) throw new IllegalStateException("Unspecified limits can not be made fully specified");
var capacityPolicies = new CapacityPolicies(nodeRepository);
- return new Limits(capacityPolicies.specifyFully(params, min, clusterSpec, applicationId),
- capacityPolicies.specifyFully(params, max, clusterSpec, applicationId),
+ return new Limits(capacityPolicies.specifyFully(params, min),
+ capacityPolicies.specifyFully(params, max),
groupSize);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java
index 50ccad911b5..e8cd2e7b6fe 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java
@@ -157,15 +157,13 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
}
private List<Node> provision(NodeList nodeList) {
- ApplicationId application = ApplicationId.defaultId();
- var params = AllocationParams.from(flagSource, application, Vtag.currentVersion);
-
- return provisionUntilNoDeficit(params, application, nodeList).stream()
- .sorted(comparing(node -> node.history().events().stream()
- .map(History.Event::at)
- .min(naturalOrder())
- .orElse(Instant.MIN)))
- .toList();
+ return provisionUntilNoDeficit(nodeList)
+ .stream()
+ .sorted(comparing(node -> node.history().events().stream()
+ .map(History.Event::at)
+ .min(naturalOrder())
+ .orElse(Instant.MIN)))
+ .toList();
}
private static boolean canRemoveHost(Node host) {
@@ -191,7 +189,9 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
* @throws IllegalStateException if there was an algorithmic problem, and in case message
* should be sufficient (avoid no stack trace).
*/
- private List<Node> provisionUntilNoDeficit(AllocationParams params, ApplicationId application, NodeList nodeList) {
+ private List<Node> provisionUntilNoDeficit(NodeList nodeList) {
+ // Ensure flags for allocation params are evaluated before provision loop. Use a dummy cluster spec
+ var params = AllocationParams.from(nodeRepository(), ApplicationId.defaultId(), asClusterSpec(Optional.empty(), 0), Vtag.currentVersion);
List<ClusterCapacity> preprovisionCapacity = preprovisionCapacityFlag.value();
// Worst-case each ClusterCapacity in preprovisionCapacity will require an allocation.
@@ -200,7 +200,7 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
var nodesPlusProvisioned = new ArrayList<>(nodeList.asList());
for (int numProvisions = 0;; ++numProvisions) {
var nodesPlusProvisionedPlusAllocated = new ArrayList<>(nodesPlusProvisioned);
- Optional<ClusterCapacity> deficit = allocatePreprovisionCapacity(application, preprovisionCapacity, nodesPlusProvisionedPlusAllocated, params);
+ Optional<ClusterCapacity> deficit = allocatePreprovisionCapacity(params, preprovisionCapacity, nodesPlusProvisionedPlusAllocated);
if (deficit.isEmpty()) {
return nodesPlusProvisionedPlusAllocated;
}
@@ -225,7 +225,7 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
}
Version osVersion = nodeRepository().osVersions().targetFor(NodeType.host).orElse(Version.emptyVersion);
List<Integer> provisionIndices = nodeRepository().database().readProvisionIndices(count);
- HostSharing sharingMode = nodeRepository().exclusiveAllocation(params, asSpec(clusterType, 0)) ? HostSharing.exclusive : HostSharing.shared;
+ HostSharing sharingMode = params.exclusiveAllocation() ? HostSharing.exclusive : HostSharing.shared;
HostProvisionRequest request = new HostProvisionRequest(provisionIndices, NodeType.host, nodeResources,
ApplicationId.defaultId(), osVersion,
sharingMode, clusterType.map(ClusterSpec.Type::valueOf), Optional.empty(),
@@ -233,7 +233,7 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
List<Node> hosts = new ArrayList<>();
Runnable waiter;
try (var lock = nodeRepository().nodes().lockUnallocated()) {
- waiter = hostProvisioner.provisionHosts(params,
+ waiter = hostProvisioner.provisionHosts(params.sharedHost(),
request,
resources -> true,
provisionedHosts -> {
@@ -261,14 +261,18 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
* they are added to {@code mutableNodes}
* @return the part of a cluster capacity it was unable to allocate, if any
*/
- private Optional<ClusterCapacity> allocatePreprovisionCapacity(ApplicationId application,
+ private Optional<ClusterCapacity> allocatePreprovisionCapacity(AllocationParams params,
List<ClusterCapacity> preprovisionCapacity,
- ArrayList<Node> mutableNodes,
- AllocationParams params) {
+ ArrayList<Node> mutableNodes) {
for (int clusterIndex = 0; clusterIndex < preprovisionCapacity.size(); ++clusterIndex) {
ClusterCapacity clusterCapacity = preprovisionCapacity.get(clusterIndex);
+
+ params = params.with(asClusterSpec(Optional.ofNullable(clusterCapacity.clusterType()), clusterIndex));
+ if (params.exclusiveProvisioning())
+ throw new IllegalStateException("Preprovision cluster requires exclusive provisioning: " + clusterCapacity);
+
LockedNodeList allNodes = new LockedNodeList(mutableNodes, () -> {});
- List<Node> candidates = findCandidates(application, clusterCapacity, clusterIndex, allNodes, params);
+ List<Node> candidates = findCandidates(params, clusterCapacity, allNodes);
int deficit = Math.max(0, clusterCapacity.count() - candidates.size());
if (deficit > 0) {
return Optional.of(clusterCapacity.withCount(deficit));
@@ -281,28 +285,26 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
return Optional.empty();
}
- private List<Node> findCandidates(ApplicationId application, ClusterCapacity clusterCapacity, int clusterIndex,
- LockedNodeList allNodes, AllocationParams params) {
+ private List<Node> findCandidates(AllocationParams params, ClusterCapacity clusterCapacity, LockedNodeList allNodes) {
NodeResources nodeResources = toNodeResources(clusterCapacity);
// We'll allocate each ClusterCapacity as a unique cluster in a dummy application
- ClusterSpec cluster = asSpec(Optional.ofNullable(clusterCapacity.clusterType()), clusterIndex);
NodeSpec nodeSpec = NodeSpec.from(clusterCapacity.count(), 1, nodeResources, false, true,
nodeRepository().zone().cloud().account(), Duration.ZERO);
var allocationContext = IP.Allocation.Context.from(nodeRepository().zone().cloud().name(),
nodeSpec.cloudAccount().isExclave(nodeRepository().zone()),
nodeRepository().nameResolver());
- NodePrioritizer prioritizer = new NodePrioritizer(allNodes, application, cluster, nodeSpec,
+ NodePrioritizer prioritizer = new NodePrioritizer(allNodes, params.application(), params.cluster(), nodeSpec,
true, false, allocationContext, nodeRepository().nodes(),
nodeRepository().resourcesCalculator(), nodeRepository().spareCount(),
- nodeRepository().exclusiveAllocation(params, cluster), params);
+ params.exclusiveAllocation(), params);
List<NodeCandidate> nodeCandidates = prioritizer.collect()
.stream()
- .filter(node -> node.violatesExclusivity(cluster,
- application,
- nodeRepository().exclusiveClusterType(params, cluster),
- nodeRepository().exclusiveAllocation(params, cluster),
- false,
+ .filter(node -> node.violatesExclusivity(params.cluster(),
+ params.application(),
+ params.exclusiveClusterType(),
+ params.exclusiveAllocation(),
+ params.exclusiveProvisioning(),
nodeRepository().zone().cloud().allowHostSharing(),
allNodes,
params)
@@ -313,14 +315,14 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
.stream()
.limit(clusterCapacity.count())
.map(candidate -> candidate.toNode()
- .allocate(application,
- ClusterMembership.from(cluster, index.next()),
+ .allocate(params.application(),
+ ClusterMembership.from(params.cluster(), index.next()),
nodeResources,
nodeRepository().clock().instant()))
.toList();
}
- private static ClusterSpec asSpec(Optional<String> clusterType, int index) {
+ private static ClusterSpec asClusterSpec(Optional<String> clusterType, int index) {
return ClusterSpec.request(clusterType.map(ClusterSpec.Type::from).orElse(ClusterSpec.Type.content),
ClusterSpec.Id.from(String.valueOf(index)))
.vespaVersion(Vtag.currentVersion) // Needed, but should not be used here.
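
For illustration of the pattern above: HostCapacityMaintainer now evaluates the flags once, into a single AllocationParams built with a dummy cluster spec, and the preprovision loop re-binds that snapshot to each cluster with params.with(...). The following is a minimal, self-contained sketch of that loop shape; Params, ClusterCapacity and candidateCount are simplified stand-ins, not the real Vespa types.

import java.util.List;
import java.util.Optional;

// Sketch of the "read flags once, rebind per cluster" pattern in provisionUntilNoDeficit /
// allocatePreprovisionCapacity above. All types here are simplified placeholders.
public class PreprovisionLoopSketch {

    /** Stands in for AllocationParams: a snapshot that can be re-bound to another cluster. */
    record Params(String clusterId, boolean exclusiveProvisioning) {
        Params with(String clusterId) {
            // In the real record, the exclusivity booleans are re-derived from the new cluster here;
            // the flag values captured at construction are reused, not re-read.
            return new Params(clusterId, exclusiveProvisioning);
        }
    }

    record ClusterCapacity(int count, String clusterType) {}

    /** Returns the unsatisfiable part of the first cluster with a deficit, if any. */
    static Optional<ClusterCapacity> allocatePreprovisionCapacity(Params params, List<ClusterCapacity> preprovisionCapacity) {
        for (int clusterIndex = 0; clusterIndex < preprovisionCapacity.size(); ++clusterIndex) {
            ClusterCapacity clusterCapacity = preprovisionCapacity.get(clusterIndex);
            params = params.with(String.valueOf(clusterIndex)); // rebind the snapshot to this cluster
            if (params.exclusiveProvisioning())
                throw new IllegalStateException("Preprovision cluster requires exclusive provisioning: " + clusterCapacity);
            int deficit = Math.max(0, clusterCapacity.count() - candidateCount(params, clusterCapacity));
            if (deficit > 0) return Optional.of(new ClusterCapacity(deficit, clusterCapacity.clusterType()));
        }
        return Optional.empty();
    }

    // Placeholder for the real candidate search against the locked node list.
    static int candidateCount(Params params, ClusterCapacity capacity) { return capacity.count(); }
}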
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgrader.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgrader.java
index 7fcd5322675..e7b86c32618 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgrader.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostFlavorUpgrader.java
@@ -1,6 +1,7 @@
// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.maintenance;
+import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Deployer;
import com.yahoo.config.provision.NodeAllocationException;
import com.yahoo.config.provision.NodeResources;
@@ -72,8 +73,9 @@ public class HostFlavorUpgrader extends NodeRepositoryMaintainer {
if (parent.isEmpty()) continue;
if (exhaustedFlavors.contains(parent.get().flavor().name())) continue;
Allocation allocation = node.allocation().get();
- var params = AllocationParams.from(nodeRepository().flagSource(), allocation.owner(), allocation.membership().cluster().vespaVersion());
- Predicate<NodeResources> realHostResourcesWithinLimits = resources -> nodeRepository().nodeResourceLimits().isWithinRealLimits(params, resources, allocation.owner(), allocation.membership().cluster());
+ ClusterSpec cluster = allocation.membership().cluster();
+ var params = AllocationParams.from(nodeRepository(), allocation.owner(), cluster, cluster.vespaVersion());
+ Predicate<NodeResources> realHostResourcesWithinLimits = resources -> nodeRepository().nodeResourceLimits().isWithinRealLimits(params, resources, allocation.owner(), cluster);
if (!hostProvisioner.canUpgradeFlavor(parent.get(), node, realHostResourcesWithinLimits)) continue;
if (parent.get().status().wantToUpgradeFlavor() && allocation.membership().retired()) continue; // Already upgrading
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationParams.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationParams.java
index 50601978bba..59a1f7b025a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationParams.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/AllocationParams.java
@@ -3,27 +3,87 @@ package com.yahoo.vespa.hosted.provision.provisioning;
import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.flags.FetchVector;
-import com.yahoo.vespa.flags.FlagSource;
import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.flags.custom.SharedHost;
+import com.yahoo.vespa.hosted.provision.NodeRepository;
+
+import static java.util.Objects.requireNonNull;
/**
- * Miscellaneous constants used while preparing an allocation for a cluster.
+ * Miscellaneous parameters for the preparation of an allocation of a cluster.
*
- * <p>Typically used to access feature flags that must be evaluated once at the start of the preparation,
- * to avoid inconsistencies if evaluated multiple times during.</p>
+ * <p>Ideal for feature flags that guards new code paths in various parts of the allocation code.</p>
*
+ * @param exclusiveClusterType whether nodes must be allocated to hosts that are exclusive to the cluster type
+ * @param exclusiveAllocation whether nodes are allocated exclusively in this instance given this cluster spec.
+ * Exclusive allocation requires that the wanted node resources matches the advertised
+ * resources of the node perfectly
+ * @param exclusiveProvisioning Whether the nodes of this cluster must be running on hosts that are specifically provisioned for the application
+ * @param sharedHost snapshot of shared-host flag
+ * @param makeExclusive snapshot of make-exclusive flag
* @author hakonhall
*/
-public record AllocationParams(boolean makeExclusive, SharedHost sharedHost) {
- public static AllocationParams from(FlagSource flagSource, ApplicationId application, Version version) {
- return new AllocationParams(Flags.MAKE_EXCLUSIVE.bindTo(flagSource)
- .with(FetchVector.Dimension.TENANT_ID, application.tenant().value())
- .with(FetchVector.Dimension.INSTANCE_ID, application.serializedForm())
- .with(FetchVector.Dimension.VESPA_VERSION, version.toFullString())
- .value(),
- PermanentFlags.SHARED_HOST.bindTo(flagSource).value());
+public record AllocationParams(NodeRepository nodeRepository,
+ ApplicationId application,
+ ClusterSpec cluster,
+ boolean exclusiveClusterType,
+ boolean exclusiveAllocation,
+ boolean exclusiveProvisioning,
+ SharedHost sharedHost,
+ boolean makeExclusive) {
+
+ public AllocationParams {
+ requireNonNull(nodeRepository, "nodeRepository cannot be null");
+ requireNonNull(application, "application cannot be null");
+ requireNonNull(cluster, "cluster cannot be null");
+ requireNonNull(sharedHost, "sharedHost cannot be null");
+ }
+
+ /** The canonical way of constructing an instance: ensures consistencies between the various parameters. */
+ public static AllocationParams from(NodeRepository nodeRepository, ApplicationId application, ClusterSpec cluster, Version version) {
+ return from(nodeRepository,
+ application,
+ cluster,
+ PermanentFlags.SHARED_HOST.bindTo(nodeRepository.flagSource()).value(),
+ Flags.MAKE_EXCLUSIVE.bindTo(nodeRepository.flagSource())
+ .with(FetchVector.Dimension.TENANT_ID, application.tenant().value())
+ .with(FetchVector.Dimension.INSTANCE_ID, application.serializedForm())
+ .with(FetchVector.Dimension.VESPA_VERSION, version.toFullString())
+ .value());
+ }
+
+ /**
+ * Returns the same allocation parameters, but as-if it was built with the given cluster. Flags are NOT re-evaluated,
+ * but exclusivity may change.
+ */
+ public AllocationParams with(ClusterSpec cluster) { return from(nodeRepository, application, cluster, sharedHost, makeExclusive); }
+
+ private static AllocationParams from(NodeRepository nodeRepository, ApplicationId application, ClusterSpec cluster, SharedHost sharedHost, boolean makeExclusive) {
+ return new AllocationParams(nodeRepository,
+ application,
+ cluster,
+ exclusiveClusterType(cluster, sharedHost),
+ exclusiveAllocation(nodeRepository.zone(), cluster, sharedHost),
+ exclusiveProvisioning(nodeRepository.zone(), cluster),
+ sharedHost,
+ makeExclusive);
+ }
+
+ private static boolean exclusiveClusterType(ClusterSpec cluster, SharedHost sharedHost) {
+ return sharedHost.hasClusterType(cluster.type().name());
+ }
+
+ private static boolean exclusiveAllocation(Zone zone, ClusterSpec cluster, SharedHost sharedHost) {
+ return cluster.isExclusive() ||
+ ( cluster.type().isContainer() && zone.system().isPublic() && !zone.environment().isTest() ) ||
+ ( !zone.cloud().allowHostSharing() && !sharedHost.supportsClusterType(cluster.type().name()));
+ }
+
+ private static boolean exclusiveProvisioning(Zone zone, ClusterSpec clusterSpec) {
+ return !zone.cloud().allowHostSharing() && clusterSpec.isExclusive();
}
}
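
The three exclusive* booleans of the record above are derived once, from the zone, the cluster spec and the shared-host flag snapshot, so downstream code can simply read params.exclusiveAllocation() and friends. A minimal, self-contained restatement of that derivation, using stand-in types for Zone and ClusterSpec and a plain boolean in place of SharedHost.supportsClusterType(...):

// Simplified restatement of the exclusivity derivation in the new AllocationParams.
public class ExclusivityDerivationSketch {

    record Zone(boolean publicSystem, boolean testEnvironment, boolean allowHostSharing) {}
    record Cluster(boolean isContainer, boolean isExclusive) {}

    /** Mirrors exclusiveAllocation(zone, cluster, sharedHost) above. */
    static boolean exclusiveAllocation(Zone zone, Cluster cluster, boolean sharedHostSupportsClusterType) {
        return cluster.isExclusive()
               || (cluster.isContainer() && zone.publicSystem() && !zone.testEnvironment())
               || (!zone.allowHostSharing() && !sharedHostSupportsClusterType);
    }

    /** Mirrors exclusiveProvisioning(zone, clusterSpec) above. */
    static boolean exclusiveProvisioning(Zone zone, Cluster cluster) {
        return !zone.allowHostSharing() && cluster.isExclusive();
    }

    public static void main(String[] args) {
        Zone publicProd = new Zone(true, false, true);
        Cluster container = new Cluster(true, false);
        System.out.println(exclusiveAllocation(publicProd, container, true)); // true: container cluster in a public, non-test zone
        System.out.println(exclusiveProvisioning(publicProd, container));     // false: exclusivity was not requested
    }
}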
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
index 2986e4efabb..73010e3c47c 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
@@ -10,6 +10,7 @@ import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeResources.DiskSpeed;
+import com.yahoo.config.provision.NodeType;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.flags.StringFlag;
@@ -86,47 +87,47 @@ public class CapacityPolicies {
return target;
}
- public ClusterResources specifyFully(AllocationParams params, ClusterResources resources, ClusterSpec clusterSpec, ApplicationId applicationId) {
- return resources.with(specifyFully(params, resources.nodeResources(), clusterSpec, applicationId));
+ public ClusterResources specifyFully(AllocationParams params, ClusterResources resources) {
+ return resources.with(specifyFully(params, resources.nodeResources()));
}
- public NodeResources specifyFully(AllocationParams params, NodeResources resources, ClusterSpec clusterSpec, ApplicationId applicationId) {
- NodeResources amended = resources.withUnspecifiedFieldsFrom(defaultResources(params, clusterSpec, applicationId).with(DiskSpeed.any));
+ public NodeResources specifyFully(AllocationParams params, NodeResources resources) {
+ NodeResources amended = resources.withUnspecifiedFieldsFrom(defaultResources(params).with(DiskSpeed.any));
// TODO jonmv: remove this after all apps are 8.248.8 or above; architecture for admin nodes was not picked up before this.
- if (clusterSpec.vespaVersion().isBefore(Version.fromString("8.248.8"))) amended = amended.with(resources.architecture());
+ if (params.cluster().vespaVersion().isBefore(Version.fromString("8.248.8"))) amended = amended.with(resources.architecture());
return amended;
}
- private NodeResources defaultResources(AllocationParams params, ClusterSpec clusterSpec, ApplicationId applicationId) {
- if (clusterSpec.type() == ClusterSpec.Type.admin) {
- Architecture architecture = adminClusterArchitecture(applicationId);
+ private NodeResources defaultResources(AllocationParams params) {
+ if (params.cluster().type() == ClusterSpec.Type.admin) {
+ Architecture architecture = adminClusterArchitecture(params.application());
- if (nodeRepository.exclusiveAllocation(params, clusterSpec)) {
+ if (nodeRepository.exclusiveAllocation(params, params.cluster())) {
return smallestExclusiveResources().with(architecture);
}
- if (clusterSpec.id().value().equals("cluster-controllers")) {
- return clusterControllerResources(clusterSpec, architecture).with(architecture);
+ if (params.cluster().id().value().equals("cluster-controllers")) {
+ return clusterControllerResources(params.cluster(), architecture).with(architecture);
}
- if (clusterSpec.id().value().equals("logserver")) {
+ if (params.cluster().id().value().equals("logserver")) {
return logserverResources(architecture).with(architecture);
}
- return versioned(clusterSpec, Map.of(new Version(0), smallestSharedResources())).with(architecture);
+ return versioned(params.cluster(), Map.of(new Version(0), smallestSharedResources())).with(architecture);
}
- if (clusterSpec.type() == ClusterSpec.Type.content) {
+ if (params.cluster().type() == ClusterSpec.Type.content) {
// When changing defaults here update cloud.vespa.ai/en/reference/services
return zone.cloud().dynamicProvisioning()
- ? versioned(clusterSpec, Map.of(new Version(0), new NodeResources(2, 16, 300, 0.3)))
- : versioned(clusterSpec, Map.of(new Version(0), new NodeResources(1.5, 8, 50, 0.3)));
+ ? versioned(params.cluster(), Map.of(new Version(0), new NodeResources(2, 16, 300, 0.3)))
+ : versioned(params.cluster(), Map.of(new Version(0), new NodeResources(1.5, 8, 50, 0.3)));
}
else {
// When changing defaults here update cloud.vespa.ai/en/reference/services
return zone.cloud().dynamicProvisioning()
- ? versioned(clusterSpec, Map.of(new Version(0), new NodeResources(2.0, 8, 50, 0.3)))
- : versioned(clusterSpec, Map.of(new Version(0), new NodeResources(1.5, 8, 50, 0.3)));
+ ? versioned(params.cluster(), Map.of(new Version(0), new NodeResources(2.0, 8, 50, 0.3)))
+ : versioned(params.cluster(), Map.of(new Version(0), new NodeResources(1.5, 8, 50, 0.3)));
}
}
@@ -177,10 +178,11 @@ public class CapacityPolicies {
}
/** Returns whether the nodes requested can share physical host with other applications */
- public ClusterSpec decideExclusivity(Capacity capacity, ClusterSpec requestedCluster) {
- if (capacity.cloudAccount().isPresent()) return requestedCluster.withExclusivity(true); // Implicit exclusive
- boolean exclusive = requestedCluster.isExclusive() && (capacity.isRequired() || zone.environment() == Environment.prod);
- return requestedCluster.withExclusivity(exclusive);
+ public boolean decideExclusivity(Capacity capacity, ClusterSpec requestedCluster) {
+ if (capacity.type() != NodeType.tenant) return true;
+ if (capacity.cloudAccount().isPresent()) return true;
+ if (!requestedCluster.isExclusive()) return false;
+ return capacity.isRequired() || zone.environment() == Environment.prod;
}
}
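
Note that decideExclusivity now returns a plain boolean instead of a rewritten ClusterSpec; the caller applies it via cluster.withExclusivity(...) (see NodeRepositoryProvisioner below). A self-contained sketch of the decision order, using simplified stand-in types:

// Sketch of the reordered checks in the boolean-returning decideExclusivity.
// NodeType, Environment, Capacity and RequestedCluster are simplified stand-ins for the Vespa types.
public class DecideExclusivitySketch {

    enum NodeType { tenant, host }
    enum Environment { dev, test, prod }

    record Capacity(NodeType type, boolean cloudAccountPresent, boolean required) {}
    record RequestedCluster(boolean isExclusive) {}

    static boolean decideExclusivity(Capacity capacity, RequestedCluster cluster, Environment zoneEnvironment) {
        if (capacity.type() != NodeType.tenant) return true;   // non-tenant node types are always exclusive
        if (capacity.cloudAccountPresent()) return true;       // a custom cloud account implies exclusivity
        if (!cluster.isExclusive()) return false;              // exclusivity was never requested
        return capacity.required() || zoneEnvironment == Environment.prod;
    }

    public static void main(String[] args) {
        Capacity tenant = new Capacity(NodeType.tenant, false, false);
        System.out.println(decideExclusivity(tenant, new RequestedCluster(true), Environment.prod)); // true
        System.out.println(decideExclusivity(tenant, new RequestedCluster(true), Environment.dev));  // false
    }
}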
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java
index dd70d4e6ccd..ebb7081e366 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java
@@ -5,6 +5,7 @@ import com.yahoo.config.provision.CloudAccount;
import com.yahoo.config.provision.HostEvent;
import com.yahoo.config.provision.NodeAllocationException;
import com.yahoo.config.provision.NodeResources;
+import com.yahoo.vespa.flags.custom.SharedHost;
import com.yahoo.vespa.hosted.provision.Node;
import java.util.Collection;
@@ -53,7 +54,7 @@ public interface HostProvisioner {
* @return a runnable that waits for the provisioning request to finish. It can be run without holding any locks,
* but may fail with an exception that should be propagated to the user initiating prepare()
*/
- Runnable provisionHosts(AllocationParams params,
+ Runnable provisionHosts(SharedHost sharedHost,
HostProvisionRequest request,
Predicate<NodeResources> realHostResourcesWithinLimits,
Consumer<List<ProvisionedHost>> whenProvisioned) throws NodeAllocationException;
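
As the javadoc above explains, provisionHosts is invoked while a lock is held and returns a Runnable that the caller runs after releasing the lock, to wait for the request and surface any failure to the user. A minimal sketch of that call pattern with stand-in types (compare the lockUnallocated() usage in HostCapacityMaintainer above):

import java.util.List;
import java.util.function.Consumer;

// Sketch of the provisionHosts contract: provision under the lock, wait outside it.
// HostProvisioner, Request and Host here are simplified stand-ins, not the real interfaces.
public class ProvisionWaiterSketch {

    interface HostProvisioner {
        Runnable provisionHosts(Request request, Consumer<List<Host>> whenProvisioned);
    }

    record Request(int hostCount) {}
    record Host(String hostname) {}

    static void provisionAndWait(HostProvisioner provisioner, Request request, Object unallocatedLock) {
        Runnable waiter;
        synchronized (unallocatedLock) {                      // stands in for nodes().lockUnallocated()
            waiter = provisioner.provisionHosts(request, hosts -> {
                // persist the provisioned hosts while still holding the lock
            });
        }
        waiter.run();                                         // wait for the request to finish, lock released
    }
}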
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
index cfb95ce835e..4b636baf878 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
@@ -85,15 +85,16 @@ public class NodeRepositoryProvisioner implements Provisioner {
public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity requested, ProvisionLogger logger) {
log.log(Level.FINE, "Received deploy prepare request for " + requested +
" for application " + application + ", cluster " + cluster);
- var params = AllocationParams.from(nodeRepository.flagSource(), application, cluster.vespaVersion());
- validate(params, application, cluster, requested, logger);
+ var params = AllocationParams.from(nodeRepository, application, cluster, cluster.vespaVersion());
+ validate(params, requested, logger);
+
+ params = params.with(cluster.withExclusivity(capacityPolicies.decideExclusivity(requested, cluster)));
NodeResources resources;
NodeSpec nodeSpec;
if (requested.type() == NodeType.tenant) {
- cluster = capacityPolicies.decideExclusivity(requested, cluster);
Capacity actual = capacityPolicies.applyOn(requested, application, cluster.isExclusive());
- ClusterResources target = decideTargetResources(params, application, cluster, actual);
+ ClusterResources target = decideTargetResources(params, actual);
validate(actual, target, cluster, application);
logIfDownscaled(requested.minResources().nodes(), actual.minResources().nodes(), cluster, logger);
@@ -103,7 +104,6 @@ public class NodeRepositoryProvisioner implements Provisioner {
requested.clusterInfo().hostTTL());
}
else {
- cluster = cluster.withExclusivity(true);
resources = getNodeResources(params, cluster, requested.minResources().nodeResources(), application);
nodeSpec = NodeSpec.from(requested.type(), nodeRepository.zone().cloud().account());
}
@@ -111,16 +111,16 @@ public class NodeRepositoryProvisioner implements Provisioner {
requireCompatibleResources(resources, cluster));
}
- private void validate(AllocationParams params, ApplicationId application, ClusterSpec cluster, Capacity requested, ProvisionLogger logger) {
- if (cluster.group().isPresent()) throw new IllegalArgumentException("Node requests cannot specify a group");
+ private void validate(AllocationParams params, Capacity requested, ProvisionLogger logger) {
+ if (params.cluster().group().isPresent()) throw new IllegalArgumentException("Node requests cannot specify a group");
- nodeRepository.nodeResourceLimits().ensureWithinAdvertisedLimits(params, "Min", requested.minResources().nodeResources(), application, cluster);
- nodeRepository.nodeResourceLimits().ensureWithinAdvertisedLimits(params, "Max", requested.maxResources().nodeResources(), application, cluster);
+ nodeRepository.nodeResourceLimits().ensureWithinAdvertisedLimits(params, "Min", requested.minResources().nodeResources());
+ nodeRepository.nodeResourceLimits().ensureWithinAdvertisedLimits(params, "Max", requested.maxResources().nodeResources());
if (!requested.minResources().nodeResources().gpuResources().equals(requested.maxResources().nodeResources().gpuResources()))
throw new IllegalArgumentException(requested + " is invalid: GPU capacity cannot have ranges");
- logInsufficientDiskResources(cluster, requested, logger);
+ logInsufficientDiskResources(params.cluster(), requested, logger);
}
private void logInsufficientDiskResources(ClusterSpec cluster, Capacity requested, ProvisionLogger logger) {
@@ -134,7 +134,7 @@ public class NodeRepositoryProvisioner implements Provisioner {
}
private NodeResources getNodeResources(AllocationParams params, ClusterSpec cluster, NodeResources nodeResources, ApplicationId applicationId) {
- return capacityPolicies.specifyFully(params, nodeResources, cluster, applicationId);
+ return capacityPolicies.specifyFully(params, nodeResources);
}
@Override
@@ -166,37 +166,33 @@ public class NodeRepositoryProvisioner implements Provisioner {
* Returns the target cluster resources, a value between the min and max in the requested capacity,
* and updates the application store with the received min and max.
*/
- private ClusterResources decideTargetResources(AllocationParams params, ApplicationId applicationId, ClusterSpec clusterSpec, Capacity requested) {
- try (Mutex lock = nodeRepository.applications().lock(applicationId)) {
- var application = nodeRepository.applications().get(applicationId).orElse(Application.empty(applicationId))
- .withCluster(clusterSpec.id(), clusterSpec.isExclusive(), requested);
+ private ClusterResources decideTargetResources(AllocationParams params, Capacity requested) {
+ try (Mutex lock = nodeRepository.applications().lock(params.application())) {
+ var application = nodeRepository.applications().get(params.application()).orElse(Application.empty(params.application()))
+ .withCluster(params.cluster().id(), params.cluster().isExclusive(), requested);
nodeRepository.applications().put(application, lock);
- var cluster = application.cluster(clusterSpec.id()).get();
- return cluster.target().resources().orElseGet(() -> currentResources(params, application, clusterSpec, cluster, requested));
+ var cluster = application.cluster(params.cluster().id()).get();
+ return cluster.target().resources().orElseGet(() -> currentResources(params, application, cluster, requested));
}
}
/** Returns the current resources of this cluster, or requested min if none */
- private ClusterResources currentResources(AllocationParams params,
- Application application,
- ClusterSpec clusterSpec,
- Cluster cluster,
- Capacity requested) {
+ private ClusterResources currentResources(AllocationParams params, Application application, Cluster cluster, Capacity requested) {
NodeList nodes = nodeRepository.nodes().list(Node.State.active).owner(application.id())
- .cluster(clusterSpec.id())
+ .cluster(params.cluster().id())
.not().retired()
.not().removable();
boolean firstDeployment = nodes.isEmpty();
var current =
firstDeployment // start at min, preserve current resources otherwise
- ? new AllocatableResources(params, initialResourcesFrom(params, requested, clusterSpec, application.id()), clusterSpec, nodeRepository)
+ ? new AllocatableResources(params, initialResourcesFrom(params, requested))
: new AllocatableResources(nodes, nodeRepository);
- var model = new ClusterModel(params, nodeRepository, application, clusterSpec, cluster, nodes, current, nodeRepository.metricsDb(), nodeRepository.clock());
+ var model = new ClusterModel(params, application, cluster, nodes, current, nodeRepository.metricsDb(), nodeRepository.clock());
return within(params, Limits.of(requested), model, firstDeployment);
}
- private ClusterResources initialResourcesFrom(AllocationParams params, Capacity requested, ClusterSpec clusterSpec, ApplicationId applicationId) {
- return capacityPolicies.specifyFully(params, requested.minResources(), clusterSpec, applicationId);
+ private ClusterResources initialResourcesFrom(AllocationParams params, Capacity requested) {
+ return capacityPolicies.specifyFully(params, requested.minResources());
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
index dee594f16bb..b9f1f89e9c3 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
@@ -31,14 +31,14 @@ public class NodeResourceLimits {
}
/** Validates the resources applications ask for (which are in "advertised" resource space) */
- public void ensureWithinAdvertisedLimits(AllocationParams params, String type, NodeResources requested, ApplicationId applicationId, ClusterSpec cluster) {
- boolean exclusive = nodeRepository.exclusiveAllocation(params, cluster);
- if (! requested.vcpuIsUnspecified() && requested.vcpu() < minAdvertisedVcpu(applicationId, cluster, exclusive))
- illegal(type, "vcpu", "", cluster, requested.vcpu(), minAdvertisedVcpu(applicationId, cluster, exclusive));
- if (! requested.memoryGbIsUnspecified() && requested.memoryGb() < minAdvertisedMemoryGb(applicationId, cluster, exclusive))
- illegal(type, "memoryGb", "Gb", cluster, requested.memoryGb(), minAdvertisedMemoryGb(applicationId, cluster, exclusive));
+ public void ensureWithinAdvertisedLimits(AllocationParams params, String type, NodeResources requested) {
+ boolean exclusive = params.exclusiveAllocation();
+ if (! requested.vcpuIsUnspecified() && requested.vcpu() < minAdvertisedVcpu(params.application(), params.cluster(), exclusive))
+ illegal(type, "vcpu", "", params.cluster(), requested.vcpu(), minAdvertisedVcpu(params.application(), params.cluster(), exclusive));
+ if (! requested.memoryGbIsUnspecified() && requested.memoryGb() < minAdvertisedMemoryGb(params.application(), params.cluster(), exclusive))
+ illegal(type, "memoryGb", "Gb", params.cluster(), requested.memoryGb(), minAdvertisedMemoryGb(params.application(), params.cluster(), exclusive));
if (! requested.diskGbIsUnspecified() && requested.diskGb() < minAdvertisedDiskGb(requested, exclusive))
- illegal(type, "diskGb", "Gb", cluster, requested.diskGb(), minAdvertisedDiskGb(requested, exclusive));
+ illegal(type, "diskGb", "Gb", params.cluster(), requested.diskGb(), minAdvertisedDiskGb(requested, exclusive));
}
// TODO: Remove this when we are ready to fail, not just warn on this. */
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
index 68027d990ea..58c27bc9802 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
@@ -141,7 +141,7 @@ public class Preparer {
deficit.dueToFlavorUpgrade());
Predicate<NodeResources> realHostResourcesWithinLimits = resources ->
nodeRepository.nodeResourceLimits().isWithinRealLimits(params, resources, application, cluster);
- waiter = hostProvisioner.get().provisionHosts(params, request, realHostResourcesWithinLimits, whenProvisioned);
+ waiter = hostProvisioner.get().provisionHosts(params.sharedHost(), request, realHostResourcesWithinLimits, whenProvisioned);
} catch (NodeAllocationException e) {
// Mark the nodes that were written to ZK in the consumer for deprovisioning. While these hosts do
// not exist, we cannot remove them from ZK here because other nodes may already have been
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java
index b3a1e7d819d..209d6e530e7 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java
@@ -1,6 +1,8 @@
// Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.restapi;
+import com.yahoo.component.Vtag;
+import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.IntRange;
import com.yahoo.config.provision.ClusterResources;
import com.yahoo.slime.Cursor;
@@ -25,47 +27,45 @@ import java.util.List;
*/
public class ApplicationSerializer {
- public static Slime toSlime(AllocationParams params,
- Application application,
+ public static Slime toSlime(Application application,
NodeList applicationNodes,
NodeRepository nodeRepository,
URI applicationUri) {
Slime slime = new Slime();
- toSlime(params, application, applicationNodes, nodeRepository, slime.setObject(), applicationUri);
+ toSlime(application, applicationNodes, nodeRepository, slime.setObject(), applicationUri);
return slime;
}
- private static void toSlime(AllocationParams params,
- Application application,
+ private static void toSlime(Application application,
NodeList applicationNodes,
NodeRepository nodeRepository,
Cursor object,
URI applicationUri) {
object.setString("url", applicationUri.toString());
object.setString("id", application.id().toFullString());
- clustersToSlime(params, application, applicationNodes, nodeRepository, object.setObject("clusters"));
+ clustersToSlime(application, applicationNodes, nodeRepository, object.setObject("clusters"));
}
- private static void clustersToSlime(AllocationParams params,
- Application application,
+ private static void clustersToSlime(Application application,
NodeList applicationNodes,
NodeRepository nodeRepository,
Cursor clustersObject) {
- application.clusters().values().forEach(cluster -> toSlime(params, application, cluster, applicationNodes, nodeRepository, clustersObject));
+ application.clusters().values().forEach(cluster -> toSlime(application, cluster, applicationNodes, nodeRepository, clustersObject));
}
- private static void toSlime(AllocationParams params,
- Application application,
+ private static void toSlime(Application application,
Cluster cluster,
NodeList applicationNodes,
NodeRepository nodeRepository,
Cursor clustersObject) {
NodeList nodes = applicationNodes.not().retired().cluster(cluster.id());
+ ClusterSpec clusterSpec = nodes.clusterSpec();
if (nodes.isEmpty()) return;
ClusterResources currentResources = nodes.toResources();
Cursor clusterObject = clustersObject.setObject(cluster.id().value());
- clusterObject.setString("type", nodes.clusterSpec().type().name());
- Limits limits = Limits.of(cluster).fullySpecified(params, nodes.clusterSpec(), nodeRepository, application.id());
+ clusterObject.setString("type", clusterSpec.type().name());
+ var params = AllocationParams.from(nodeRepository, application.id(), clusterSpec, clusterSpec.vespaVersion());
+ Limits limits = Limits.of(cluster).fullySpecified(params, nodeRepository);
toSlime(limits.min(), clusterObject.setObject("min"));
toSlime(limits.max(), clusterObject.setObject("max"));
if ( ! cluster.groupSize().isEmpty())
@@ -75,7 +75,7 @@ public class ApplicationSerializer {
toSlime(cluster.suggested(), clusterObject.setObject("suggested"));
toSlime(cluster.target(), clusterObject.setObject("target"));
scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
- clusterObject.setLong("scalingDuration", cluster.scalingDuration(nodes.clusterSpec()).toMillis());
+ clusterObject.setLong("scalingDuration", cluster.scalingDuration(clusterSpec).toMillis());
}
private static void toSlime(Autoscaling autoscaling, Cursor autoscalingObject) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java
index 7728e8611da..eddb7fd40ec 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiHandler.java
@@ -465,9 +465,7 @@ public class NodesV2ApiHandler extends ThreadedHttpRequestHandler {
Optional<Application> application = nodeRepository.applications().get(id);
if (application.isEmpty())
return ErrorResponse.notFoundError("No application '" + id + "'");
- var params = AllocationParams.from(nodeRepository.flagSource(), id, Vtag.currentVersion);
- Slime slime = ApplicationSerializer.toSlime(params,
- application.get(),
+ Slime slime = ApplicationSerializer.toSlime(application.get(),
nodeRepository.nodes().list(Node.State.active).owner(id),
nodeRepository,
withPath("/nodes/v2/applications/" + id, uri));
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java
index 3814632adce..40734cc25c4 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java
@@ -9,6 +9,7 @@ import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.NodeAllocationException;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeType;
+import com.yahoo.vespa.flags.custom.SharedHost;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.IP;
@@ -74,7 +75,7 @@ public class MockHostProvisioner implements HostProvisioner {
}
@Override
- public Runnable provisionHosts(AllocationParams params,
+ public Runnable provisionHosts(SharedHost sharedHost,
HostProvisionRequest request,
Predicate<NodeResources> realHostResourcesWithinLimits,
Consumer<List<ProvisionedHost>> whenProvisioned) throws NodeAllocationException {
@@ -268,7 +269,7 @@ public class MockHostProvisioner implements HostProvisioner {
/** Fail call to {@link MockHostProvisioner#provision(com.yahoo.vespa.hosted.provision.Node)} */
failProvisioning,
- /** Fail call to {@link MockHostProvisioner#provisionHosts(AllocationParams, HostProvisionRequest, Predicate, Consumer)} */
+ /** Fail call to {@link MockHostProvisioner#provisionHosts(SharedHost, HostProvisionRequest, Predicate, Consumer)} */
failProvisionRequest,
/** Fail call to {@link MockHostProvisioner#deprovision(com.yahoo.vespa.hosted.provision.Node)} */
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
index 921c059f3bf..613a0434054 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
@@ -389,7 +389,7 @@ public class AutoscalingTest {
.build();
NodeResources defaultResources =
- new CapacityPolicies(fixture.tester().nodeRepository()).specifyFully(fixture.params, NodeResources.unspecified(), fixture.clusterSpec, fixture.applicationId);
+ new CapacityPolicies(fixture.tester().nodeRepository()).specifyFully(fixture.allocationParams, NodeResources.unspecified());
fixture.tester().assertResources("Min number of nodes and default resources",
2, 1, defaultResources,
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
index 36b2917132a..6f3b348c5b0 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
@@ -90,12 +90,11 @@ public class ClusterModelTest {
Cluster cluster = cluster();
application = application.with(cluster);
var nodeRepository = new ProvisioningTester.Builder().build().nodeRepository();
- var params = AllocationParams.from(nodeRepository.flagSource(), application.id(), clusterSpec.vespaVersion());
+ var params = AllocationParams.from(nodeRepository, application.id(), clusterSpec, clusterSpec.vespaVersion());
return new ClusterModel(params,
- nodeRepository,
application.with(status),
- clusterSpec, cluster,
- new AllocatableResources(params, clusterResources(), clusterSpec, nodeRepository),
+ cluster,
+ new AllocatableResources(params, clusterResources()),
clock, Duration.ofMinutes(10), Duration.ofMinutes(5),
timeseries(cluster,100, queryRate, writeRate, clock),
ClusterNodesTimeseries.empty());
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java
index 58b16c601a0..4c34dd1f7c8 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java
@@ -40,8 +40,8 @@ import java.util.Optional;
*/
public class Fixture {
- final AllocationParams params;
final DynamicProvisioningTester tester;
+ final AllocationParams allocationParams;
final ApplicationId applicationId;
final ClusterSpec clusterSpec;
final Capacity capacity;
@@ -50,11 +50,11 @@ public class Fixture {
Autoscaling lastAutoscaling = Autoscaling.empty();
public Fixture(Fixture.Builder builder, Optional<ClusterResources> initialResources, int hostCount) {
- params = AllocationParams.from(builder.flagSource, builder.application, builder.cluster.vespaVersion());
applicationId = builder.application;
clusterSpec = builder.cluster;
capacity = builder.capacity;
tester = new DynamicProvisioningTester(builder.zone, builder.resourceCalculator, builder.hostFlavors, builder.flagSource, hostCount);
+ allocationParams = AllocationParams.from(tester.nodeRepository(), builder.application, builder.cluster, builder.cluster.vespaVersion());
var deployCapacity = initialResources.isPresent() ? Capacity.from(initialResources.get()) : capacity;
tester.deploy(builder.application, builder.cluster, deployCapacity);
this.loader = new Loader(this);
@@ -83,10 +83,8 @@ public class Fixture {
public Capacity capacity() { return capacity; }
public ClusterModel clusterModel() {
- return new ClusterModel(params,
- tester.nodeRepository(),
+ return new ClusterModel(allocationParams,
application(),
- clusterSpec,
cluster(),
nodes(),
new AllocatableResources(nodes(), tester.nodeRepository()),
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTester.java
index be2b2ca896a..50696583b88 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicProvisioningTester.java
@@ -155,7 +155,7 @@ public class DynamicProvisioningTester {
}
public Autoscaling autoscale(ApplicationId applicationId, ClusterSpec cluster, Capacity capacity) {
- capacity = capacityPolicies.applyOn(capacity, applicationId, capacityPolicies.decideExclusivity(capacity, cluster).isExclusive());
+ capacity = capacityPolicies.applyOn(capacity, applicationId, capacityPolicies.decideExclusivity(capacity, cluster));
Application application = nodeRepository().applications().get(applicationId).orElse(Application.empty(applicationId))
.withCluster(cluster.id(), false, capacity);
try (Mutex lock = nodeRepository().applications().lock(applicationId)) {