diff options
author | Håkon Hallingstad <hakon@yahooinc.com> | 2023-10-26 16:20:34 +0200 |
---|---|---|
committer | Håkon Hallingstad <hakon@yahooinc.com> | 2023-10-26 16:20:34 +0200 |
commit | 9f18ca064bf95f0706bb83d9c22fb6fa2993173c (patch) | |
tree | a4355ba0cad6ca5544b97ed6676738c4d4bd28e7 /node-repository | |
parent | 7dcf21422fc3e3616524342d23bf8cf197fdb6ed (diff) |
Set tenant, instance ID, and Vespa version dimensions of the make-exclusive flag
Diffstat (limited to 'node-repository')
2 files changed, 19 insertions, 10 deletions
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java index be6c420c63b..3c42972ee0b 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java @@ -14,6 +14,7 @@ import com.yahoo.jdisc.Metric; import com.yahoo.lang.MutableInteger; import com.yahoo.transaction.Mutex; import com.yahoo.vespa.flags.BooleanFlag; +import com.yahoo.vespa.flags.FetchVector; import com.yahoo.vespa.flags.FlagSource; import com.yahoo.vespa.flags.Flags; import com.yahoo.vespa.flags.ListFlag; @@ -191,7 +192,11 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer { */ private List<Node> provisionUntilNoDeficit(NodeList nodeList) { List<ClusterCapacity> preprovisionCapacity = preprovisionCapacityFlag.value(); - boolean makeExclusive = makeExclusiveFlag.value(); + ApplicationId application = ApplicationId.defaultId(); + boolean makeExclusive = makeExclusiveFlag.with(FetchVector.Dimension.TENANT_ID, application.tenant().value()) + .with(FetchVector.Dimension.INSTANCE_ID, application.serializedForm()) + .with(FetchVector.Dimension.VESPA_VERSION, Vtag.currentVersion.toFullString()) + .value(); // Worst-case each ClusterCapacity in preprovisionCapacity will require an allocation. 
int maxProvisions = preprovisionCapacity.size(); @@ -199,7 +204,7 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer { var nodesPlusProvisioned = new ArrayList<>(nodeList.asList()); for (int numProvisions = 0;; ++numProvisions) { var nodesPlusProvisionedPlusAllocated = new ArrayList<>(nodesPlusProvisioned); - Optional<ClusterCapacity> deficit = allocatePreprovisionCapacity(preprovisionCapacity, nodesPlusProvisionedPlusAllocated, makeExclusive); + Optional<ClusterCapacity> deficit = allocatePreprovisionCapacity(application, preprovisionCapacity, nodesPlusProvisionedPlusAllocated, makeExclusive); if (deficit.isEmpty()) { return nodesPlusProvisionedPlusAllocated; } @@ -258,13 +263,14 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer { * they are added to {@code mutableNodes} * @return the part of a cluster capacity it was unable to allocate, if any */ - private Optional<ClusterCapacity> allocatePreprovisionCapacity(List<ClusterCapacity> preprovisionCapacity, + private Optional<ClusterCapacity> allocatePreprovisionCapacity(ApplicationId application, + List<ClusterCapacity> preprovisionCapacity, ArrayList<Node> mutableNodes, boolean makeExclusive) { for (int clusterIndex = 0; clusterIndex < preprovisionCapacity.size(); ++clusterIndex) { ClusterCapacity clusterCapacity = preprovisionCapacity.get(clusterIndex); LockedNodeList allNodes = new LockedNodeList(mutableNodes, () -> {}); - List<Node> candidates = findCandidates(clusterCapacity, clusterIndex, allNodes, makeExclusive); + List<Node> candidates = findCandidates(application, clusterCapacity, clusterIndex, allNodes, makeExclusive); int deficit = Math.max(0, clusterCapacity.count() - candidates.size()); if (deficit > 0) { return Optional.of(clusterCapacity.withCount(deficit)); @@ -277,25 +283,24 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer { return Optional.empty(); } - private List<Node> findCandidates(ClusterCapacity clusterCapacity, int 
clusterIndex, LockedNodeList allNodes, boolean makeExclusive) { + private List<Node> findCandidates(ApplicationId application, ClusterCapacity clusterCapacity, int clusterIndex, LockedNodeList allNodes, boolean makeExclusive) { NodeResources nodeResources = toNodeResources(clusterCapacity); // We'll allocate each ClusterCapacity as a unique cluster in a dummy application - ApplicationId applicationId = ApplicationId.defaultId(); ClusterSpec cluster = asSpec(Optional.ofNullable(clusterCapacity.clusterType()), clusterIndex); NodeSpec nodeSpec = NodeSpec.from(clusterCapacity.count(), 1, nodeResources, false, true, nodeRepository().zone().cloud().account(), Duration.ZERO); var allocationContext = IP.Allocation.Context.from(nodeRepository().zone().cloud().name(), nodeSpec.cloudAccount().isExclave(nodeRepository().zone()), nodeRepository().nameResolver()); - NodePrioritizer prioritizer = new NodePrioritizer(allNodes, applicationId, cluster, nodeSpec, + NodePrioritizer prioritizer = new NodePrioritizer(allNodes, application, cluster, nodeSpec, true, false, allocationContext, nodeRepository().nodes(), nodeRepository().resourcesCalculator(), nodeRepository().spareCount(), nodeRepository().exclusiveAllocation(cluster), makeExclusive); List<NodeCandidate> nodeCandidates = prioritizer.collect() .stream() .filter(node -> node.violatesExclusivity(cluster, - applicationId, + application, nodeRepository().exclusiveAllocation(cluster), false, nodeRepository().zone().cloud().allowHostSharing(), @@ -308,7 +313,7 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer { .stream() .limit(clusterCapacity.count()) .map(candidate -> candidate.toNode() - .allocate(applicationId, + .allocate(application, ClusterMembership.from(cluster, index.next()), nodeResources, nodeRepository().clock().instant())) diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java 
b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java index 270fda8314e..83afe92d025 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java @@ -14,6 +14,7 @@ import com.yahoo.text.internal.SnippetGenerator; import com.yahoo.transaction.Mutex; import com.yahoo.vespa.applicationmodel.InfrastructureApplication; import com.yahoo.vespa.flags.BooleanFlag; +import com.yahoo.vespa.flags.FetchVector; import com.yahoo.vespa.flags.Flags; import com.yahoo.vespa.hosted.provision.LockedNodeList; import com.yahoo.vespa.hosted.provision.Node; @@ -76,7 +77,10 @@ public class Preparer { loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requested)); - boolean makeExclusive = makeExclusiveFlag.value(); + boolean makeExclusive = makeExclusiveFlag.with(FetchVector.Dimension.TENANT_ID, application.tenant().value()) + .with(FetchVector.Dimension.INSTANCE_ID, application.serializedForm()) + .with(FetchVector.Dimension.VESPA_VERSION, cluster.vespaVersion().toFullString()) + .value(); // Try preparing in memory without global unallocated lock. Most of the time there should be no changes, // and we can return nodes previously allocated. LockedNodeList allNodes = nodeRepository.nodes().list(PROBE_LOCK); |