From 9f18ca064bf95f0706bb83d9c22fb6fa2993173c Mon Sep 17 00:00:00 2001
From: Håkon Hallingstad
Date: Thu, 26 Oct 2023 16:20:34 +0200
Subject: Set tenant, instance ID, and Vespa version dimensions of make-exclusive

---
 .../maintenance/HostCapacityMaintainer.java      | 23 +++++++++++++---------
 .../hosted/provision/provisioning/Preparer.java  |  6 +++++-
 2 files changed, 19 insertions(+), 10 deletions(-)

diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java
index be6c420c63b..3c42972ee0b 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostCapacityMaintainer.java
@@ -14,6 +14,7 @@ import com.yahoo.jdisc.Metric;
 import com.yahoo.lang.MutableInteger;
 import com.yahoo.transaction.Mutex;
 import com.yahoo.vespa.flags.BooleanFlag;
+import com.yahoo.vespa.flags.FetchVector;
 import com.yahoo.vespa.flags.FlagSource;
 import com.yahoo.vespa.flags.Flags;
 import com.yahoo.vespa.flags.ListFlag;
@@ -191,7 +192,11 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
      */
     private List<Node> provisionUntilNoDeficit(NodeList nodeList) {
         List<ClusterCapacity> preprovisionCapacity = preprovisionCapacityFlag.value();
-        boolean makeExclusive = makeExclusiveFlag.value();
+        ApplicationId application = ApplicationId.defaultId();
+        boolean makeExclusive = makeExclusiveFlag.with(FetchVector.Dimension.TENANT_ID, application.tenant().value())
+                                                 .with(FetchVector.Dimension.INSTANCE_ID, application.serializedForm())
+                                                 .with(FetchVector.Dimension.VESPA_VERSION, Vtag.currentVersion.toFullString())
+                                                 .value();
 
         // Worst-case each ClusterCapacity in preprovisionCapacity will require an allocation.
         int maxProvisions = preprovisionCapacity.size();
@@ -199,7 +204,7 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
         var nodesPlusProvisioned = new ArrayList<>(nodeList.asList());
         for (int numProvisions = 0;; ++numProvisions) {
             var nodesPlusProvisionedPlusAllocated = new ArrayList<>(nodesPlusProvisioned);
-            Optional<ClusterCapacity> deficit = allocatePreprovisionCapacity(preprovisionCapacity, nodesPlusProvisionedPlusAllocated, makeExclusive);
+            Optional<ClusterCapacity> deficit = allocatePreprovisionCapacity(application, preprovisionCapacity, nodesPlusProvisionedPlusAllocated, makeExclusive);
             if (deficit.isEmpty()) {
                 return nodesPlusProvisionedPlusAllocated;
             }
@@ -258,13 +263,14 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
      *        they are added to {@code mutableNodes}
      * @return the part of a cluster capacity it was unable to allocate, if any
      */
-    private Optional<ClusterCapacity> allocatePreprovisionCapacity(List<ClusterCapacity> preprovisionCapacity,
+    private Optional<ClusterCapacity> allocatePreprovisionCapacity(ApplicationId application,
+                                                                   List<ClusterCapacity> preprovisionCapacity,
                                                                    ArrayList<Node> mutableNodes,
                                                                    boolean makeExclusive) {
         for (int clusterIndex = 0; clusterIndex < preprovisionCapacity.size(); ++clusterIndex) {
             ClusterCapacity clusterCapacity = preprovisionCapacity.get(clusterIndex);
             LockedNodeList allNodes = new LockedNodeList(mutableNodes, () -> {});
-            List<Node> candidates = findCandidates(clusterCapacity, clusterIndex, allNodes, makeExclusive);
+            List<Node> candidates = findCandidates(application, clusterCapacity, clusterIndex, allNodes, makeExclusive);
             int deficit = Math.max(0, clusterCapacity.count() - candidates.size());
             if (deficit > 0) {
                 return Optional.of(clusterCapacity.withCount(deficit));
@@ -277,25 +283,24 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
         return Optional.empty();
     }
 
-    private List<Node> findCandidates(ClusterCapacity clusterCapacity, int clusterIndex, LockedNodeList allNodes, boolean makeExclusive) {
+    private List<Node> findCandidates(ApplicationId application, ClusterCapacity clusterCapacity, int clusterIndex, LockedNodeList allNodes, boolean makeExclusive) {
         NodeResources nodeResources = toNodeResources(clusterCapacity);
 
         // We'll allocate each ClusterCapacity as a unique cluster in a dummy application
-        ApplicationId applicationId = ApplicationId.defaultId();
         ClusterSpec cluster = asSpec(Optional.ofNullable(clusterCapacity.clusterType()), clusterIndex);
         NodeSpec nodeSpec = NodeSpec.from(clusterCapacity.count(), 1, nodeResources, false, true,
                                           nodeRepository().zone().cloud().account(), Duration.ZERO);
         var allocationContext = IP.Allocation.Context.from(nodeRepository().zone().cloud().name(),
                                                            nodeSpec.cloudAccount().isExclave(nodeRepository().zone()),
                                                            nodeRepository().nameResolver());
-        NodePrioritizer prioritizer = new NodePrioritizer(allNodes, applicationId, cluster, nodeSpec,
+        NodePrioritizer prioritizer = new NodePrioritizer(allNodes, application, cluster, nodeSpec,
                                                           true, false, allocationContext, nodeRepository().nodes(),
                                                           nodeRepository().resourcesCalculator(), nodeRepository().spareCount(),
                                                           nodeRepository().exclusiveAllocation(cluster), makeExclusive);
         List<NodeCandidate> nodeCandidates = prioritizer.collect()
                                                         .stream()
                                                         .filter(node -> node.violatesExclusivity(cluster,
-                                                                                                 applicationId,
+                                                                                                 application,
                                                                                                  nodeRepository().exclusiveAllocation(cluster),
                                                                                                  false,
                                                                                                  nodeRepository().zone().cloud().allowHostSharing(),
@@ -308,7 +313,7 @@ public class HostCapacityMaintainer extends NodeRepositoryMaintainer {
                                        .stream()
                                        .limit(clusterCapacity.count())
                                        .map(candidate -> candidate.toNode()
-                                                                  .allocate(applicationId,
+                                                                  .allocate(application,
                                                                              ClusterMembership.from(cluster, index.next()),
                                                                              nodeResources,
                                                                              nodeRepository().clock().instant()))
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
index 270fda8314e..83afe92d025 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
@@ -14,6 +14,7 @@ import com.yahoo.text.internal.SnippetGenerator;
 import com.yahoo.transaction.Mutex;
 import com.yahoo.vespa.applicationmodel.InfrastructureApplication;
 import com.yahoo.vespa.flags.BooleanFlag;
+import com.yahoo.vespa.flags.FetchVector;
 import com.yahoo.vespa.flags.Flags;
 import com.yahoo.vespa.hosted.provision.LockedNodeList;
 import com.yahoo.vespa.hosted.provision.Node;
@@ -76,7 +77,10 @@ public class Preparer {
 
         loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requested));
 
-        boolean makeExclusive = makeExclusiveFlag.value();
+        boolean makeExclusive = makeExclusiveFlag.with(FetchVector.Dimension.TENANT_ID, application.tenant().value())
+                                                 .with(FetchVector.Dimension.INSTANCE_ID, application.serializedForm())
+                                                 .with(FetchVector.Dimension.VESPA_VERSION, cluster.vespaVersion().toFullString())
+                                                 .value();
         // Try preparing in memory without global unallocated lock. Most of the time there should be no changes,
         // and we can return nodes previously allocated.
         LockedNodeList allNodes = nodeRepository.nodes().list(PROBE_LOCK);
--
cgit v1.2.3
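
Both hunks resolve the make-exclusive flag against the same three dimensions; only the source of the Vespa version differs (Vtag.currentVersion for the dummy pre-provisioning application in HostCapacityMaintainer, the cluster's own vespaVersion() in Preparer). Below is a minimal sketch of how that shared lookup could be factored into one helper. The flag and FetchVector calls are exactly those used in the patch; the class and method names are hypothetical and not part of the change.

import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.vespa.flags.BooleanFlag;
import com.yahoo.vespa.flags.FetchVector;

// Hypothetical helper (not part of this patch): resolves a boolean flag with the
// tenant, instance ID, and Vespa version dimensions set in both hunks above.
class MakeExclusive {

    static boolean resolve(BooleanFlag makeExclusiveFlag, ApplicationId application, Version vespaVersion) {
        return makeExclusiveFlag.with(FetchVector.Dimension.TENANT_ID, application.tenant().value())
                                .with(FetchVector.Dimension.INSTANCE_ID, application.serializedForm())
                                .with(FetchVector.Dimension.VESPA_VERSION, vespaVersion.toFullString())
                                .value();
    }
}

// Usage corresponding to the two call sites in the patch:
//   HostCapacityMaintainer: MakeExclusive.resolve(makeExclusiveFlag, ApplicationId.defaultId(), Vtag.currentVersion)
//   Preparer:               MakeExclusive.resolve(makeExclusiveFlag, application, cluster.vespaVersion())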