author  Håkon Hallingstad <hakon@oath.com>  2018-11-05 15:11:35 +0100
committer  GitHub <noreply@github.com>  2018-11-05 15:11:35 +0100
commit  c865c789f5368971941a3b3b2f42b09ef99ef6aa (patch)
tree  1c1ce502d6f1eb49d4d7b6935caa342cc4e2b92e
parent  b045638010b7aa3c8fb9b3ea8f127ca4246e8740 (diff)
parent  f3605257d66124d23d49d73db744e7adc0770f41 (diff)
Merge pull request #7564 from vespa-engine/hakonhall/send-probe-when-suspending-many-nodes
Send probe when suspending many nodes
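This PR changes Orchestrator.suspendAll() to walk the node groups twice: a first pass in which every per-application subcontext is flagged as a probe (the cluster controller is asked with probe=true and no host status is written to ZooKeeper), and a second, committal pass that only starts once every group has passed its probe. The sketch below illustrates that two-pass control flow with hypothetical stand-in types (Group and its suspend method are not the Vespa NodeGroup/suspendGroup API); it is an illustration of the pattern in this diff, not the actual implementation.

    import java.util.List;

    class ProbeThenCommitSketch {
        /** Stand-in for a node group; suspend(true) must validate without changing any state. */
        interface Group {
            void suspend(boolean probe) throws Exception;
        }

        /** Probe every group first; only if all probes succeed, suspend for real. */
        static void suspendAll(List<Group> groupsOrderedByApplication) throws Exception {
            for (boolean probe : new boolean[] {true, false}) {
                for (Group group : groupsOrderedByApplication) {
                    group.suspend(probe); // a failing probe throws here, before any committal pass begins
                }
            }
        }
    }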
-rw-r--r--  orchestrator/src/main/java/com/yahoo/vespa/orchestrator/OrchestratorContext.java | 15
-rw-r--r--  orchestrator/src/main/java/com/yahoo/vespa/orchestrator/OrchestratorImpl.java | 34
-rw-r--r--  orchestrator/src/main/java/com/yahoo/vespa/orchestrator/controller/ClusterControllerClientImpl.java | 8
-rw-r--r--  orchestrator/src/main/java/com/yahoo/vespa/orchestrator/controller/ClusterControllerStateRequest.java | 27
-rw-r--r--  orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ApplicationApi.java | 3
-rw-r--r--  orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ApplicationApiImpl.java | 20
-rw-r--r--  orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/StorageNodeImpl.java | 6
-rw-r--r--  orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaPolicy.java | 13
-rw-r--r--  orchestrator/src/main/java/com/yahoo/vespa/orchestrator/status/InMemoryStatusService.java | 6
-rw-r--r--  orchestrator/src/main/java/com/yahoo/vespa/orchestrator/status/StatusService.java | 8
-rw-r--r--  orchestrator/src/main/java/com/yahoo/vespa/orchestrator/status/ZookeeperStatusService.java | 22
-rw-r--r--  orchestrator/src/test/java/com/yahoo/vespa/orchestrator/OrchestratorImplTest.java | 20
-rw-r--r--  orchestrator/src/test/java/com/yahoo/vespa/orchestrator/controller/ClusterControllerClientTest.java | 41
-rw-r--r--  orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaPolicyTest.java | 12
-rw-r--r--  orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/HostResourceTest.java | 2
-rw-r--r--  orchestrator/src/test/java/com/yahoo/vespa/orchestrator/status/ZookeeperStatusServiceTest.java | 41
16 files changed, 157 insertions, 121 deletions
diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/OrchestratorContext.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/OrchestratorContext.java
index f1f572621ce..d3bdaa6dc64 100644
--- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/OrchestratorContext.java
+++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/OrchestratorContext.java
@@ -22,16 +22,16 @@ public class OrchestratorContext {
private final Clock clock;
private final TimeBudget timeBudget;
- private boolean probe;
+ private final boolean probe;
/** Create an OrchestratorContext for operations on multiple applications. */
public static OrchestratorContext createContextForMultiAppOp(Clock clock) {
- return new OrchestratorContext(clock, TimeBudget.fromNow(clock, DEFAULT_TIMEOUT_FOR_BATCH_OP), true);
+ return new OrchestratorContext(clock, TimeBudget.fromNow(clock, DEFAULT_TIMEOUT_FOR_BATCH_OP), false);
}
/** Create an OrchestratorContext for an operation on a single application. */
public static OrchestratorContext createContextForSingleAppOp(Clock clock) {
- return new OrchestratorContext(clock, TimeBudget.fromNow(clock, DEFAULT_TIMEOUT_FOR_SINGLE_OP), true);
+ return new OrchestratorContext(clock, TimeBudget.fromNow(clock, DEFAULT_TIMEOUT_FOR_SINGLE_OP), false);
}
private OrchestratorContext(Clock clock, TimeBudget timeBudget, boolean probe) {
@@ -48,13 +48,6 @@ public class OrchestratorContext {
return new ClusterControllerClientTimeouts(timeBudget.timeLeftAsTimeBudget());
}
-
- /** Mark this operation as a non-committal probe. */
- public OrchestratorContext markAsProbe() {
- this.probe = true;
- return this;
- }
-
/** Whether the operation is a no-op probe to test whether it would have succeeded, if it had been committal. */
public boolean isProbe() {
return probe;
@@ -69,7 +62,7 @@ public class OrchestratorContext {
}
/** Create an OrchestratorContext for an operation on a single application, but limited to current timeout. */
- public OrchestratorContext createSubcontextForSingleAppOp() {
+ public OrchestratorContext createSubcontextForSingleAppOp(boolean probe) {
Instant now = clock.instant();
Instant deadline = timeBudget.deadline().get();
Instant maxDeadline = now.plus(DEFAULT_TIMEOUT_FOR_SINGLE_OP);
diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/OrchestratorImpl.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/OrchestratorImpl.java
index 6811788ffb7..77bb1e99e19 100644
--- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/OrchestratorImpl.java
+++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/OrchestratorImpl.java
@@ -107,7 +107,7 @@ public class OrchestratorImpl implements Orchestrator {
ApplicationInstanceReference reference = getApplicationInstance(hostName).reference();
OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock);
try (MutableStatusRegistry statusRegistry = statusService
- .lockApplicationInstance_forCurrentThreadOnly(reference, context.getTimeLeft())) {
+ .lockApplicationInstance_forCurrentThreadOnly(context, reference)) {
statusRegistry.setHostState(hostName, status);
}
}
@@ -130,9 +130,8 @@ public class OrchestratorImpl implements Orchestrator {
ApplicationInstance appInstance = getApplicationInstance(hostName);
OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock);
- try (MutableStatusRegistry statusRegistry = statusService.lockApplicationInstance_forCurrentThreadOnly(
- appInstance.reference(),
- context.getTimeLeft())) {
+ try (MutableStatusRegistry statusRegistry = statusService
+ .lockApplicationInstance_forCurrentThreadOnly(context, appInstance.reference())) {
final HostStatus currentHostState = statusRegistry.getHostStatus(hostName);
if (HostStatus.NO_REMARKS == currentHostState) {
@@ -159,9 +158,8 @@ public class OrchestratorImpl implements Orchestrator {
NodeGroup nodeGroup = new NodeGroup(appInstance, hostName);
OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock);
- try (MutableStatusRegistry statusRegistry = statusService.lockApplicationInstance_forCurrentThreadOnly(
- appInstance.reference(),
- context.getTimeLeft())) {
+ try (MutableStatusRegistry statusRegistry = statusService
+ .lockApplicationInstance_forCurrentThreadOnly(context, appInstance.reference())) {
ApplicationApi applicationApi = new ApplicationApiImpl(
nodeGroup,
statusRegistry,
@@ -181,9 +179,7 @@ public class OrchestratorImpl implements Orchestrator {
ApplicationInstanceReference applicationReference = nodeGroup.getApplicationReference();
try (MutableStatusRegistry hostStatusRegistry =
- statusService.lockApplicationInstance_forCurrentThreadOnly(
- applicationReference,
- context.getTimeLeft())) {
+ statusService.lockApplicationInstance_forCurrentThreadOnly(context, applicationReference)) {
ApplicationInstanceStatus appStatus = statusService.forApplicationInstance(applicationReference).getApplicationInstanceStatus();
if (appStatus == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN) {
return;
@@ -222,6 +218,8 @@ public class OrchestratorImpl implements Orchestrator {
@Override
public void suspendAll(HostName parentHostname, List<HostName> hostNames)
throws BatchHostStateChangeDeniedException, BatchHostNameNotFoundException, BatchInternalErrorException {
+ OrchestratorContext context = OrchestratorContext.createContextForMultiAppOp(clock);
+
List<NodeGroup> nodeGroupsOrderedByApplication;
try {
nodeGroupsOrderedByApplication = nodeGroupsOrderedForSuspend(hostNames);
@@ -229,10 +227,18 @@ public class OrchestratorImpl implements Orchestrator {
throw new BatchHostNameNotFoundException(parentHostname, hostNames, e);
}
- OrchestratorContext context = OrchestratorContext.createContextForMultiAppOp(clock);
+ suspendAllNodeGroups(context, parentHostname, nodeGroupsOrderedByApplication, true);
+ suspendAllNodeGroups(context, parentHostname, nodeGroupsOrderedByApplication, false);
+ }
+
+ private void suspendAllNodeGroups(OrchestratorContext context,
+ HostName parentHostname,
+ List<NodeGroup> nodeGroupsOrderedByApplication,
+ boolean probe)
+ throws BatchHostStateChangeDeniedException, BatchInternalErrorException {
for (NodeGroup nodeGroup : nodeGroupsOrderedByApplication) {
try {
- suspendGroup(context.createSubcontextForSingleAppOp(), nodeGroup);
+ suspendGroup(context.createSubcontextForSingleAppOp(probe), nodeGroup);
} catch (HostStateChangeDeniedException e) {
throw new BatchHostStateChangeDeniedException(parentHostname, nodeGroup, e);
} catch (RuntimeException e) {
@@ -307,9 +313,7 @@ public class OrchestratorImpl implements Orchestrator {
OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock);
ApplicationInstanceReference appRef = OrchestratorUtil.toApplicationInstanceReference(appId, instanceLookupService);
try (MutableStatusRegistry statusRegistry =
- statusService.lockApplicationInstance_forCurrentThreadOnly(
- appRef,
- context.getTimeLeft())) {
+ statusService.lockApplicationInstance_forCurrentThreadOnly(context, appRef)) {
// Short-circuit if already in wanted state
if (status == statusRegistry.getApplicationInstanceStatus()) return;
diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/controller/ClusterControllerClientImpl.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/controller/ClusterControllerClientImpl.java
index 50ffe13b437..04725c330e0 100644
--- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/controller/ClusterControllerClientImpl.java
+++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/controller/ClusterControllerClientImpl.java
@@ -35,7 +35,10 @@ public class ClusterControllerClientImpl implements ClusterControllerClient{
int storageNodeIndex,
ClusterControllerNodeState wantedState) throws IOException {
ClusterControllerStateRequest.State state = new ClusterControllerStateRequest.State(wantedState, REQUEST_REASON);
- ClusterControllerStateRequest stateRequest = new ClusterControllerStateRequest(state, ClusterControllerStateRequest.Condition.SAFE);
+ ClusterControllerStateRequest stateRequest = new ClusterControllerStateRequest(
+ state,
+ ClusterControllerStateRequest.Condition.SAFE,
+ context.isProbe() ? true : null);
ClusterControllerClientTimeouts timeouts = context.getClusterControllerTimeouts();
try {
@@ -67,7 +70,8 @@ public class ClusterControllerClientImpl implements ClusterControllerClient{
OrchestratorContext context,
ClusterControllerNodeState wantedState) throws IOException {
ClusterControllerStateRequest.State state = new ClusterControllerStateRequest.State(wantedState, REQUEST_REASON);
- ClusterControllerStateRequest stateRequest = new ClusterControllerStateRequest(state, ClusterControllerStateRequest.Condition.FORCE);
+ ClusterControllerStateRequest stateRequest = new ClusterControllerStateRequest(
+ state, ClusterControllerStateRequest.Condition.FORCE, null);
ClusterControllerClientTimeouts timeouts = context.getClusterControllerTimeouts();
try {
diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/controller/ClusterControllerStateRequest.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/controller/ClusterControllerStateRequest.java
index 9a29ad04d78..b07a9e2bfa1 100644
--- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/controller/ClusterControllerStateRequest.java
+++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/controller/ClusterControllerStateRequest.java
@@ -1,8 +1,10 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.orchestrator.controller;
+import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
+import javax.annotation.concurrent.Immutable;
import java.util.Collections;
import java.util.Map;
import java.util.Objects;
@@ -10,6 +12,8 @@ import java.util.Objects;
/**
* @author hakonhall
*/
+@Immutable
+@JsonInclude(JsonInclude.Include.NON_NULL)
public class ClusterControllerStateRequest {
@JsonProperty("state")
@@ -18,26 +22,29 @@ public class ClusterControllerStateRequest {
@JsonProperty("condition")
public final Condition condition;
- public ClusterControllerStateRequest(State currentState, Condition condition) {
+ @JsonProperty("probe")
+ public final Boolean probe;
+
+ public ClusterControllerStateRequest(State currentState, Condition condition, Boolean probe) {
Map<String, State> state = Collections.singletonMap("user", currentState);
this.state = Collections.unmodifiableMap(state);
this.condition = condition;
+ this.probe = probe;
}
@Override
- public boolean equals(Object object) {
- if (!(object instanceof ClusterControllerStateRequest)) {
- return false;
- }
-
- final ClusterControllerStateRequest that = (ClusterControllerStateRequest) object;
- return Objects.equals(this.state, that.state)
- && Objects.equals(this.condition, that.condition);
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ ClusterControllerStateRequest that = (ClusterControllerStateRequest) o;
+ return Objects.equals(state, that.state) &&
+ condition == that.condition &&
+ Objects.equals(probe, that.probe);
}
@Override
public int hashCode() {
- return Objects.hash(state, condition);
+ return Objects.hash(state, condition, probe);
}
@Override
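A note on the request body: since ClusterControllerStateRequest is now annotated with @JsonInclude(JsonInclude.Include.NON_NULL) and probe is a boxed Boolean, the "probe" field is serialized only when the orchestrator is actually probing, so ordinary committal requests keep their previous JSON shape. Below is a small runnable Jackson sketch of that behaviour, using a hypothetical Request class rather than the Vespa one and assuming jackson-databind is on the classpath:

    import com.fasterxml.jackson.annotation.JsonInclude;
    import com.fasterxml.jackson.annotation.JsonProperty;
    import com.fasterxml.jackson.databind.ObjectMapper;

    class ProbeFieldSketch {
        @JsonInclude(JsonInclude.Include.NON_NULL)
        public static class Request {
            @JsonProperty("condition") public final String condition = "SAFE";
            @JsonProperty("probe") public final Boolean probe;

            public Request(Boolean probe) { this.probe = probe; }
        }

        public static void main(String[] args) throws Exception {
            ObjectMapper mapper = new ObjectMapper();
            // Prints something like {"condition":"SAFE","probe":true}
            System.out.println(mapper.writeValueAsString(new Request(true)));
            // Prints {"condition":"SAFE"} -- the null probe field is omitted entirely
            System.out.println(mapper.writeValueAsString(new Request(null)));
        }
    }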
diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ApplicationApi.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ApplicationApi.java
index 0ca509d13f1..e2f371a5ce1 100644
--- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ApplicationApi.java
+++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ApplicationApi.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.orchestrator.model;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.vespa.applicationmodel.HostName;
+import com.yahoo.vespa.orchestrator.OrchestratorContext;
import com.yahoo.vespa.orchestrator.status.ApplicationInstanceStatus;
import com.yahoo.vespa.orchestrator.status.HostStatus;
@@ -26,7 +27,7 @@ public interface ApplicationApi {
ApplicationInstanceStatus getApplicationStatus();
- void setHostState(HostName hostName, HostStatus status);
+ void setHostState(OrchestratorContext context, HostName hostName, HostStatus status);
List<HostName> getNodesInGroupWithStatus(HostStatus status);
List<StorageNode> getStorageNodesInGroupInClusterOrder();
diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ApplicationApiImpl.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ApplicationApiImpl.java
index c5bcaf4de82..9ec1697a45f 100644
--- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ApplicationApiImpl.java
+++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ApplicationApiImpl.java
@@ -6,12 +6,12 @@ import com.yahoo.vespa.applicationmodel.ApplicationInstance;
import com.yahoo.vespa.applicationmodel.HostName;
import com.yahoo.vespa.applicationmodel.ServiceCluster;
import com.yahoo.vespa.applicationmodel.ServiceInstance;
+import com.yahoo.vespa.orchestrator.OrchestratorContext;
import com.yahoo.vespa.orchestrator.OrchestratorUtil;
import com.yahoo.vespa.orchestrator.controller.ClusterControllerClientFactory;
import com.yahoo.vespa.orchestrator.status.ApplicationInstanceStatus;
import com.yahoo.vespa.orchestrator.status.HostStatus;
import com.yahoo.vespa.orchestrator.status.MutableStatusRegistry;
-import com.yahoo.vespa.orchestrator.status.ReadOnlyStatusRegistry;
import java.util.Collection;
import java.util.Comparator;
@@ -20,6 +20,7 @@ import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
+import java.util.function.Function;
import java.util.stream.Collectors;
import static com.yahoo.vespa.orchestrator.OrchestratorUtil.getHostsUsedByApplicationInstance;
@@ -29,7 +30,6 @@ public class ApplicationApiImpl implements ApplicationApi {
private final NodeGroup nodeGroup;
private final MutableStatusRegistry hostStatusService;
private final List<ClusterApi> clusterInOrder;
- private final ClusterControllerClientFactory clusterControllerClientFactory;
private final Map<HostName, HostStatus> hostStatusMap;
public ApplicationApiImpl(NodeGroup nodeGroup,
@@ -38,11 +38,9 @@ public class ApplicationApiImpl implements ApplicationApi {
this.applicationInstance = nodeGroup.getApplication();
this.nodeGroup = nodeGroup;
this.hostStatusService = hostStatusService;
- this.hostStatusMap = createHostStatusMap(
- getHostsUsedByApplicationInstance(applicationInstance),
- hostStatusService);
+ Collection<HostName> hosts = getHostsUsedByApplicationInstance(applicationInstance);
+ this.hostStatusMap = hosts.stream().collect(Collectors.toMap(Function.identity(), hostStatusService::getHostStatus));
this.clusterInOrder = makeClustersInOrder(nodeGroup, hostStatusMap, clusterControllerClientFactory);
- this.clusterControllerClientFactory = clusterControllerClientFactory;
}
@Override
@@ -50,14 +48,6 @@ public class ApplicationApiImpl implements ApplicationApi {
return OrchestratorUtil.toApplicationId(applicationInstance.reference());
}
- private static Map<HostName, HostStatus> createHostStatusMap(Collection<HostName> hosts,
- ReadOnlyStatusRegistry hostStatusService) {
- return hosts.stream()
- .collect(Collectors.toMap(
- hostName -> hostName,
- hostName -> hostStatusService.getHostStatus(hostName)));
- }
-
private HostStatus getHostStatus(HostName hostName) {
return hostStatusMap.getOrDefault(hostName, HostStatus.NO_REMARKS);
}
@@ -104,7 +94,7 @@ public class ApplicationApiImpl implements ApplicationApi {
}
@Override
- public void setHostState(HostName hostName, HostStatus status) {
+ public void setHostState(OrchestratorContext context, HostName hostName, HostStatus status) {
hostStatusService.setHostState(hostName, status);
}
diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/StorageNodeImpl.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/StorageNodeImpl.java
index 9900c8de752..0e4fe672725 100644
--- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/StorageNodeImpl.java
+++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/StorageNodeImpl.java
@@ -104,6 +104,12 @@ public class StorageNodeImpl implements StorageNode {
HostedVespaPolicy.SET_NODE_STATE_CONSTRAINT,
"Failed to set state to " + wantedNodeState + " in cluster controller: " + response.reason);
}
+
+ String logSuffix = context.isProbe() ?
+ " would have been set to " + wantedNodeState + " (this is a probe)" :
+ " has been set to " + wantedNodeState;
+ logger.log(LogLevel.INFO, "Storage node " + nodeIndex + " in cluster " + clusterId +
+ " application " + applicationInstance.reference().asString() + " on host " + hostName() + logSuffix);
}
@Override
diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaPolicy.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaPolicy.java
index a781fd2358a..4aa0f3452e3 100644
--- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaPolicy.java
+++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaPolicy.java
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.orchestrator.policy;
-import com.yahoo.log.LogLevel;
import com.yahoo.vespa.applicationmodel.ApplicationInstance;
import com.yahoo.vespa.applicationmodel.HostName;
import com.yahoo.vespa.orchestrator.OrchestratorContext;
@@ -52,13 +51,11 @@ public class HostedVespaPolicy implements Policy {
// These storage nodes are guaranteed to be NO_REMARKS
for (StorageNode storageNode : application.getUpStorageNodesInGroupInClusterOrder()) {
storageNode.setNodeState(context, ClusterControllerNodeState.MAINTENANCE);
- log.log(LogLevel.INFO, "The storage node on " + storageNode.hostName() + " has been set to MAINTENANCE");
}
// Ensure all nodes in the group are marked as allowed to be down
for (HostName hostName : application.getNodesInGroupWithStatus(HostStatus.NO_REMARKS)) {
- application.setHostState(hostName, HostStatus.ALLOWED_TO_BE_DOWN);
- log.log(LogLevel.INFO, hostName + " is now allowed to be down (suspended)");
+ application.setHostState(context, hostName, HostStatus.ALLOWED_TO_BE_DOWN);
}
}
@@ -68,12 +65,10 @@ public class HostedVespaPolicy implements Policy {
// Always defer to Cluster Controller whether it's OK to resume storage node
for (StorageNode storageNode : application.getStorageNodesAllowedToBeDownInGroupInReverseClusterOrder()) {
storageNode.setNodeState(context, ClusterControllerNodeState.UP);
- log.log(LogLevel.INFO, "The storage node on " + storageNode.hostName() + " has been set to UP");
}
for (HostName hostName : application.getNodesInGroupWithStatus(HostStatus.ALLOWED_TO_BE_DOWN)) {
- application.setHostState(hostName, HostStatus.NO_REMARKS);
- log.log(LogLevel.INFO, hostName + " is no longer allowed to be down (resumed)");
+ application.setHostState(context, hostName, HostStatus.NO_REMARKS);
}
}
@@ -98,13 +93,11 @@ public class HostedVespaPolicy implements Policy {
// These storage nodes are guaranteed to be NO_REMARKS
for (StorageNode storageNode : applicationApi.getStorageNodesInGroupInClusterOrder()) {
storageNode.setNodeState(context, ClusterControllerNodeState.DOWN);
- log.log(LogLevel.INFO, "The storage node on " + storageNode.hostName() + " has been set DOWN");
}
// Ensure all nodes in the group are marked as allowed to be down
for (HostName hostName : applicationApi.getNodesInGroupWithStatus(HostStatus.NO_REMARKS)) {
- applicationApi.setHostState(hostName, HostStatus.ALLOWED_TO_BE_DOWN);
- log.log(LogLevel.INFO, hostName + " is now allowed to be down (suspended)");
+ applicationApi.setHostState(context, hostName, HostStatus.ALLOWED_TO_BE_DOWN);
}
}
diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/status/InMemoryStatusService.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/status/InMemoryStatusService.java
index bd5eb6f3e29..70128ae12eb 100644
--- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/status/InMemoryStatusService.java
+++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/status/InMemoryStatusService.java
@@ -3,8 +3,8 @@ package com.yahoo.vespa.orchestrator.status;
import com.yahoo.vespa.applicationmodel.ApplicationInstanceReference;
import com.yahoo.vespa.applicationmodel.HostName;
+import com.yahoo.vespa.orchestrator.OrchestratorContext;
-import java.time.Duration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
@@ -46,8 +46,8 @@ public class InMemoryStatusService implements StatusService {
@Override
public MutableStatusRegistry lockApplicationInstance_forCurrentThreadOnly(
- ApplicationInstanceReference applicationInstanceReference,
- Duration timeout) {
+ OrchestratorContext context,
+ ApplicationInstanceReference applicationInstanceReference) {
Lock lock = instanceLockService.get(applicationInstanceReference);
return new InMemoryMutableStatusRegistry(lock, applicationInstanceReference);
}
diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/status/StatusService.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/status/StatusService.java
index 76adef72b2b..99f6c113193 100644
--- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/status/StatusService.java
+++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/status/StatusService.java
@@ -2,8 +2,8 @@
package com.yahoo.vespa.orchestrator.status;
import com.yahoo.vespa.applicationmodel.ApplicationInstanceReference;
+import com.yahoo.vespa.orchestrator.OrchestratorContext;
-import java.time.Duration;
import java.util.Set;
/**
@@ -25,7 +25,7 @@ public interface StatusService {
* possibly inconsistent snapshot values. It is not recommended that this method is used for anything other
* than monitoring, logging, debugging, etc. It should never be used for multi-step operations (e.g.
* read-then-write) where consistency is required. For those cases, use
- * {@link #lockApplicationInstance_forCurrentThreadOnly(ApplicationInstanceReference, Duration)}.
+ * {@link #lockApplicationInstance_forCurrentThreadOnly(OrchestratorContext, ApplicationInstanceReference)}.
*/
ReadOnlyStatusRegistry forApplicationInstance(ApplicationInstanceReference applicationInstanceReference);
@@ -54,8 +54,8 @@ public interface StatusService {
* This may leave the registry in an inconsistent state (as judged by the client code).
*/
MutableStatusRegistry lockApplicationInstance_forCurrentThreadOnly(
- ApplicationInstanceReference applicationInstanceReference,
- Duration timeout);
+ OrchestratorContext context,
+ ApplicationInstanceReference applicationInstanceReference);
/**
* Returns all application instances that are allowed to be down. The intention is to use this
diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/status/ZookeeperStatusService.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/status/ZookeeperStatusService.java
index 7df29e038c1..3360a12c32e 100644
--- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/status/ZookeeperStatusService.java
+++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/status/ZookeeperStatusService.java
@@ -7,6 +7,7 @@ import com.yahoo.vespa.applicationmodel.ApplicationInstanceReference;
import com.yahoo.vespa.applicationmodel.HostName;
import com.yahoo.vespa.curator.Curator;
import com.yahoo.vespa.curator.Lock;
+import com.yahoo.vespa.orchestrator.OrchestratorContext;
import com.yahoo.vespa.orchestrator.OrchestratorUtil;
import org.apache.curator.framework.recipes.locks.InterProcessSemaphoreMutex;
import org.apache.zookeeper.KeeperException.NoNodeException;
@@ -90,14 +91,15 @@ public class ZookeeperStatusService implements StatusService {
*/
@Override
public MutableStatusRegistry lockApplicationInstance_forCurrentThreadOnly(
- ApplicationInstanceReference applicationInstanceReference,
- Duration timeout) {
+ OrchestratorContext context,
+ ApplicationInstanceReference applicationInstanceReference) {
+ Duration duration = context.getTimeLeft();
String lockPath = applicationInstanceLock2Path(applicationInstanceReference);
Lock lock = new Lock(lockPath, curator);
- lock.acquire(timeout);
+ lock.acquire(duration);
try {
- return new ZkMutableStatusRegistry(lock, applicationInstanceReference);
+ return new ZkMutableStatusRegistry(lock, applicationInstanceReference, context.isProbe());
} catch (Throwable t) {
// In case the constructor throws an exception.
lock.close();
@@ -215,23 +217,31 @@ public class ZookeeperStatusService implements StatusService {
private class ZkMutableStatusRegistry implements MutableStatusRegistry {
private final Lock lock;
private final ApplicationInstanceReference applicationInstanceReference;
+ private final boolean probe;
public ZkMutableStatusRegistry(
Lock lock,
- ApplicationInstanceReference applicationInstanceReference) {
+ ApplicationInstanceReference applicationInstanceReference,
+ boolean probe) {
this.lock = lock;
this.applicationInstanceReference = applicationInstanceReference;
+ this.probe = probe;
}
@Override
public void setHostState(final HostName hostName, final HostStatus status) {
+ if (probe) return;
+ log.log(LogLevel.INFO, "Setting host " + hostName + " to status " + status);
setHostStatus(applicationInstanceReference, hostName, status);
}
@Override
public void setApplicationInstanceStatus(ApplicationInstanceStatus applicationInstanceStatus) {
- String path = applicationInstanceSuspendedPath(applicationInstanceReference);
+ if (probe) return;
+
+ log.log(LogLevel.INFO, "Setting app " + applicationInstanceReference.asString() + " to status " + applicationInstanceStatus);
+ String path = applicationInstanceSuspendedPath(applicationInstanceReference);
try {
switch (applicationInstanceStatus) {
case NO_REMARKS:
diff --git a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/OrchestratorImplTest.java b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/OrchestratorImplTest.java
index 80174d05a54..cc6b9a7dbf7 100644
--- a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/OrchestratorImplTest.java
+++ b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/OrchestratorImplTest.java
@@ -16,6 +16,7 @@ import com.yahoo.vespa.applicationmodel.TenantId;
import com.yahoo.vespa.orchestrator.config.OrchestratorConfig;
import com.yahoo.vespa.orchestrator.controller.ClusterControllerClientFactory;
import com.yahoo.vespa.orchestrator.controller.ClusterControllerClientFactoryMock;
+import com.yahoo.vespa.orchestrator.model.NodeGroup;
import com.yahoo.vespa.orchestrator.policy.BatchHostStateChangeDeniedException;
import com.yahoo.vespa.orchestrator.policy.HostStateChangeDeniedException;
import com.yahoo.vespa.orchestrator.status.HostStatus;
@@ -25,6 +26,7 @@ import com.yahoo.vespa.orchestrator.status.StatusService;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
+import org.mockito.ArgumentCaptor;
import org.mockito.InOrder;
import java.util.Arrays;
@@ -247,8 +249,6 @@ public class OrchestratorImplTest {
// A spy is preferential because suspendAll() relies on delegating the hard work to suspend() and resume().
OrchestratorImpl orchestrator = spy(this.orchestrator);
- OrchestratorContext context = mock(OrchestratorContext.class);
-
orchestrator.suspendAll(
new HostName("parentHostname"),
Arrays.asList(
@@ -261,12 +261,22 @@ public class OrchestratorImplTest {
// TEST6: tenant-id-3:application-instance-3:default
// TEST1: test-tenant-id:application:instance
InOrder order = inOrder(orchestrator);
- order.verify(orchestrator).suspendGroup(any(), eq(DummyInstanceLookupService.TEST3_NODE_GROUP));
- order.verify(orchestrator).suspendGroup(any(), eq(DummyInstanceLookupService.TEST6_NODE_GROUP));
- order.verify(orchestrator).suspendGroup(any(), eq(DummyInstanceLookupService.TEST1_NODE_GROUP));
+ verifySuspendGroup(order, orchestrator, DummyInstanceLookupService.TEST3_NODE_GROUP, true);
+ verifySuspendGroup(order, orchestrator, DummyInstanceLookupService.TEST6_NODE_GROUP, true);
+ verifySuspendGroup(order, orchestrator, DummyInstanceLookupService.TEST1_NODE_GROUP, true);
+ verifySuspendGroup(order, orchestrator, DummyInstanceLookupService.TEST3_NODE_GROUP, false);
+ verifySuspendGroup(order, orchestrator, DummyInstanceLookupService.TEST6_NODE_GROUP, false);
+ verifySuspendGroup(order, orchestrator, DummyInstanceLookupService.TEST1_NODE_GROUP, false);
order.verifyNoMoreInteractions();
}
+ private void verifySuspendGroup(InOrder order, OrchestratorImpl orchestrator, NodeGroup nodeGroup, boolean probe)
+ throws HostStateChangeDeniedException{
+ ArgumentCaptor<OrchestratorContext> argument = ArgumentCaptor.forClass(OrchestratorContext.class);
+ order.verify(orchestrator).suspendGroup(argument.capture(), eq(nodeGroup));
+ assertEquals(probe, argument.getValue().isProbe());
+ }
+
@Test
public void whenSuspendAllFails() throws Exception {
// A spy is preferential because suspendAll() relies on delegating the hard work to suspend() and resume().
diff --git a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/controller/ClusterControllerClientTest.java b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/controller/ClusterControllerClientTest.java
index 35dda403aed..1eca61c8a0a 100644
--- a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/controller/ClusterControllerClientTest.java
+++ b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/controller/ClusterControllerClientTest.java
@@ -4,8 +4,10 @@ package com.yahoo.vespa.orchestrator.controller;
import com.yahoo.vespa.jaxrs.client.JaxRsStrategy;
import com.yahoo.vespa.jaxrs.client.LocalPassThroughJaxRsStrategy;
import com.yahoo.vespa.orchestrator.OrchestratorContext;
+import org.junit.Before;
import org.junit.Test;
+import java.io.IOException;
import java.time.Duration;
import static org.mockito.Matchers.eq;
@@ -18,25 +20,38 @@ public class ClusterControllerClientTest {
private static final String CLUSTER_NAME = "clusterName";
private static final int STORAGE_NODE_INDEX = 0;
- @Test
- public void correctParametersArePassedThrough() throws Exception {
- final ClusterControllerJaxRsApi clusterControllerApi = mock(ClusterControllerJaxRsApi.class);
- final JaxRsStrategy<ClusterControllerJaxRsApi> strategyMock = new LocalPassThroughJaxRsStrategy<>(clusterControllerApi);
- final ClusterControllerClient clusterControllerClient = new ClusterControllerClientImpl(
- strategyMock,
- CLUSTER_NAME);
-
- final ClusterControllerNodeState wantedState = ClusterControllerNodeState.MAINTENANCE;
+ private final ClusterControllerJaxRsApi clusterControllerApi = mock(ClusterControllerJaxRsApi.class);
+ private final JaxRsStrategy<ClusterControllerJaxRsApi> strategyMock = new LocalPassThroughJaxRsStrategy<>(clusterControllerApi);
+ private final ClusterControllerClient clusterControllerClient = new ClusterControllerClientImpl(strategyMock, CLUSTER_NAME);
+ private final ClusterControllerNodeState wantedState = ClusterControllerNodeState.MAINTENANCE;
+ private final OrchestratorContext context = mock(OrchestratorContext.class);
+ private final ClusterControllerClientTimeouts timeouts = mock(ClusterControllerClientTimeouts.class);
+ private final ClusterControllerStateRequest.State state = new ClusterControllerStateRequest.State(wantedState, ClusterControllerClientImpl.REQUEST_REASON);
- OrchestratorContext context = mock(OrchestratorContext.class);
- ClusterControllerClientTimeouts timeouts = mock(ClusterControllerClientTimeouts.class);
+ @Before
+ public void setUp() {
when(context.getClusterControllerTimeouts()).thenReturn(timeouts);
+ when(context.isProbe()).thenReturn(false);
when(timeouts.getServerTimeoutOrThrow()).thenReturn(Duration.ofSeconds(1));
+ }
+
+ @Test
+ public void correctParametersArePassedThrough() throws IOException {
+ setNodeStateAndVerify(null);
+ }
+
+ @Test
+ public void probingIsCorrectlyPassedThrough() throws IOException {
+ when(context.isProbe()).thenReturn(true);
+ setNodeStateAndVerify(true);
+ }
+
+ private void setNodeStateAndVerify(Boolean expectedProbe) throws IOException {
clusterControllerClient.setNodeState(context, STORAGE_NODE_INDEX, wantedState);
final ClusterControllerStateRequest expectedNodeStateRequest = new ClusterControllerStateRequest(
- new ClusterControllerStateRequest.State(wantedState, ClusterControllerClientImpl.REQUEST_REASON),
- ClusterControllerStateRequest.Condition.SAFE);
+ state, ClusterControllerStateRequest.Condition.SAFE, expectedProbe);
+
verify(clusterControllerApi, times(1))
.setNodeState(
eq(CLUSTER_NAME),
diff --git a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaPolicyTest.java b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaPolicyTest.java
index 329c9576f2c..364402de6de 100644
--- a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaPolicyTest.java
+++ b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaPolicyTest.java
@@ -84,9 +84,9 @@ public class HostedVespaPolicyTest {
order.verify(storageNode3).setNodeState(context, ClusterControllerNodeState.MAINTENANCE);
order.verify(applicationApi).getNodesInGroupWithStatus(HostStatus.NO_REMARKS);
- order.verify(applicationApi).setHostState(hostName1, HostStatus.ALLOWED_TO_BE_DOWN);
- order.verify(applicationApi).setHostState(hostName2, HostStatus.ALLOWED_TO_BE_DOWN);
- order.verify(applicationApi).setHostState(hostName3, HostStatus.ALLOWED_TO_BE_DOWN);
+ order.verify(applicationApi).setHostState(context, hostName1, HostStatus.ALLOWED_TO_BE_DOWN);
+ order.verify(applicationApi).setHostState(context, hostName2, HostStatus.ALLOWED_TO_BE_DOWN);
+ order.verify(applicationApi).setHostState(context, hostName3, HostStatus.ALLOWED_TO_BE_DOWN);
order.verifyNoMoreInteractions();
}
@@ -135,9 +135,9 @@ public class HostedVespaPolicyTest {
order.verify(storageNode3).setNodeState(context, ClusterControllerNodeState.DOWN);
order.verify(applicationApi).getNodesInGroupWithStatus(HostStatus.NO_REMARKS);
- order.verify(applicationApi).setHostState(hostName1, HostStatus.ALLOWED_TO_BE_DOWN);
- order.verify(applicationApi).setHostState(hostName2, HostStatus.ALLOWED_TO_BE_DOWN);
- order.verify(applicationApi).setHostState(hostName3, HostStatus.ALLOWED_TO_BE_DOWN);
+ order.verify(applicationApi).setHostState(context, hostName1, HostStatus.ALLOWED_TO_BE_DOWN);
+ order.verify(applicationApi).setHostState(context, hostName2, HostStatus.ALLOWED_TO_BE_DOWN);
+ order.verify(applicationApi).setHostState(context, hostName3, HostStatus.ALLOWED_TO_BE_DOWN);
order.verifyNoMoreInteractions();
}
diff --git a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/HostResourceTest.java b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/HostResourceTest.java
index a9b8127e7fe..035d9cd686f 100644
--- a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/HostResourceTest.java
+++ b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/HostResourceTest.java
@@ -75,7 +75,7 @@ public class HostResourceTest {
static {
when(EVERY_HOST_IS_UP_HOST_STATUS_SERVICE.forApplicationInstance(eq(APPLICATION_INSTANCE_REFERENCE)))
.thenReturn(EVERY_HOST_IS_UP_MUTABLE_HOST_STATUS_REGISTRY);
- when(EVERY_HOST_IS_UP_HOST_STATUS_SERVICE.lockApplicationInstance_forCurrentThreadOnly(eq(APPLICATION_INSTANCE_REFERENCE), any()))
+ when(EVERY_HOST_IS_UP_HOST_STATUS_SERVICE.lockApplicationInstance_forCurrentThreadOnly(any(), eq(APPLICATION_INSTANCE_REFERENCE)))
.thenReturn(EVERY_HOST_IS_UP_MUTABLE_HOST_STATUS_REGISTRY);
when(EVERY_HOST_IS_UP_MUTABLE_HOST_STATUS_REGISTRY.getHostStatus(any()))
.thenReturn(HostStatus.NO_REMARKS);
diff --git a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/status/ZookeeperStatusServiceTest.java b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/status/ZookeeperStatusServiceTest.java
index 44847666670..d57b0106b5b 100644
--- a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/status/ZookeeperStatusServiceTest.java
+++ b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/status/ZookeeperStatusServiceTest.java
@@ -4,6 +4,7 @@ package com.yahoo.vespa.orchestrator.status;
import com.yahoo.log.LogLevel;
import com.yahoo.vespa.applicationmodel.ApplicationInstanceReference;
import com.yahoo.vespa.curator.Curator;
+import com.yahoo.vespa.orchestrator.OrchestratorContext;
import com.yahoo.vespa.orchestrator.TestIds;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.apache.curator.SessionFailRetryLoop.SessionFailedException;
@@ -35,11 +36,14 @@ import static org.hamcrest.core.Is.is;
import static org.hamcrest.core.IsCollectionContaining.hasItem;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
public class ZookeeperStatusServiceTest {
private TestingServer testingServer;
private ZookeeperStatusService zookeeperStatusService;
private Curator curator;
+ private final OrchestratorContext context = mock(OrchestratorContext.class);
@Before
public void setUp() throws Exception {
@@ -48,6 +52,8 @@ public class ZookeeperStatusServiceTest {
testingServer = new TestingServer();
curator = createConnectedCurator(testingServer);
zookeeperStatusService = new ZookeeperStatusService(curator);
+ when(context.getTimeLeft()).thenReturn(Duration.ofSeconds(10));
+ when(context.isProbe()).thenReturn(false);
}
private static Curator createConnectedCuratorFramework(TestingServer server) throws InterruptedException {
@@ -81,9 +87,8 @@ public class ZookeeperStatusServiceTest {
@Test
public void setting_host_state_is_idempotent() {
- try (MutableStatusRegistry statusRegistry = zookeeperStatusService.lockApplicationInstance_forCurrentThreadOnly(
- TestIds.APPLICATION_INSTANCE_REFERENCE,
- Duration.ofSeconds(10))) {
+ try (MutableStatusRegistry statusRegistry = zookeeperStatusService
+ .lockApplicationInstance_forCurrentThreadOnly(context, TestIds.APPLICATION_INSTANCE_REFERENCE)) {
//shuffling to catch "clean database" failures for all cases.
for (HostStatus hostStatus: shuffledList(HostStatus.values())) {
@@ -106,13 +111,12 @@ public class ZookeeperStatusServiceTest {
ZookeeperStatusService zookeeperStatusService2 = new ZookeeperStatusService(curator);
final CompletableFuture<Void> lockedSuccessfullyFuture;
- try (MutableStatusRegistry statusRegistry = zookeeperStatusService.lockApplicationInstance_forCurrentThreadOnly(
- TestIds.APPLICATION_INSTANCE_REFERENCE,
- Duration.ofSeconds(10))) {
+ try (MutableStatusRegistry statusRegistry = zookeeperStatusService
+ .lockApplicationInstance_forCurrentThreadOnly(context, TestIds.APPLICATION_INSTANCE_REFERENCE)) {
lockedSuccessfullyFuture = CompletableFuture.runAsync(() -> {
try (MutableStatusRegistry statusRegistry2 = zookeeperStatusService2
- .lockApplicationInstance_forCurrentThreadOnly(TestIds.APPLICATION_INSTANCE_REFERENCE, Duration.ofSeconds(10)))
+ .lockApplicationInstance_forCurrentThreadOnly(context, TestIds.APPLICATION_INSTANCE_REFERENCE))
{
}
});
@@ -133,14 +137,13 @@ public class ZookeeperStatusServiceTest {
try (Curator curator = createConnectedCuratorFramework(testingServer)) {
ZookeeperStatusService zookeeperStatusService2 = new ZookeeperStatusService(curator);
- try (MutableStatusRegistry statusRegistry = zookeeperStatusService.lockApplicationInstance_forCurrentThreadOnly(
- TestIds.APPLICATION_INSTANCE_REFERENCE, Duration.ofSeconds(10))) {
+ try (MutableStatusRegistry statusRegistry = zookeeperStatusService
+ .lockApplicationInstance_forCurrentThreadOnly(context, TestIds.APPLICATION_INSTANCE_REFERENCE)) {
//must run in separate thread, since having 2 locks in the same thread fails
CompletableFuture<Void> resultOfZkOperationAfterLockFailure = CompletableFuture.runAsync(() -> {
try {
- zookeeperStatusService2.lockApplicationInstance_forCurrentThreadOnly(
- TestIds.APPLICATION_INSTANCE_REFERENCE, Duration.ofSeconds(1));
+ zookeeperStatusService2.lockApplicationInstance_forCurrentThreadOnly(context, TestIds.APPLICATION_INSTANCE_REFERENCE);
fail("Both zookeeper host status services locked simultaneously for the same application instance");
} catch (RuntimeException e) {
}
@@ -213,8 +216,8 @@ public class ZookeeperStatusServiceTest {
is(ApplicationInstanceStatus.NO_REMARKS));
// Suspend
- try (MutableStatusRegistry statusRegistry = zookeeperStatusService.lockApplicationInstance_forCurrentThreadOnly(
- TestIds.APPLICATION_INSTANCE_REFERENCE, Duration.ofSeconds(10))) {
+ try (MutableStatusRegistry statusRegistry = zookeeperStatusService
+ .lockApplicationInstance_forCurrentThreadOnly(context, TestIds.APPLICATION_INSTANCE_REFERENCE)) {
statusRegistry.setApplicationInstanceStatus(ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN);
}
@@ -225,8 +228,8 @@ public class ZookeeperStatusServiceTest {
is(ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN));
// Resume
- try (MutableStatusRegistry statusRegistry = zookeeperStatusService.lockApplicationInstance_forCurrentThreadOnly(
- TestIds.APPLICATION_INSTANCE_REFERENCE, Duration.ofSeconds(10))) {
+ try (MutableStatusRegistry statusRegistry = zookeeperStatusService
+ .lockApplicationInstance_forCurrentThreadOnly(context, TestIds.APPLICATION_INSTANCE_REFERENCE)) {
statusRegistry.setApplicationInstanceStatus(ApplicationInstanceStatus.NO_REMARKS);
}
@@ -243,13 +246,13 @@ public class ZookeeperStatusServiceTest {
= zookeeperStatusService.getAllSuspendedApplications();
assertThat(suspendedApps.size(), is(0));
- try (MutableStatusRegistry statusRegistry = zookeeperStatusService.lockApplicationInstance_forCurrentThreadOnly(
- TestIds.APPLICATION_INSTANCE_REFERENCE, Duration.ofSeconds(10))) {
+ try (MutableStatusRegistry statusRegistry = zookeeperStatusService
+ .lockApplicationInstance_forCurrentThreadOnly(context, TestIds.APPLICATION_INSTANCE_REFERENCE)) {
statusRegistry.setApplicationInstanceStatus(ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN);
}
- try (MutableStatusRegistry statusRegistry = zookeeperStatusService.lockApplicationInstance_forCurrentThreadOnly(
- TestIds.APPLICATION_INSTANCE_REFERENCE2, Duration.ofSeconds(10))) {
+ try (MutableStatusRegistry statusRegistry = zookeeperStatusService
+ .lockApplicationInstance_forCurrentThreadOnly(context, TestIds.APPLICATION_INSTANCE_REFERENCE2)) {
statusRegistry.setApplicationInstanceStatus(ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN);
}