From fb37b7df39bd3e8a422f5087264e4a1accb02ab7 Mon Sep 17 00:00:00 2001
From: Håkon Hallingstad
Date: Mon, 5 Nov 2018 09:01:08 +0100
Subject: Send probe when suspending many nodes

When suspending all nodes on a host, first do a suspend-all probe that will
try to suspend the nodes as normal in the Orchestrator and the cluster
controller, but without actually committing anything. A probe failure results
in the same failure as a non-probe failure: a 409 response with a description
is sent back to the client.
---
 .../vespa/orchestrator/OrchestratorContext.java    | 15 +++-----
 .../yahoo/vespa/orchestrator/OrchestratorImpl.java | 34 ++++++++++--------
 .../controller/ClusterControllerClientImpl.java    |  8 +++--
 .../controller/ClusterControllerStateRequest.java  | 27 ++++++++------
 .../vespa/orchestrator/model/ApplicationApi.java   |  3 +-
 .../orchestrator/model/ApplicationApiImpl.java     | 20 +++--------
 .../vespa/orchestrator/model/StorageNodeImpl.java  |  6 ++++
 .../orchestrator/policy/HostedVespaPolicy.java     | 13 ++-----
 .../orchestrator/status/InMemoryStatusService.java |  6 ++--
 .../vespa/orchestrator/status/StatusService.java   |  8 ++---
 .../status/ZookeeperStatusService.java             | 22 ++++++++----
 .../vespa/orchestrator/OrchestratorImplTest.java   | 20 ++++++++---
 .../controller/ClusterControllerClientTest.java    | 41 +++++++++++++++-------
 .../orchestrator/policy/HostedVespaPolicyTest.java | 12 +++----
 .../orchestrator/resources/HostResourceTest.java   |  2 +-
 .../status/ZookeeperStatusServiceTest.java         | 41 ++++++++++++----------
 16 files changed, 157 insertions(+), 121 deletions(-)

diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/OrchestratorContext.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/OrchestratorContext.java
index f1f572621ce..d3bdaa6dc64 100644
--- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/OrchestratorContext.java
+++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/OrchestratorContext.java
@@ -22,16 +22,16 @@ public class OrchestratorContext {
     private final Clock clock;
     private final TimeBudget timeBudget;
-    private boolean probe;
+    private final boolean probe;
 
     /** Create an OrchestratorContext for operations on multiple applications. */
     public static OrchestratorContext createContextForMultiAppOp(Clock clock) {
-        return new OrchestratorContext(clock, TimeBudget.fromNow(clock, DEFAULT_TIMEOUT_FOR_BATCH_OP), true);
+        return new OrchestratorContext(clock, TimeBudget.fromNow(clock, DEFAULT_TIMEOUT_FOR_BATCH_OP), false);
     }
 
     /** Create an OrchestratorContext for an operation on a single application. */
     public static OrchestratorContext createContextForSingleAppOp(Clock clock) {
-        return new OrchestratorContext(clock, TimeBudget.fromNow(clock, DEFAULT_TIMEOUT_FOR_SINGLE_OP), true);
+        return new OrchestratorContext(clock, TimeBudget.fromNow(clock, DEFAULT_TIMEOUT_FOR_SINGLE_OP), false);
     }
 
     private OrchestratorContext(Clock clock, TimeBudget timeBudget, boolean probe) {
@@ -48,13 +48,6 @@ public class OrchestratorContext {
         return new ClusterControllerClientTimeouts(timeBudget.timeLeftAsTimeBudget());
     }
-
-    /** Mark this operation as a non-committal probe. */
-    public OrchestratorContext markAsProbe() {
-        this.probe = true;
-        return this;
-    }
-
     /** Whether the operation is a no-op probe to test whether it would have succeeded, if it had been committal. */
     public boolean isProbe() {
         return probe;
@@ -69,7 +62,7 @@ public class OrchestratorContext {
     }
 
     /** Create an OrchestratorContext for an operation on a single application, but limited to current timeout.
*/ - public OrchestratorContext createSubcontextForSingleAppOp() { + public OrchestratorContext createSubcontextForSingleAppOp(boolean probe) { Instant now = clock.instant(); Instant deadline = timeBudget.deadline().get(); Instant maxDeadline = now.plus(DEFAULT_TIMEOUT_FOR_SINGLE_OP); diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/OrchestratorImpl.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/OrchestratorImpl.java index 6811788ffb7..77bb1e99e19 100644 --- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/OrchestratorImpl.java +++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/OrchestratorImpl.java @@ -107,7 +107,7 @@ public class OrchestratorImpl implements Orchestrator { ApplicationInstanceReference reference = getApplicationInstance(hostName).reference(); OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock); try (MutableStatusRegistry statusRegistry = statusService - .lockApplicationInstance_forCurrentThreadOnly(reference, context.getTimeLeft())) { + .lockApplicationInstance_forCurrentThreadOnly(context, reference)) { statusRegistry.setHostState(hostName, status); } } @@ -130,9 +130,8 @@ public class OrchestratorImpl implements Orchestrator { ApplicationInstance appInstance = getApplicationInstance(hostName); OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock); - try (MutableStatusRegistry statusRegistry = statusService.lockApplicationInstance_forCurrentThreadOnly( - appInstance.reference(), - context.getTimeLeft())) { + try (MutableStatusRegistry statusRegistry = statusService + .lockApplicationInstance_forCurrentThreadOnly(context, appInstance.reference())) { final HostStatus currentHostState = statusRegistry.getHostStatus(hostName); if (HostStatus.NO_REMARKS == currentHostState) { @@ -159,9 +158,8 @@ public class OrchestratorImpl implements Orchestrator { NodeGroup nodeGroup = new NodeGroup(appInstance, hostName); OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock); - try (MutableStatusRegistry statusRegistry = statusService.lockApplicationInstance_forCurrentThreadOnly( - appInstance.reference(), - context.getTimeLeft())) { + try (MutableStatusRegistry statusRegistry = statusService + .lockApplicationInstance_forCurrentThreadOnly(context, appInstance.reference())) { ApplicationApi applicationApi = new ApplicationApiImpl( nodeGroup, statusRegistry, @@ -181,9 +179,7 @@ public class OrchestratorImpl implements Orchestrator { ApplicationInstanceReference applicationReference = nodeGroup.getApplicationReference(); try (MutableStatusRegistry hostStatusRegistry = - statusService.lockApplicationInstance_forCurrentThreadOnly( - applicationReference, - context.getTimeLeft())) { + statusService.lockApplicationInstance_forCurrentThreadOnly(context, applicationReference)) { ApplicationInstanceStatus appStatus = statusService.forApplicationInstance(applicationReference).getApplicationInstanceStatus(); if (appStatus == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN) { return; @@ -222,6 +218,8 @@ public class OrchestratorImpl implements Orchestrator { @Override public void suspendAll(HostName parentHostname, List hostNames) throws BatchHostStateChangeDeniedException, BatchHostNameNotFoundException, BatchInternalErrorException { + OrchestratorContext context = OrchestratorContext.createContextForMultiAppOp(clock); + List nodeGroupsOrderedByApplication; try { nodeGroupsOrderedByApplication = nodeGroupsOrderedForSuspend(hostNames); @@ -229,10 +227,18 
@@ public class OrchestratorImpl implements Orchestrator { throw new BatchHostNameNotFoundException(parentHostname, hostNames, e); } - OrchestratorContext context = OrchestratorContext.createContextForMultiAppOp(clock); + suspendAllNodeGroups(context, parentHostname, nodeGroupsOrderedByApplication, true); + suspendAllNodeGroups(context, parentHostname, nodeGroupsOrderedByApplication, false); + } + + private void suspendAllNodeGroups(OrchestratorContext context, + HostName parentHostname, + List nodeGroupsOrderedByApplication, + boolean probe) + throws BatchHostStateChangeDeniedException, BatchInternalErrorException { for (NodeGroup nodeGroup : nodeGroupsOrderedByApplication) { try { - suspendGroup(context.createSubcontextForSingleAppOp(), nodeGroup); + suspendGroup(context.createSubcontextForSingleAppOp(probe), nodeGroup); } catch (HostStateChangeDeniedException e) { throw new BatchHostStateChangeDeniedException(parentHostname, nodeGroup, e); } catch (RuntimeException e) { @@ -307,9 +313,7 @@ public class OrchestratorImpl implements Orchestrator { OrchestratorContext context = OrchestratorContext.createContextForSingleAppOp(clock); ApplicationInstanceReference appRef = OrchestratorUtil.toApplicationInstanceReference(appId, instanceLookupService); try (MutableStatusRegistry statusRegistry = - statusService.lockApplicationInstance_forCurrentThreadOnly( - appRef, - context.getTimeLeft())) { + statusService.lockApplicationInstance_forCurrentThreadOnly(context, appRef)) { // Short-circuit if already in wanted state if (status == statusRegistry.getApplicationInstanceStatus()) return; diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/controller/ClusterControllerClientImpl.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/controller/ClusterControllerClientImpl.java index 50ffe13b437..04725c330e0 100644 --- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/controller/ClusterControllerClientImpl.java +++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/controller/ClusterControllerClientImpl.java @@ -35,7 +35,10 @@ public class ClusterControllerClientImpl implements ClusterControllerClient{ int storageNodeIndex, ClusterControllerNodeState wantedState) throws IOException { ClusterControllerStateRequest.State state = new ClusterControllerStateRequest.State(wantedState, REQUEST_REASON); - ClusterControllerStateRequest stateRequest = new ClusterControllerStateRequest(state, ClusterControllerStateRequest.Condition.SAFE); + ClusterControllerStateRequest stateRequest = new ClusterControllerStateRequest( + state, + ClusterControllerStateRequest.Condition.SAFE, + context.isProbe() ? 
true : null); ClusterControllerClientTimeouts timeouts = context.getClusterControllerTimeouts(); try { @@ -67,7 +70,8 @@ public class ClusterControllerClientImpl implements ClusterControllerClient{ OrchestratorContext context, ClusterControllerNodeState wantedState) throws IOException { ClusterControllerStateRequest.State state = new ClusterControllerStateRequest.State(wantedState, REQUEST_REASON); - ClusterControllerStateRequest stateRequest = new ClusterControllerStateRequest(state, ClusterControllerStateRequest.Condition.FORCE); + ClusterControllerStateRequest stateRequest = new ClusterControllerStateRequest( + state, ClusterControllerStateRequest.Condition.FORCE, null); ClusterControllerClientTimeouts timeouts = context.getClusterControllerTimeouts(); try { diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/controller/ClusterControllerStateRequest.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/controller/ClusterControllerStateRequest.java index 9a29ad04d78..b07a9e2bfa1 100644 --- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/controller/ClusterControllerStateRequest.java +++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/controller/ClusterControllerStateRequest.java @@ -1,8 +1,10 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.orchestrator.controller; +import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonProperty; +import javax.annotation.concurrent.Immutable; import java.util.Collections; import java.util.Map; import java.util.Objects; @@ -10,6 +12,8 @@ import java.util.Objects; /** * @author hakonhall */ +@Immutable +@JsonInclude(JsonInclude.Include.NON_NULL) public class ClusterControllerStateRequest { @JsonProperty("state") @@ -18,26 +22,29 @@ public class ClusterControllerStateRequest { @JsonProperty("condition") public final Condition condition; - public ClusterControllerStateRequest(State currentState, Condition condition) { + @JsonProperty("probe") + public final Boolean probe; + + public ClusterControllerStateRequest(State currentState, Condition condition, Boolean probe) { Map state = Collections.singletonMap("user", currentState); this.state = Collections.unmodifiableMap(state); this.condition = condition; + this.probe = probe; } @Override - public boolean equals(Object object) { - if (!(object instanceof ClusterControllerStateRequest)) { - return false; - } - - final ClusterControllerStateRequest that = (ClusterControllerStateRequest) object; - return Objects.equals(this.state, that.state) - && Objects.equals(this.condition, that.condition); + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ClusterControllerStateRequest that = (ClusterControllerStateRequest) o; + return Objects.equals(state, that.state) && + condition == that.condition && + Objects.equals(probe, that.probe); } @Override public int hashCode() { - return Objects.hash(state, condition); + return Objects.hash(state, condition, probe); } @Override diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ApplicationApi.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ApplicationApi.java index 0ca509d13f1..e2f371a5ce1 100644 --- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ApplicationApi.java +++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ApplicationApi.java @@ -3,6 
+3,7 @@ package com.yahoo.vespa.orchestrator.model; import com.yahoo.config.provision.ApplicationId; import com.yahoo.vespa.applicationmodel.HostName; +import com.yahoo.vespa.orchestrator.OrchestratorContext; import com.yahoo.vespa.orchestrator.status.ApplicationInstanceStatus; import com.yahoo.vespa.orchestrator.status.HostStatus; @@ -26,7 +27,7 @@ public interface ApplicationApi { ApplicationInstanceStatus getApplicationStatus(); - void setHostState(HostName hostName, HostStatus status); + void setHostState(OrchestratorContext context, HostName hostName, HostStatus status); List getNodesInGroupWithStatus(HostStatus status); List getStorageNodesInGroupInClusterOrder(); diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ApplicationApiImpl.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ApplicationApiImpl.java index c5bcaf4de82..9ec1697a45f 100644 --- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ApplicationApiImpl.java +++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/ApplicationApiImpl.java @@ -6,12 +6,12 @@ import com.yahoo.vespa.applicationmodel.ApplicationInstance; import com.yahoo.vespa.applicationmodel.HostName; import com.yahoo.vespa.applicationmodel.ServiceCluster; import com.yahoo.vespa.applicationmodel.ServiceInstance; +import com.yahoo.vespa.orchestrator.OrchestratorContext; import com.yahoo.vespa.orchestrator.OrchestratorUtil; import com.yahoo.vespa.orchestrator.controller.ClusterControllerClientFactory; import com.yahoo.vespa.orchestrator.status.ApplicationInstanceStatus; import com.yahoo.vespa.orchestrator.status.HostStatus; import com.yahoo.vespa.orchestrator.status.MutableStatusRegistry; -import com.yahoo.vespa.orchestrator.status.ReadOnlyStatusRegistry; import java.util.Collection; import java.util.Comparator; @@ -20,6 +20,7 @@ import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.function.Function; import java.util.stream.Collectors; import static com.yahoo.vespa.orchestrator.OrchestratorUtil.getHostsUsedByApplicationInstance; @@ -29,7 +30,6 @@ public class ApplicationApiImpl implements ApplicationApi { private final NodeGroup nodeGroup; private final MutableStatusRegistry hostStatusService; private final List clusterInOrder; - private final ClusterControllerClientFactory clusterControllerClientFactory; private final Map hostStatusMap; public ApplicationApiImpl(NodeGroup nodeGroup, @@ -38,11 +38,9 @@ public class ApplicationApiImpl implements ApplicationApi { this.applicationInstance = nodeGroup.getApplication(); this.nodeGroup = nodeGroup; this.hostStatusService = hostStatusService; - this.hostStatusMap = createHostStatusMap( - getHostsUsedByApplicationInstance(applicationInstance), - hostStatusService); + Collection hosts = getHostsUsedByApplicationInstance(applicationInstance); + this.hostStatusMap = hosts.stream().collect(Collectors.toMap(Function.identity(), hostStatusService::getHostStatus)); this.clusterInOrder = makeClustersInOrder(nodeGroup, hostStatusMap, clusterControllerClientFactory); - this.clusterControllerClientFactory = clusterControllerClientFactory; } @Override @@ -50,14 +48,6 @@ public class ApplicationApiImpl implements ApplicationApi { return OrchestratorUtil.toApplicationId(applicationInstance.reference()); } - private static Map createHostStatusMap(Collection hosts, - ReadOnlyStatusRegistry hostStatusService) { - return hosts.stream() - .collect(Collectors.toMap( - hostName -> hostName, - hostName -> 
hostStatusService.getHostStatus(hostName))); - } - private HostStatus getHostStatus(HostName hostName) { return hostStatusMap.getOrDefault(hostName, HostStatus.NO_REMARKS); } @@ -104,7 +94,7 @@ public class ApplicationApiImpl implements ApplicationApi { } @Override - public void setHostState(HostName hostName, HostStatus status) { + public void setHostState(OrchestratorContext context, HostName hostName, HostStatus status) { hostStatusService.setHostState(hostName, status); } diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/StorageNodeImpl.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/StorageNodeImpl.java index 9900c8de752..ade45c6c515 100644 --- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/StorageNodeImpl.java +++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/StorageNodeImpl.java @@ -104,6 +104,12 @@ public class StorageNodeImpl implements StorageNode { HostedVespaPolicy.SET_NODE_STATE_CONSTRAINT, "Failed to set state to " + wantedNodeState + " in cluster controller: " + response.reason); } + + String logSuffix = context.isProbe() ? + " has been set to " + wantedNodeState: + " would have been set to " + wantedNodeState + " (this is a probe)"; + logger.log(LogLevel.INFO, "Storage node " + nodeIndex + " in cluster " + clusterId + + " application " + applicationInstance.reference().asString() + " on host " + hostName() + logSuffix); } @Override diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaPolicy.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaPolicy.java index a781fd2358a..4aa0f3452e3 100644 --- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaPolicy.java +++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/policy/HostedVespaPolicy.java @@ -1,7 +1,6 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.orchestrator.policy; -import com.yahoo.log.LogLevel; import com.yahoo.vespa.applicationmodel.ApplicationInstance; import com.yahoo.vespa.applicationmodel.HostName; import com.yahoo.vespa.orchestrator.OrchestratorContext; @@ -52,13 +51,11 @@ public class HostedVespaPolicy implements Policy { // These storage nodes are guaranteed to be NO_REMARKS for (StorageNode storageNode : application.getUpStorageNodesInGroupInClusterOrder()) { storageNode.setNodeState(context, ClusterControllerNodeState.MAINTENANCE); - log.log(LogLevel.INFO, "The storage node on " + storageNode.hostName() + " has been set to MAINTENANCE"); } // Ensure all nodes in the group are marked as allowed to be down for (HostName hostName : application.getNodesInGroupWithStatus(HostStatus.NO_REMARKS)) { - application.setHostState(hostName, HostStatus.ALLOWED_TO_BE_DOWN); - log.log(LogLevel.INFO, hostName + " is now allowed to be down (suspended)"); + application.setHostState(context, hostName, HostStatus.ALLOWED_TO_BE_DOWN); } } @@ -68,12 +65,10 @@ public class HostedVespaPolicy implements Policy { // Always defer to Cluster Controller whether it's OK to resume storage node for (StorageNode storageNode : application.getStorageNodesAllowedToBeDownInGroupInReverseClusterOrder()) { storageNode.setNodeState(context, ClusterControllerNodeState.UP); - log.log(LogLevel.INFO, "The storage node on " + storageNode.hostName() + " has been set to UP"); } for (HostName hostName : application.getNodesInGroupWithStatus(HostStatus.ALLOWED_TO_BE_DOWN)) { - application.setHostState(hostName, HostStatus.NO_REMARKS); - log.log(LogLevel.INFO, hostName + " is no longer allowed to be down (resumed)"); + application.setHostState(context, hostName, HostStatus.NO_REMARKS); } } @@ -98,13 +93,11 @@ public class HostedVespaPolicy implements Policy { // These storage nodes are guaranteed to be NO_REMARKS for (StorageNode storageNode : applicationApi.getStorageNodesInGroupInClusterOrder()) { storageNode.setNodeState(context, ClusterControllerNodeState.DOWN); - log.log(LogLevel.INFO, "The storage node on " + storageNode.hostName() + " has been set DOWN"); } // Ensure all nodes in the group are marked as allowed to be down for (HostName hostName : applicationApi.getNodesInGroupWithStatus(HostStatus.NO_REMARKS)) { - applicationApi.setHostState(hostName, HostStatus.ALLOWED_TO_BE_DOWN); - log.log(LogLevel.INFO, hostName + " is now allowed to be down (suspended)"); + applicationApi.setHostState(context, hostName, HostStatus.ALLOWED_TO_BE_DOWN); } } diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/status/InMemoryStatusService.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/status/InMemoryStatusService.java index bd5eb6f3e29..70128ae12eb 100644 --- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/status/InMemoryStatusService.java +++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/status/InMemoryStatusService.java @@ -3,8 +3,8 @@ package com.yahoo.vespa.orchestrator.status; import com.yahoo.vespa.applicationmodel.ApplicationInstanceReference; import com.yahoo.vespa.applicationmodel.HostName; +import com.yahoo.vespa.orchestrator.OrchestratorContext; -import java.time.Duration; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -46,8 +46,8 @@ public class InMemoryStatusService implements StatusService { @Override public MutableStatusRegistry lockApplicationInstance_forCurrentThreadOnly( - ApplicationInstanceReference applicationInstanceReference, - Duration timeout) { 
+ OrchestratorContext context, + ApplicationInstanceReference applicationInstanceReference) { Lock lock = instanceLockService.get(applicationInstanceReference); return new InMemoryMutableStatusRegistry(lock, applicationInstanceReference); } diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/status/StatusService.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/status/StatusService.java index 76adef72b2b..99f6c113193 100644 --- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/status/StatusService.java +++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/status/StatusService.java @@ -2,8 +2,8 @@ package com.yahoo.vespa.orchestrator.status; import com.yahoo.vespa.applicationmodel.ApplicationInstanceReference; +import com.yahoo.vespa.orchestrator.OrchestratorContext; -import java.time.Duration; import java.util.Set; /** @@ -25,7 +25,7 @@ public interface StatusService { * possibly inconsistent snapshot values. It is not recommended that this method is used for anything other * than monitoring, logging, debugging, etc. It should never be used for multi-step operations (e.g. * read-then-write) where consistency is required. For those cases, use - * {@link #lockApplicationInstance_forCurrentThreadOnly(ApplicationInstanceReference, Duration)}. + * {@link #lockApplicationInstance_forCurrentThreadOnly(OrchestratorContext, ApplicationInstanceReference)}. */ ReadOnlyStatusRegistry forApplicationInstance(ApplicationInstanceReference applicationInstanceReference); @@ -54,8 +54,8 @@ public interface StatusService { * This may leave the registry in an inconsistent state (as judged by the client code). */ MutableStatusRegistry lockApplicationInstance_forCurrentThreadOnly( - ApplicationInstanceReference applicationInstanceReference, - Duration timeout); + OrchestratorContext context, + ApplicationInstanceReference applicationInstanceReference); /** * Returns all application instances that are allowed to be down. 
The intention is to use this diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/status/ZookeeperStatusService.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/status/ZookeeperStatusService.java index 7df29e038c1..3360a12c32e 100644 --- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/status/ZookeeperStatusService.java +++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/status/ZookeeperStatusService.java @@ -7,6 +7,7 @@ import com.yahoo.vespa.applicationmodel.ApplicationInstanceReference; import com.yahoo.vespa.applicationmodel.HostName; import com.yahoo.vespa.curator.Curator; import com.yahoo.vespa.curator.Lock; +import com.yahoo.vespa.orchestrator.OrchestratorContext; import com.yahoo.vespa.orchestrator.OrchestratorUtil; import org.apache.curator.framework.recipes.locks.InterProcessSemaphoreMutex; import org.apache.zookeeper.KeeperException.NoNodeException; @@ -90,14 +91,15 @@ public class ZookeeperStatusService implements StatusService { */ @Override public MutableStatusRegistry lockApplicationInstance_forCurrentThreadOnly( - ApplicationInstanceReference applicationInstanceReference, - Duration timeout) { + OrchestratorContext context, + ApplicationInstanceReference applicationInstanceReference) { + Duration duration = context.getTimeLeft(); String lockPath = applicationInstanceLock2Path(applicationInstanceReference); Lock lock = new Lock(lockPath, curator); - lock.acquire(timeout); + lock.acquire(duration); try { - return new ZkMutableStatusRegistry(lock, applicationInstanceReference); + return new ZkMutableStatusRegistry(lock, applicationInstanceReference, context.isProbe()); } catch (Throwable t) { // In case the constructor throws an exception. lock.close(); @@ -215,23 +217,31 @@ public class ZookeeperStatusService implements StatusService { private class ZkMutableStatusRegistry implements MutableStatusRegistry { private final Lock lock; private final ApplicationInstanceReference applicationInstanceReference; + private final boolean probe; public ZkMutableStatusRegistry( Lock lock, - ApplicationInstanceReference applicationInstanceReference) { + ApplicationInstanceReference applicationInstanceReference, + boolean probe) { this.lock = lock; this.applicationInstanceReference = applicationInstanceReference; + this.probe = probe; } @Override public void setHostState(final HostName hostName, final HostStatus status) { + if (probe) return; + log.log(LogLevel.INFO, "Setting host " + hostName + " to status " + status); setHostStatus(applicationInstanceReference, hostName, status); } @Override public void setApplicationInstanceStatus(ApplicationInstanceStatus applicationInstanceStatus) { - String path = applicationInstanceSuspendedPath(applicationInstanceReference); + if (probe) return; + + log.log(LogLevel.INFO, "Setting app " + applicationInstanceReference.asString() + " to status " + applicationInstanceStatus); + String path = applicationInstanceSuspendedPath(applicationInstanceReference); try { switch (applicationInstanceStatus) { case NO_REMARKS: diff --git a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/OrchestratorImplTest.java b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/OrchestratorImplTest.java index 80174d05a54..cc6b9a7dbf7 100644 --- a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/OrchestratorImplTest.java +++ b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/OrchestratorImplTest.java @@ -16,6 +16,7 @@ import com.yahoo.vespa.applicationmodel.TenantId; import 
com.yahoo.vespa.orchestrator.config.OrchestratorConfig; import com.yahoo.vespa.orchestrator.controller.ClusterControllerClientFactory; import com.yahoo.vespa.orchestrator.controller.ClusterControllerClientFactoryMock; +import com.yahoo.vespa.orchestrator.model.NodeGroup; import com.yahoo.vespa.orchestrator.policy.BatchHostStateChangeDeniedException; import com.yahoo.vespa.orchestrator.policy.HostStateChangeDeniedException; import com.yahoo.vespa.orchestrator.status.HostStatus; @@ -25,6 +26,7 @@ import com.yahoo.vespa.orchestrator.status.StatusService; import org.junit.After; import org.junit.Before; import org.junit.Test; +import org.mockito.ArgumentCaptor; import org.mockito.InOrder; import java.util.Arrays; @@ -247,8 +249,6 @@ public class OrchestratorImplTest { // A spy is preferential because suspendAll() relies on delegating the hard work to suspend() and resume(). OrchestratorImpl orchestrator = spy(this.orchestrator); - OrchestratorContext context = mock(OrchestratorContext.class); - orchestrator.suspendAll( new HostName("parentHostname"), Arrays.asList( @@ -261,12 +261,22 @@ public class OrchestratorImplTest { // TEST6: tenant-id-3:application-instance-3:default // TEST1: test-tenant-id:application:instance InOrder order = inOrder(orchestrator); - order.verify(orchestrator).suspendGroup(any(), eq(DummyInstanceLookupService.TEST3_NODE_GROUP)); - order.verify(orchestrator).suspendGroup(any(), eq(DummyInstanceLookupService.TEST6_NODE_GROUP)); - order.verify(orchestrator).suspendGroup(any(), eq(DummyInstanceLookupService.TEST1_NODE_GROUP)); + verifySuspendGroup(order, orchestrator, DummyInstanceLookupService.TEST3_NODE_GROUP, true); + verifySuspendGroup(order, orchestrator, DummyInstanceLookupService.TEST6_NODE_GROUP, true); + verifySuspendGroup(order, orchestrator, DummyInstanceLookupService.TEST1_NODE_GROUP, true); + verifySuspendGroup(order, orchestrator, DummyInstanceLookupService.TEST3_NODE_GROUP, false); + verifySuspendGroup(order, orchestrator, DummyInstanceLookupService.TEST6_NODE_GROUP, false); + verifySuspendGroup(order, orchestrator, DummyInstanceLookupService.TEST1_NODE_GROUP, false); order.verifyNoMoreInteractions(); } + private void verifySuspendGroup(InOrder order, OrchestratorImpl orchestrator, NodeGroup nodeGroup, boolean probe) + throws HostStateChangeDeniedException{ + ArgumentCaptor argument = ArgumentCaptor.forClass(OrchestratorContext.class); + order.verify(orchestrator).suspendGroup(argument.capture(), eq(nodeGroup)); + assertEquals(probe, argument.getValue().isProbe()); + } + @Test public void whenSuspendAllFails() throws Exception { // A spy is preferential because suspendAll() relies on delegating the hard work to suspend() and resume(). 
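
Note on the wire-format change in ClusterControllerStateRequest above: the new "probe" field is a Boolean rather than a boolean, and the class-level @JsonInclude(NON_NULL) means the field is simply omitted for ordinary (non-probe) requests, so the JSON sent to older cluster controllers is unchanged. Below is a minimal, self-contained sketch of that Jackson behaviour; the StateRequest class is a simplified stand-in for illustration, not the actual Vespa class.

import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.ObjectMapper;

public class ProbeFieldSerializationSketch {

    /** Simplified stand-in for ClusterControllerStateRequest: only "condition" and the new "probe" field. */
    @JsonInclude(JsonInclude.Include.NON_NULL)
    static class StateRequest {
        @JsonProperty("condition")
        public final String condition;

        @JsonProperty("probe")
        public final Boolean probe;   // null means "not a probe"; NON_NULL keeps it off the wire

        StateRequest(String condition, Boolean probe) {
            this.condition = condition;
            this.probe = probe;
        }
    }

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        // probe == null: the "probe" key is omitted, so the body looks exactly like a pre-probe request
        System.out.println(mapper.writeValueAsString(new StateRequest("SAFE", null)));
        // probe == true: the "probe" key is included alongside "condition"
        System.out.println(mapper.writeValueAsString(new StateRequest("SAFE", true)));
    }
}

Because a non-probe request omits the field instead of sending probe=false, this patch does not change the request body for any existing caller.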
diff --git a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/controller/ClusterControllerClientTest.java b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/controller/ClusterControllerClientTest.java index 35dda403aed..1eca61c8a0a 100644 --- a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/controller/ClusterControllerClientTest.java +++ b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/controller/ClusterControllerClientTest.java @@ -4,8 +4,10 @@ package com.yahoo.vespa.orchestrator.controller; import com.yahoo.vespa.jaxrs.client.JaxRsStrategy; import com.yahoo.vespa.jaxrs.client.LocalPassThroughJaxRsStrategy; import com.yahoo.vespa.orchestrator.OrchestratorContext; +import org.junit.Before; import org.junit.Test; +import java.io.IOException; import java.time.Duration; import static org.mockito.Matchers.eq; @@ -18,25 +20,38 @@ public class ClusterControllerClientTest { private static final String CLUSTER_NAME = "clusterName"; private static final int STORAGE_NODE_INDEX = 0; - @Test - public void correctParametersArePassedThrough() throws Exception { - final ClusterControllerJaxRsApi clusterControllerApi = mock(ClusterControllerJaxRsApi.class); - final JaxRsStrategy strategyMock = new LocalPassThroughJaxRsStrategy<>(clusterControllerApi); - final ClusterControllerClient clusterControllerClient = new ClusterControllerClientImpl( - strategyMock, - CLUSTER_NAME); - - final ClusterControllerNodeState wantedState = ClusterControllerNodeState.MAINTENANCE; + private final ClusterControllerJaxRsApi clusterControllerApi = mock(ClusterControllerJaxRsApi.class); + private final JaxRsStrategy strategyMock = new LocalPassThroughJaxRsStrategy<>(clusterControllerApi); + private final ClusterControllerClient clusterControllerClient = new ClusterControllerClientImpl(strategyMock, CLUSTER_NAME); + private final ClusterControllerNodeState wantedState = ClusterControllerNodeState.MAINTENANCE; + private final OrchestratorContext context = mock(OrchestratorContext.class); + private final ClusterControllerClientTimeouts timeouts = mock(ClusterControllerClientTimeouts.class); + private final ClusterControllerStateRequest.State state = new ClusterControllerStateRequest.State(wantedState, ClusterControllerClientImpl.REQUEST_REASON); - OrchestratorContext context = mock(OrchestratorContext.class); - ClusterControllerClientTimeouts timeouts = mock(ClusterControllerClientTimeouts.class); + @Before + public void setUp() { when(context.getClusterControllerTimeouts()).thenReturn(timeouts); + when(context.isProbe()).thenReturn(false); when(timeouts.getServerTimeoutOrThrow()).thenReturn(Duration.ofSeconds(1)); + } + + @Test + public void correctParametersArePassedThrough() throws IOException { + setNodeStateAndVerify(null); + } + + @Test + public void probingIsCorrectlyPassedThrough() throws IOException { + when(context.isProbe()).thenReturn(true); + setNodeStateAndVerify(true); + } + + private void setNodeStateAndVerify(Boolean expectedProbe) throws IOException { clusterControllerClient.setNodeState(context, STORAGE_NODE_INDEX, wantedState); final ClusterControllerStateRequest expectedNodeStateRequest = new ClusterControllerStateRequest( - new ClusterControllerStateRequest.State(wantedState, ClusterControllerClientImpl.REQUEST_REASON), - ClusterControllerStateRequest.Condition.SAFE); + state, ClusterControllerStateRequest.Condition.SAFE, expectedProbe); + verify(clusterControllerApi, times(1)) .setNodeState( eq(CLUSTER_NAME), diff --git 
a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaPolicyTest.java b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaPolicyTest.java index 329c9576f2c..364402de6de 100644 --- a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaPolicyTest.java +++ b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/policy/HostedVespaPolicyTest.java @@ -84,9 +84,9 @@ public class HostedVespaPolicyTest { order.verify(storageNode3).setNodeState(context, ClusterControllerNodeState.MAINTENANCE); order.verify(applicationApi).getNodesInGroupWithStatus(HostStatus.NO_REMARKS); - order.verify(applicationApi).setHostState(hostName1, HostStatus.ALLOWED_TO_BE_DOWN); - order.verify(applicationApi).setHostState(hostName2, HostStatus.ALLOWED_TO_BE_DOWN); - order.verify(applicationApi).setHostState(hostName3, HostStatus.ALLOWED_TO_BE_DOWN); + order.verify(applicationApi).setHostState(context, hostName1, HostStatus.ALLOWED_TO_BE_DOWN); + order.verify(applicationApi).setHostState(context, hostName2, HostStatus.ALLOWED_TO_BE_DOWN); + order.verify(applicationApi).setHostState(context, hostName3, HostStatus.ALLOWED_TO_BE_DOWN); order.verifyNoMoreInteractions(); } @@ -135,9 +135,9 @@ public class HostedVespaPolicyTest { order.verify(storageNode3).setNodeState(context, ClusterControllerNodeState.DOWN); order.verify(applicationApi).getNodesInGroupWithStatus(HostStatus.NO_REMARKS); - order.verify(applicationApi).setHostState(hostName1, HostStatus.ALLOWED_TO_BE_DOWN); - order.verify(applicationApi).setHostState(hostName2, HostStatus.ALLOWED_TO_BE_DOWN); - order.verify(applicationApi).setHostState(hostName3, HostStatus.ALLOWED_TO_BE_DOWN); + order.verify(applicationApi).setHostState(context, hostName1, HostStatus.ALLOWED_TO_BE_DOWN); + order.verify(applicationApi).setHostState(context, hostName2, HostStatus.ALLOWED_TO_BE_DOWN); + order.verify(applicationApi).setHostState(context, hostName3, HostStatus.ALLOWED_TO_BE_DOWN); order.verifyNoMoreInteractions(); } diff --git a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/HostResourceTest.java b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/HostResourceTest.java index a9b8127e7fe..035d9cd686f 100644 --- a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/HostResourceTest.java +++ b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/HostResourceTest.java @@ -75,7 +75,7 @@ public class HostResourceTest { static { when(EVERY_HOST_IS_UP_HOST_STATUS_SERVICE.forApplicationInstance(eq(APPLICATION_INSTANCE_REFERENCE))) .thenReturn(EVERY_HOST_IS_UP_MUTABLE_HOST_STATUS_REGISTRY); - when(EVERY_HOST_IS_UP_HOST_STATUS_SERVICE.lockApplicationInstance_forCurrentThreadOnly(eq(APPLICATION_INSTANCE_REFERENCE), any())) + when(EVERY_HOST_IS_UP_HOST_STATUS_SERVICE.lockApplicationInstance_forCurrentThreadOnly(any(), eq(APPLICATION_INSTANCE_REFERENCE))) .thenReturn(EVERY_HOST_IS_UP_MUTABLE_HOST_STATUS_REGISTRY); when(EVERY_HOST_IS_UP_MUTABLE_HOST_STATUS_REGISTRY.getHostStatus(any())) .thenReturn(HostStatus.NO_REMARKS); diff --git a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/status/ZookeeperStatusServiceTest.java b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/status/ZookeeperStatusServiceTest.java index 44847666670..d57b0106b5b 100644 --- a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/status/ZookeeperStatusServiceTest.java +++ b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/status/ZookeeperStatusServiceTest.java @@ -4,6 
+4,7 @@ package com.yahoo.vespa.orchestrator.status; import com.yahoo.log.LogLevel; import com.yahoo.vespa.applicationmodel.ApplicationInstanceReference; import com.yahoo.vespa.curator.Curator; +import com.yahoo.vespa.orchestrator.OrchestratorContext; import com.yahoo.vespa.orchestrator.TestIds; import org.apache.commons.lang.exception.ExceptionUtils; import org.apache.curator.SessionFailRetryLoop.SessionFailedException; @@ -35,11 +36,14 @@ import static org.hamcrest.core.Is.is; import static org.hamcrest.core.IsCollectionContaining.hasItem; import static org.junit.Assert.assertThat; import static org.junit.Assert.fail; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class ZookeeperStatusServiceTest { private TestingServer testingServer; private ZookeeperStatusService zookeeperStatusService; private Curator curator; + private final OrchestratorContext context = mock(OrchestratorContext.class); @Before public void setUp() throws Exception { @@ -48,6 +52,8 @@ public class ZookeeperStatusServiceTest { testingServer = new TestingServer(); curator = createConnectedCurator(testingServer); zookeeperStatusService = new ZookeeperStatusService(curator); + when(context.getTimeLeft()).thenReturn(Duration.ofSeconds(10)); + when(context.isProbe()).thenReturn(false); } private static Curator createConnectedCuratorFramework(TestingServer server) throws InterruptedException { @@ -81,9 +87,8 @@ public class ZookeeperStatusServiceTest { @Test public void setting_host_state_is_idempotent() { - try (MutableStatusRegistry statusRegistry = zookeeperStatusService.lockApplicationInstance_forCurrentThreadOnly( - TestIds.APPLICATION_INSTANCE_REFERENCE, - Duration.ofSeconds(10))) { + try (MutableStatusRegistry statusRegistry = zookeeperStatusService + .lockApplicationInstance_forCurrentThreadOnly(context, TestIds.APPLICATION_INSTANCE_REFERENCE)) { //shuffling to catch "clean database" failures for all cases. 
for (HostStatus hostStatus: shuffledList(HostStatus.values())) { @@ -106,13 +111,12 @@ public class ZookeeperStatusServiceTest { ZookeeperStatusService zookeeperStatusService2 = new ZookeeperStatusService(curator); final CompletableFuture lockedSuccessfullyFuture; - try (MutableStatusRegistry statusRegistry = zookeeperStatusService.lockApplicationInstance_forCurrentThreadOnly( - TestIds.APPLICATION_INSTANCE_REFERENCE, - Duration.ofSeconds(10))) { + try (MutableStatusRegistry statusRegistry = zookeeperStatusService + .lockApplicationInstance_forCurrentThreadOnly(context, TestIds.APPLICATION_INSTANCE_REFERENCE)) { lockedSuccessfullyFuture = CompletableFuture.runAsync(() -> { try (MutableStatusRegistry statusRegistry2 = zookeeperStatusService2 - .lockApplicationInstance_forCurrentThreadOnly(TestIds.APPLICATION_INSTANCE_REFERENCE, Duration.ofSeconds(10))) + .lockApplicationInstance_forCurrentThreadOnly(context, TestIds.APPLICATION_INSTANCE_REFERENCE)) { } }); @@ -133,14 +137,13 @@ public class ZookeeperStatusServiceTest { try (Curator curator = createConnectedCuratorFramework(testingServer)) { ZookeeperStatusService zookeeperStatusService2 = new ZookeeperStatusService(curator); - try (MutableStatusRegistry statusRegistry = zookeeperStatusService.lockApplicationInstance_forCurrentThreadOnly( - TestIds.APPLICATION_INSTANCE_REFERENCE, Duration.ofSeconds(10))) { + try (MutableStatusRegistry statusRegistry = zookeeperStatusService + .lockApplicationInstance_forCurrentThreadOnly(context, TestIds.APPLICATION_INSTANCE_REFERENCE)) { //must run in separate thread, since having 2 locks in the same thread fails CompletableFuture resultOfZkOperationAfterLockFailure = CompletableFuture.runAsync(() -> { try { - zookeeperStatusService2.lockApplicationInstance_forCurrentThreadOnly( - TestIds.APPLICATION_INSTANCE_REFERENCE, Duration.ofSeconds(1)); + zookeeperStatusService2.lockApplicationInstance_forCurrentThreadOnly(context, TestIds.APPLICATION_INSTANCE_REFERENCE); fail("Both zookeeper host status services locked simultaneously for the same application instance"); } catch (RuntimeException e) { } @@ -213,8 +216,8 @@ public class ZookeeperStatusServiceTest { is(ApplicationInstanceStatus.NO_REMARKS)); // Suspend - try (MutableStatusRegistry statusRegistry = zookeeperStatusService.lockApplicationInstance_forCurrentThreadOnly( - TestIds.APPLICATION_INSTANCE_REFERENCE, Duration.ofSeconds(10))) { + try (MutableStatusRegistry statusRegistry = zookeeperStatusService + .lockApplicationInstance_forCurrentThreadOnly(context, TestIds.APPLICATION_INSTANCE_REFERENCE)) { statusRegistry.setApplicationInstanceStatus(ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN); } @@ -225,8 +228,8 @@ public class ZookeeperStatusServiceTest { is(ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN)); // Resume - try (MutableStatusRegistry statusRegistry = zookeeperStatusService.lockApplicationInstance_forCurrentThreadOnly( - TestIds.APPLICATION_INSTANCE_REFERENCE, Duration.ofSeconds(10))) { + try (MutableStatusRegistry statusRegistry = zookeeperStatusService + .lockApplicationInstance_forCurrentThreadOnly(context, TestIds.APPLICATION_INSTANCE_REFERENCE)) { statusRegistry.setApplicationInstanceStatus(ApplicationInstanceStatus.NO_REMARKS); } @@ -243,13 +246,13 @@ public class ZookeeperStatusServiceTest { = zookeeperStatusService.getAllSuspendedApplications(); assertThat(suspendedApps.size(), is(0)); - try (MutableStatusRegistry statusRegistry = zookeeperStatusService.lockApplicationInstance_forCurrentThreadOnly( - TestIds.APPLICATION_INSTANCE_REFERENCE, 
Duration.ofSeconds(10))) { + try (MutableStatusRegistry statusRegistry = zookeeperStatusService + .lockApplicationInstance_forCurrentThreadOnly(context, TestIds.APPLICATION_INSTANCE_REFERENCE)) { statusRegistry.setApplicationInstanceStatus(ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN); } - try (MutableStatusRegistry statusRegistry = zookeeperStatusService.lockApplicationInstance_forCurrentThreadOnly( - TestIds.APPLICATION_INSTANCE_REFERENCE2, Duration.ofSeconds(10))) { + try (MutableStatusRegistry statusRegistry = zookeeperStatusService + .lockApplicationInstance_forCurrentThreadOnly(context, TestIds.APPLICATION_INSTANCE_REFERENCE2)) { statusRegistry.setApplicationInstanceStatus(ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN); } -- cgit v1.2.3 From f3605257d66124d23d49d73db744e7adc0770f41 Mon Sep 17 00:00:00 2001 From: Håkon Hallingstad Date: Mon, 5 Nov 2018 14:48:54 +0100 Subject: Fix probe message --- .../main/java/com/yahoo/vespa/orchestrator/model/StorageNodeImpl.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/StorageNodeImpl.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/StorageNodeImpl.java index ade45c6c515..0e4fe672725 100644 --- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/StorageNodeImpl.java +++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/model/StorageNodeImpl.java @@ -106,8 +106,8 @@ public class StorageNodeImpl implements StorageNode { } String logSuffix = context.isProbe() ? - " has been set to " + wantedNodeState: - " would have been set to " + wantedNodeState + " (this is a probe)"; + " would have been set to " + wantedNodeState + " (this is a probe)" : + " has been set to " + wantedNodeState; logger.log(LogLevel.INFO, "Storage node " + nodeIndex + " in cluster " + clusterId + " application " + applicationInstance.reference().asString() + " on host " + hostName() + logSuffix); } -- cgit v1.2.3
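
Taken together, the two patches turn suspendAll into a probe-then-commit operation: every node group is first run through the full Orchestrator and cluster-controller permission checks with probe=true, during which nothing is written to ZooKeeper and no cluster-controller state changes, and only if all probes succeed is the same loop repeated with probe=false to commit. A rough, self-contained sketch of that control flow follows; suspendGroup and PermissionDeniedException are illustrative stand-ins, not the actual Vespa signatures.

import java.util.Arrays;
import java.util.List;

/** Minimal illustration of the probe-then-commit pattern used by suspendAll (names are hypothetical). */
public class ProbeThenCommitSketch {

    static class PermissionDeniedException extends Exception {
        PermissionDeniedException(String message) { super(message); }
    }

    /** Pretends to suspend one node group; when probe is true, nothing is committed. */
    static void suspendGroup(String nodeGroup, boolean probe) throws PermissionDeniedException {
        if (nodeGroup.contains("unhealthy")) {
            throw new PermissionDeniedException("Would take " + nodeGroup + " below redundancy");
        }
        System.out.println((probe ? "[probe] would suspend " : "[commit] suspended ") + nodeGroup);
    }

    /** First pass probes every group; the second pass commits only if all probes succeeded. */
    static void suspendAll(List<String> nodeGroupsOrderedByApplication) throws PermissionDeniedException {
        for (boolean probe : new boolean[] {true, false}) {
            for (String nodeGroup : nodeGroupsOrderedByApplication) {
                suspendGroup(nodeGroup, probe);   // a probe failure aborts before anything is committed
            }
        }
    }

    public static void main(String[] args) throws PermissionDeniedException {
        suspendAll(Arrays.asList("app1/group0", "app2/group0"));
    }
}

The point of the probe pass is that a batch suspend either succeeds for every node group or fails before any group has been suspended, so a 409 returned from the probe leaves the hosts untouched.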