diff options
author | Arnstein Ressem <aressem@yahoo-inc.com> | 2016-10-10 14:24:38 +0200 |
---|---|---|
committer | Arnstein Ressem <aressem@yahoo-inc.com> | 2016-10-10 14:24:38 +0200 |
commit | c355f4c97a5455f46ff9c779b6320060f67211d0 (patch) | |
tree | fc8005b46c3661d02a6c2cc2c810af21a5ae85eb | |
parent | 2eacefe6b4c7b7981c0fcec0a1fa5fdaa933ec36 (diff) | |
parent | 6abdd3d8960ce01422e0cc902cba7e2fa9facc67 (diff) |
Merge branch 'master' into aressem/dont-allow-unresolved-symbols-in-shared-libs-or-executables
838 files changed, 18531 insertions, 15760 deletions
diff --git a/application-preprocessor/src/main/java/com/yahoo/application/preprocessor/ApplicationPreprocessor.java b/application-preprocessor/src/main/java/com/yahoo/application/preprocessor/ApplicationPreprocessor.java index 1762a4b9884..fe9225cd7a6 100644 --- a/application-preprocessor/src/main/java/com/yahoo/application/preprocessor/ApplicationPreprocessor.java +++ b/application-preprocessor/src/main/java/com/yahoo/application/preprocessor/ApplicationPreprocessor.java @@ -42,7 +42,7 @@ public class ApplicationPreprocessor { new Zone(environment.orElse(Environment.defaultEnvironment()), region.orElse(RegionName.defaultName())), (a, b) -> { }, logger); - preprocessed.validateXML(logger); + preprocessed.validateXML(); } diff --git a/application/src/main/java/com/yahoo/application/Application.java b/application/src/main/java/com/yahoo/application/Application.java index cc1b785ae0b..cfcce72487b 100644 --- a/application/src/main/java/com/yahoo/application/Application.java +++ b/application/src/main/java/com/yahoo/application/Application.java @@ -44,6 +44,13 @@ import java.util.*; @Beta public final class Application implements AutoCloseable { + /** + * This system property is set to "true" upon creation of an Application. + * This is useful for components which are created by dependendy injection which needs to modify + * their behavior to function without reliance on any processes outside the JVM. 
+ */ + public static final String vespaLocalProperty = "vespa.local"; + private final JDisc container; private final List<ContentCluster> contentClusters; private final Path path; @@ -51,6 +58,7 @@ public final class Application implements AutoCloseable { // For internal use only Application(Path path, Networking networking, boolean deletePathWhenClosing) { + System.setProperty(vespaLocalProperty, "true"); this.path = path; this.deletePathWhenClosing = deletePathWhenClosing; contentClusters = ContentCluster.fromPath(path); diff --git a/application/src/main/java/com/yahoo/application/container/JDisc.java b/application/src/main/java/com/yahoo/application/container/JDisc.java index 0c6caf9fdf9..dba16a0e3fe 100644 --- a/application/src/main/java/com/yahoo/application/container/JDisc.java +++ b/application/src/main/java/com/yahoo/application/container/JDisc.java @@ -23,6 +23,7 @@ import com.yahoo.jdisc.test.TestDriver; import com.yahoo.processing.handler.ProcessingHandler; import com.yahoo.search.handler.SearchHandler; +import java.io.File; import java.nio.file.Path; /** @@ -97,7 +98,7 @@ public final class JDisc implements AutoCloseable { * @param networking enabled or disabled * @return a new JDisc instance */ - public static JDisc fromPath(final Path path, Networking networking) { + public static JDisc fromPath(Path path, Networking networking) { return new JDisc(path, false, networking, new ConfigModelRepo()); } @@ -105,7 +106,7 @@ public final class JDisc implements AutoCloseable { * Create a jDisc instance which is given a config model repo (in which (mock) content clusters * can be looked up). 
*/ - public static JDisc fromPath(final Path path, Networking networking, ConfigModelRepo configModelRepo) { + public static JDisc fromPath(Path path, Networking networking, ConfigModelRepo configModelRepo) { return new JDisc(path, false, networking, configModelRepo); } diff --git a/application/src/test/java/com/yahoo/application/ApplicationTest.java b/application/src/test/java/com/yahoo/application/ApplicationTest.java index 6f4e6103743..7b515cb843b 100644 --- a/application/src/test/java/com/yahoo/application/ApplicationTest.java +++ b/application/src/test/java/com/yahoo/application/ApplicationTest.java @@ -28,6 +28,7 @@ import org.apache.http.HttpResponse; import org.apache.http.client.HttpClient; import org.apache.http.client.methods.HttpGet; import org.apache.http.impl.client.DefaultHttpClient; +import org.junit.Ignore; import org.junit.Test; import java.io.BufferedReader; @@ -363,7 +364,7 @@ public class ApplicationTest { assertEquals(200, statusCode); } } - + private static int getFreePort() throws IOException { try (ServerSocket socket = new ServerSocket(0)) { socket.setReuseAddress(true); diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/AnnotatedClusterState.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/AnnotatedClusterState.java new file mode 100644 index 00000000000..05a66ddbf2b --- /dev/null +++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/AnnotatedClusterState.java @@ -0,0 +1,69 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.vespa.clustercontroller.core; + +import com.yahoo.vdslib.state.ClusterState; +import com.yahoo.vdslib.state.Node; + +import java.util.Collections; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; + +public class AnnotatedClusterState { + private final ClusterState clusterState; + private final Map<Node, NodeStateReason> nodeStateReasons; + private final Optional<ClusterStateReason> clusterStateReason; + + public AnnotatedClusterState(ClusterState clusterState, + Optional<ClusterStateReason> clusterStateReason, + Map<Node, NodeStateReason> nodeStateReasons) + { + this.clusterState = clusterState; + this.clusterStateReason = clusterStateReason; + this.nodeStateReasons = nodeStateReasons; + } + + public static AnnotatedClusterState emptyState() { + return new AnnotatedClusterState(ClusterState.emptyState(), Optional.empty(), emptyNodeStateReasons()); + } + + static Map<Node, NodeStateReason> emptyNodeStateReasons() { + return Collections.emptyMap(); + } + + public ClusterState getClusterState() { + return clusterState; + } + + public Map<Node, NodeStateReason> getNodeStateReasons() { + return Collections.unmodifiableMap(nodeStateReasons); + } + + public Optional<ClusterStateReason> getClusterStateReason() { + return clusterStateReason; + } + + @Override + public String toString() { + return clusterState.toString(); + } + + public String toString(boolean verbose) { + return clusterState.toString(verbose); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AnnotatedClusterState that = (AnnotatedClusterState) o; + return Objects.equals(clusterState, that.clusterState) && + Objects.equals(nodeStateReasons, that.nodeStateReasons) && + Objects.equals(clusterStateReason, that.clusterStateReason); + } + + @Override + public int hashCode() { + return Objects.hash(clusterState, nodeStateReasons, clusterStateReason); + } +} diff --git 
a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateGenerator.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateGenerator.java new file mode 100644 index 00000000000..e6fbed71153 --- /dev/null +++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateGenerator.java @@ -0,0 +1,345 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.clustercontroller.core; + +import com.yahoo.vdslib.state.ClusterState; +import com.yahoo.vdslib.state.Node; +import com.yahoo.vdslib.state.NodeState; +import com.yahoo.vdslib.state.NodeType; +import com.yahoo.vdslib.state.State; + +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.TreeMap; + +/** + * Pure functional cluster state generator which deterministically constructs a full + * cluster state given the state of the content cluster, a set of cluster controller + * configuration parameters and the current time. + * + * State version tracking is considered orthogonal to state generation. Therefore, + * cluster state version is _not_ set here; its incrementing must be handled by the + * caller. 
+ */ +public class ClusterStateGenerator { + + static class Params { + public ContentCluster cluster; + public Map<NodeType, Integer> transitionTimes; + public long currentTimeInMillis = 0; + public int maxPrematureCrashes = 0; + public int minStorageNodesUp = 1; + public int minDistributorNodesUp = 1; + public double minRatioOfStorageNodesUp = 0.0; + public double minRatioOfDistributorNodesUp = 0.0; + public double minNodeRatioPerGroup = 0.0; + public int idealDistributionBits = 16; + public int highestObservedDistributionBitCount = 16; + public int lowestObservedDistributionBitCount = 16; + public int maxInitProgressTimeMs = 5000; + + Params() { + this.transitionTimes = buildTransitionTimeMap(0, 0); + } + + // FIXME de-dupe + static Map<NodeType, Integer> buildTransitionTimeMap(int distributorTransitionTimeMs, int storageTransitionTimeMs) { + Map<com.yahoo.vdslib.state.NodeType, java.lang.Integer> maxTransitionTime = new TreeMap<>(); + maxTransitionTime.put(com.yahoo.vdslib.state.NodeType.DISTRIBUTOR, distributorTransitionTimeMs); + maxTransitionTime.put(com.yahoo.vdslib.state.NodeType.STORAGE, storageTransitionTimeMs); + return maxTransitionTime; + } + + Params cluster(ContentCluster cluster) { + this.cluster = cluster; + return this; + } + Params maxInitProgressTime(int maxTimeMs) { + this.maxInitProgressTimeMs = maxTimeMs; + return this; + } + Params transitionTimes(int timeMs) { + this.transitionTimes = buildTransitionTimeMap(timeMs, timeMs); + return this; + } + Params transitionTimes(Map<NodeType, Integer> timesMs) { + this.transitionTimes = timesMs; + return this; + } + Params currentTimeInMilllis(long currentTimeMs) { + this.currentTimeInMillis = currentTimeMs; + return this; + } + Params maxPrematureCrashes(int count) { + this.maxPrematureCrashes = count; + return this; + } + Params minStorageNodesUp(int nodes) { + this.minStorageNodesUp = nodes; + return this; + } + Params minDistributorNodesUp(int nodes) { + this.minDistributorNodesUp = nodes; + return 
this; + } + Params minRatioOfStorageNodesUp(double minRatio) { + this.minRatioOfStorageNodesUp = minRatio; + return this; + } + Params minRatioOfDistributorNodesUp(double minRatio) { + this.minRatioOfDistributorNodesUp = minRatio; + return this; + } + Params minNodeRatioPerGroup(double minRatio) { + this.minNodeRatioPerGroup = minRatio; + return this; + } + Params idealDistributionBits(int distributionBits) { + this.idealDistributionBits = distributionBits; + return this; + } + Params highestObservedDistributionBitCount(int bitCount) { + this.highestObservedDistributionBitCount = bitCount; + return this; + } + Params lowestObservedDistributionBitCount(int bitCount) { + this.lowestObservedDistributionBitCount = bitCount; + return this; + } + + /** + * Infer parameters from controller options. Important: does _not_ set cluster; + * it must be explicitly set afterwards on the returned parameter object before + * being used to compute states. + */ + static Params fromOptions(FleetControllerOptions opts) { + return new Params() + .maxPrematureCrashes(opts.maxPrematureCrashes) + .minStorageNodesUp(opts.minStorageNodesUp) + .minDistributorNodesUp(opts.minDistributorNodesUp) + .minRatioOfStorageNodesUp(opts.minRatioOfStorageNodesUp) + .minRatioOfDistributorNodesUp(opts.minRatioOfDistributorNodesUp) + .minNodeRatioPerGroup(opts.minNodeRatioPerGroup) + .idealDistributionBits(opts.distributionBits) + .transitionTimes(opts.maxTransitionTime); + } + } + + static AnnotatedClusterState generatedStateFrom(final Params params) { + final ContentCluster cluster = params.cluster; + final ClusterState workingState = ClusterState.emptyState(); + final Map<Node, NodeStateReason> nodeStateReasons = new HashMap<>(); + + for (final NodeInfo nodeInfo : cluster.getNodeInfo()) { + final NodeState nodeState = computeEffectiveNodeState(nodeInfo, params); + workingState.setNodeState(nodeInfo.getNode(), nodeState); + } + + takeDownGroupsWithTooLowAvailability(workingState, nodeStateReasons, 
params); + + final Optional<ClusterStateReason> reasonToBeDown = clusterDownReason(workingState, params); + if (reasonToBeDown.isPresent()) { + workingState.setClusterState(State.DOWN); + } + workingState.setDistributionBits(inferDistributionBitCount(cluster, workingState, params)); + + return new AnnotatedClusterState(workingState, reasonToBeDown, nodeStateReasons); + } + + private static boolean nodeIsConsideredTooUnstable(final NodeInfo nodeInfo, final Params params) { + return (params.maxPrematureCrashes != 0 + && nodeInfo.getPrematureCrashCount() > params.maxPrematureCrashes); + } + + private static void applyWantedStateToBaselineState(final NodeState baseline, final NodeState wanted) { + // Only copy state and description from Wanted state; this preserves auxiliary + // information such as disk states and startup timestamp. + baseline.setState(wanted.getState()); + baseline.setDescription(wanted.getDescription()); + } + + private static NodeState computeEffectiveNodeState(final NodeInfo nodeInfo, final Params params) { + final NodeState reported = nodeInfo.getReportedState(); + final NodeState wanted = nodeInfo.getWantedState(); + final NodeState baseline = reported.clone(); + + if (nodeIsConsideredTooUnstable(nodeInfo, params)) { + baseline.setState(State.DOWN); + } + if (startupTimestampAlreadyObservedByAllNodes(nodeInfo, baseline)) { + baseline.setStartTimestamp(0); + } + if (nodeInfo.isStorage()) { + applyStorageSpecificStateTransforms(nodeInfo, params, reported, wanted, baseline); + } + if (baseline.above(wanted)) { + applyWantedStateToBaselineState(baseline, wanted); + } + + return baseline; + } + + private static void applyStorageSpecificStateTransforms(NodeInfo nodeInfo, Params params, NodeState reported, + NodeState wanted, NodeState baseline) + { + if (reported.getState() == State.INITIALIZING) { + if (timedOutWithoutNewInitProgress(reported, nodeInfo, params) + || shouldForceInitToDown(reported) + || nodeInfo.recentlyObservedUnstableDuringInit()) + 
{ + baseline.setState(State.DOWN); + } + if (shouldForceInitToMaintenance(reported, wanted)) { + baseline.setState(State.MAINTENANCE); + } + } + // TODO ensure that maintenance cannot override Down for any other cases + if (withinTemporalMaintenancePeriod(nodeInfo, baseline, params) && wanted.getState() != State.DOWN) { + baseline.setState(State.MAINTENANCE); + } + } + + // TODO remove notion of init timeout progress? Seems redundant when we've already got RPC timeouts + private static boolean timedOutWithoutNewInitProgress(final NodeState reported, final NodeInfo nodeInfo, final Params params) { + if (reported.getState() != State.INITIALIZING) { + return false; + } + if (params.maxInitProgressTimeMs <= 0) { + return false; // No upper bound for max init time; auto-down for all intents and purposes disabled. + } + return nodeInfo.getInitProgressTime() + params.maxInitProgressTimeMs <= params.currentTimeInMillis; + } + + // Init while listing buckets should be treated as Down, as distributors expect a storage node + // in Init mode to have a bucket set readily available. Clients also expect a node in Init to + // be able to receive operations. + // Precondition: reported.getState() == State.INITIALIZING + private static boolean shouldForceInitToDown(final NodeState reported) { + return reported.getInitProgress() <= NodeState.getListingBucketsInitProgressLimit() + 0.00001; + } + + // Special case: since each node is published with a single state, if we let a Retired node + // be published with Initializing, it'd start receiving feed and merges. Avoid this by + // having it be in maintenance instead for the duration of the init period. 
+ private static boolean shouldForceInitToMaintenance(final NodeState reported, final NodeState wanted) { + return reported.getState() == State.INITIALIZING && wanted.getState() == State.RETIRED; + } + + private static boolean startupTimestampAlreadyObservedByAllNodes(final NodeInfo nodeInfo, final NodeState baseline) { + return baseline.getStartTimestamp() == nodeInfo.getStartTimestamp(); // TODO rename NodeInfo getter/setter + } + + /** + * Determines whether a given storage node should be implicitly set as being + * in a maintenance state despite its reported state being Down. This is + * predominantly a case when contact has just been lost with a node, but we + * do not want to immediately set it to Down just yet (where "yet" is a configurable + * amount of time; see params.transitionTime). This is to prevent common node + * restart/upgrade scenarios from triggering redistribution and data replication + * that would be useless work if the node comes back up immediately afterwards. + * + * Only makes sense to call for storage nodes, since distributors don't support + * being in maintenance mode. 
+ */ + private static boolean withinTemporalMaintenancePeriod(final NodeInfo nodeInfo, + final NodeState baseline, + final Params params) + { + final Integer transitionTime = params.transitionTimes.get(nodeInfo.getNode().getType()); + if (transitionTime == 0 || !baseline.getState().oneOf("sd")) { + return false; + } + return nodeInfo.getTransitionTime() + transitionTime > params.currentTimeInMillis; + } + + private static void takeDownGroupsWithTooLowAvailability(final ClusterState workingState, + Map<Node, NodeStateReason> nodeStateReasons, + final Params params) + { + final GroupAvailabilityCalculator calc = new GroupAvailabilityCalculator.Builder() + .withMinNodeRatioPerGroup(params.minNodeRatioPerGroup) + .withDistribution(params.cluster.getDistribution()) + .build(); + final Set<Integer> nodesToTakeDown = calc.nodesThatShouldBeDown(workingState); + + for (Integer idx : nodesToTakeDown) { + final Node node = storageNode(idx); + final NodeState newState = new NodeState(NodeType.STORAGE, State.DOWN); + newState.setDescription("group node availability below configured threshold"); + workingState.setNodeState(node, newState); + nodeStateReasons.put(node, NodeStateReason.GROUP_IS_DOWN); + } + } + + private static Node storageNode(int index) { + return new Node(NodeType.STORAGE, index); + } + + // TODO we'll want to explicitly persist a bit lower bound in ZooKeeper and ensure we + // never go below it (this is _not_ the case today). Nodes that have min bits lower than + // this will just have to start splitting out in the background before being allowed + // to join the cluster. 
+ + private static int inferDistributionBitCount(final ContentCluster cluster, + final ClusterState state, + final Params params) + { + int bitCount = params.idealDistributionBits; + final Optional<Integer> minBits = cluster.getConfiguredNodes().values().stream() + .map(configuredNode -> cluster.getNodeInfo(storageNode(configuredNode.index()))) + .filter(node -> state.getNodeState(node.getNode()).getState().oneOf("iur")) + .map(nodeInfo -> nodeInfo.getReportedState().getMinUsedBits()) + .min(Integer::compare); + + if (minBits.isPresent() && minBits.get() < bitCount) { + bitCount = minBits.get(); + } + if (bitCount > params.lowestObservedDistributionBitCount && bitCount < params.idealDistributionBits) { + bitCount = params.lowestObservedDistributionBitCount; + } + + return bitCount; + } + + private static boolean nodeStateIsConsideredAvailable(final NodeState ns) { + return (ns.getState() == State.UP + || ns.getState() == State.RETIRED + || ns.getState() == State.INITIALIZING); + } + + private static long countAvailableNodesOfType(final NodeType type, + final ContentCluster cluster, + final ClusterState state) + { + return cluster.getConfiguredNodes().values().stream() + .map(node -> state.getNodeState(new Node(type, node.index()))) + .filter(ClusterStateGenerator::nodeStateIsConsideredAvailable) + .count(); + } + + private static Optional<ClusterStateReason> clusterDownReason(final ClusterState state, final Params params) { + final ContentCluster cluster = params.cluster; + + final long upStorageCount = countAvailableNodesOfType(NodeType.STORAGE, cluster, state); + final long upDistributorCount = countAvailableNodesOfType(NodeType.DISTRIBUTOR, cluster, state); + // There's a 1-1 relationship between distributors and storage nodes, so don't need to + // keep track of separate node counts for computing availability ratios. 
+ final long nodeCount = cluster.getConfiguredNodes().size(); + + if (upStorageCount < params.minStorageNodesUp) { + return Optional.of(ClusterStateReason.TOO_FEW_STORAGE_NODES_AVAILABLE); + } + if (upDistributorCount < params.minDistributorNodesUp) { + return Optional.of(ClusterStateReason.TOO_FEW_DISTRIBUTOR_NODES_AVAILABLE); + } + if (params.minRatioOfStorageNodesUp * nodeCount > upStorageCount) { + return Optional.of(ClusterStateReason.TOO_LOW_AVAILABLE_STORAGE_NODE_RATIO); + } + if (params.minRatioOfDistributorNodesUp * nodeCount > upDistributorCount) { + return Optional.of(ClusterStateReason.TOO_LOW_AVAILABLE_DISTRIBUTOR_NODE_RATIO); + } + return Optional.empty(); + } + +} diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateHistoryEntry.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateHistoryEntry.java new file mode 100644 index 00000000000..3963fcaa45b --- /dev/null +++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateHistoryEntry.java @@ -0,0 +1,46 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.vespa.clustercontroller.core; + +import com.yahoo.vdslib.state.ClusterState; + +import java.util.Objects; + +public class ClusterStateHistoryEntry { + + private final ClusterState state; + private final long time; + + ClusterStateHistoryEntry(final ClusterState state, final long time) { + this.state = state; + this.time = time; + } + + public ClusterState state() { + return state; + } + + public long time() { + return time; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ClusterStateHistoryEntry that = (ClusterStateHistoryEntry) o; + return time == that.time && + Objects.equals(state, that.state); + } + + @Override + public int hashCode() { + return Objects.hash(state, time); + } + + // String representation only used for test expectation failures and debugging output. + // Actual status page history entry rendering emits formatted date/time. + public String toString() { + return String.format("state '%s' at time %d", state, time); + } + +} diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateReason.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateReason.java new file mode 100644 index 00000000000..3557ed1ceb8 --- /dev/null +++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateReason.java @@ -0,0 +1,15 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.clustercontroller.core; + +/** + * Explicit reasons for why a cluster has been assigned a particular global state. + * This only includes reasons that aren't directly possible to infer from diffing + * two cluster states; i.e. distribution bit changes aren't listed here because + * they are obvious from direct inspection. 
+ */ +public enum ClusterStateReason { + TOO_FEW_STORAGE_NODES_AVAILABLE, + TOO_FEW_DISTRIBUTOR_NODES_AVAILABLE, + TOO_LOW_AVAILABLE_STORAGE_NODE_RATIO, + TOO_LOW_AVAILABLE_DISTRIBUTOR_NODE_RATIO, +} diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateView.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateView.java index 328acfb4dbe..644d6b28b05 100644 --- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateView.java +++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateView.java @@ -41,6 +41,10 @@ public class ClusterStateView { return new ClusterStateView(clusterState, createNewAggregator(clusterState, metricUpdater), metricUpdater); } + public static ClusterStateView create(final ClusterState clusterState, final MetricUpdater metricUpdater) { + return new ClusterStateView(clusterState, createNewAggregator(clusterState, metricUpdater), metricUpdater); + } + private static ClusterStatsAggregator createNewAggregator(ClusterState clusterState, MetricUpdater metricUpdater) { Set<Integer> upDistributors = getIndicesOfUpNodes(clusterState, NodeType.DISTRIBUTOR); Set<Integer> upStorageNodes = getIndicesOfUpNodes(clusterState, NodeType.STORAGE); diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/EventDiffCalculator.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/EventDiffCalculator.java new file mode 100644 index 00000000000..2e5d99f2e67 --- /dev/null +++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/EventDiffCalculator.java @@ -0,0 +1,143 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.vespa.clustercontroller.core; + +import com.yahoo.vdslib.distribution.ConfiguredNode; +import com.yahoo.vdslib.state.ClusterState; +import com.yahoo.vdslib.state.Node; +import com.yahoo.vdslib.state.NodeState; +import com.yahoo.vdslib.state.NodeType; +import com.yahoo.vdslib.state.State; + +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; + +/** + * Responsible for inferring the difference between two cluster states and their + * state annotations and producing a set of events that describe the changes between + * the two. Diffing the states directly provides a clear picture of _what_ has changed, + * while the annotations are generally required to explain _why_ the changes happened + * in the first place. + * + * Events are primarily used for administrative/user visibility into what's happening + * in the cluster and are output to the Vespa log as well as kept in a circular history + * buffer per node and for the cluster as a whole. + */ +public class EventDiffCalculator { + + static class Params { + ContentCluster cluster; + AnnotatedClusterState fromState; + AnnotatedClusterState toState; + long currentTime; + + public Params cluster(ContentCluster cluster) { + this.cluster = cluster; + return this; + } + public Params fromState(AnnotatedClusterState clusterState) { + this.fromState = clusterState; + return this; + } + public Params toState(AnnotatedClusterState clusterState) { + this.toState = clusterState; + return this; + } + public Params currentTimeMs(long time) { + this.currentTime = time; + return this; + } + } + + public static List<Event> computeEventDiff(final Params params) { + final List<Event> events = new ArrayList<>(); + + emitPerNodeDiffEvents(params, events); + emitWholeClusterDiffEvent(params, events); + return events; + } + + private static ClusterEvent createClusterEvent(String description, Params params) { + return new ClusterEvent(ClusterEvent.Type.SYSTEMSTATE, description, 
params.currentTime); + } + + private static boolean clusterDownBecause(final Params params, ClusterStateReason wantedReason) { + final Optional<ClusterStateReason> actualReason = params.toState.getClusterStateReason(); + return actualReason.isPresent() && actualReason.get().equals(wantedReason); + } + + private static void emitWholeClusterDiffEvent(final Params params, final List<Event> events) { + final ClusterState fromState = params.fromState.getClusterState(); + final ClusterState toState = params.toState.getClusterState(); + + if (clusterHasTransitionedToUpState(fromState, toState)) { + events.add(createClusterEvent("Enough nodes available for system to become up", params)); + } else if (clusterHasTransitionedToDownState(fromState, toState)) { + if (clusterDownBecause(params, ClusterStateReason.TOO_FEW_STORAGE_NODES_AVAILABLE)) { + events.add(createClusterEvent("Too few storage nodes available in cluster. Setting cluster state down", params)); + } else if (clusterDownBecause(params, ClusterStateReason.TOO_FEW_DISTRIBUTOR_NODES_AVAILABLE)) { + events.add(createClusterEvent("Too few distributor nodes available in cluster. Setting cluster state down", params)); + } else if (clusterDownBecause(params, ClusterStateReason.TOO_LOW_AVAILABLE_STORAGE_NODE_RATIO)) { + events.add(createClusterEvent("Too low ratio of available storage nodes. Setting cluster state down", params)); + } else if (clusterDownBecause(params, ClusterStateReason.TOO_LOW_AVAILABLE_DISTRIBUTOR_NODE_RATIO)) { + events.add(createClusterEvent("Too low ratio of available distributor nodes. 
Setting cluster state down", params)); + } else { + events.add(createClusterEvent("Cluster is down", params)); + } + } + } + + private static NodeEvent createNodeEvent(NodeInfo nodeInfo, String description, Params params) { + return new NodeEvent(nodeInfo, description, NodeEvent.Type.CURRENT, params.currentTime); + } + + private static void emitPerNodeDiffEvents(final Params params, final List<Event> events) { + final ContentCluster cluster = params.cluster; + final ClusterState fromState = params.fromState.getClusterState(); + final ClusterState toState = params.toState.getClusterState(); + + for (ConfiguredNode node : cluster.getConfiguredNodes().values()) { + for (NodeType nodeType : NodeType.getTypes()) { + final Node n = new Node(nodeType, node.index()); + emitSingleNodeEvents(params, events, cluster, fromState, toState, n); + } + } + } + + private static void emitSingleNodeEvents(Params params, List<Event> events, ContentCluster cluster, ClusterState fromState, ClusterState toState, Node n) { + final NodeState nodeFrom = fromState.getNodeState(n); + final NodeState nodeTo = toState.getNodeState(n); + if (!nodeTo.equals(nodeFrom)) { + final NodeInfo info = cluster.getNodeInfo(n); + events.add(createNodeEvent(info, String.format("Altered node state in cluster state from '%s' to '%s'", + nodeFrom.toString(true), nodeTo.toString(true)), params)); + + NodeStateReason prevReason = params.fromState.getNodeStateReasons().get(n); + NodeStateReason currReason = params.toState.getNodeStateReasons().get(n); + if (isGroupDownEdge(prevReason, currReason)) { + events.add(createNodeEvent(info, "Group node availability is below configured threshold", params)); + } else if (isGroupUpEdge(prevReason, currReason)) { + events.add(createNodeEvent(info, "Group node availability has been restored", params)); + } + } + } + + private static boolean isGroupUpEdge(NodeStateReason prevReason, NodeStateReason currReason) { + return prevReason == NodeStateReason.GROUP_IS_DOWN && currReason 
!= NodeStateReason.GROUP_IS_DOWN; + } + + private static boolean isGroupDownEdge(NodeStateReason prevReason, NodeStateReason currReason) { + return prevReason != NodeStateReason.GROUP_IS_DOWN && currReason == NodeStateReason.GROUP_IS_DOWN; + } + + private static boolean clusterHasTransitionedToUpState(ClusterState prevState, ClusterState currentState) { + return prevState.getClusterState() != State.UP && currentState.getClusterState() == State.UP; + } + + private static boolean clusterHasTransitionedToDownState(ClusterState prevState, ClusterState currentState) { + return prevState.getClusterState() != State.DOWN && currentState.getClusterState() == State.DOWN; + } + + public static Params params() { return new Params(); } + +} diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java index ceeeddf49fa..b21cae4ed71 100644 --- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java +++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java @@ -7,6 +7,7 @@ import com.yahoo.vdslib.distribution.ConfiguredNode; import com.yahoo.vdslib.state.ClusterState; import com.yahoo.vdslib.state.Node; import com.yahoo.vdslib.state.NodeState; +import com.yahoo.vdslib.state.State; import com.yahoo.vespa.clustercontroller.core.database.DatabaseHandler; import com.yahoo.vespa.clustercontroller.core.hostinfo.HostInfo; import com.yahoo.vespa.clustercontroller.core.listeners.*; @@ -37,8 +38,9 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd private final ContentCluster cluster; private final Communicator communicator; private final NodeStateGatherer stateGatherer; - private final SystemStateGenerator systemStateGenerator; + private final StateChangeHandler stateChangeHandler; private final SystemStateBroadcaster 
systemStateBroadcaster; + private final StateVersionTracker stateVersionTracker; private final StatusPageServerInterface statusPageServer; private final RpcServer rpcServer; private final DatabaseHandler database; @@ -59,7 +61,7 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd private final List<com.yahoo.vdslib.state.ClusterState> newStates = new ArrayList<>(); private long configGeneration = -1; private long nextConfigGeneration = -1; - private List<RemoteClusterControllerTask> remoteTasks = new ArrayList<>(); + private Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>(); private final MetricUpdater metricUpdater; private boolean isMaster = false; @@ -69,7 +71,7 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd private final RunDataExtractor dataExtractor = new RunDataExtractor() { @Override - public com.yahoo.vdslib.state.ClusterState getLatestClusterState() { return systemStateGenerator.getClusterState(); } + public com.yahoo.vdslib.state.ClusterState getLatestClusterState() { return stateVersionTracker.getVersionedClusterState(); } @Override public FleetControllerOptions getOptions() { return options; } @Override @@ -87,7 +89,7 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd RpcServer server, NodeLookup nodeLookup, DatabaseHandler database, - SystemStateGenerator systemStateGenerator, + StateChangeHandler stateChangeHandler, SystemStateBroadcaster systemStateBroadcaster, MasterElectionHandler masterElectionHandler, MetricUpdater metricUpdater, @@ -103,8 +105,9 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd this.communicator = communicator; this.database = database; this.stateGatherer = nodeStateGatherer; - this.systemStateGenerator = systemStateGenerator; + this.stateChangeHandler = stateChangeHandler; this.systemStateBroadcaster = systemStateBroadcaster; + this.stateVersionTracker = new 
StateVersionTracker(metricUpdater); this.metricUpdater = metricUpdater; this.statusPageServer = statusPage; @@ -120,12 +123,12 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd new NodeHealthRequestHandler(dataExtractor)); this.statusRequestRouter.addHandler( "^/clusterstate", - new ClusterStateRequestHandler(systemStateGenerator)); + new ClusterStateRequestHandler(stateVersionTracker)); this.statusRequestRouter.addHandler( "^/$", new LegacyIndexPageRequestHandler( timer, options.showLocalSystemStatesInEventLog, cluster, - masterElectionHandler, systemStateGenerator, + masterElectionHandler, stateVersionTracker, eventLog, timer.getCurrentTimeInMillis(), dataExtractor)); propagateOptions(); @@ -169,7 +172,7 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd options.nodeStateRequestRoundTripTimeMaxSeconds); DatabaseHandler database = new DatabaseHandler(timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer); NodeLookup lookUp = new SlobrokClient(timer); - SystemStateGenerator stateGenerator = new SystemStateGenerator(timer, log, metricUpdater); + StateChangeHandler stateGenerator = new StateChangeHandler(timer, log, metricUpdater); SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer); MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer); FleetController controller = new FleetController( @@ -246,7 +249,7 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd public com.yahoo.vdslib.state.ClusterState getSystemState() { synchronized(monitor) { - return systemStateGenerator.getClusterState(); + return stateVersionTracker.getVersionedClusterState(); } } @@ -299,41 +302,41 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd @Override public void handleNewNodeState(NodeInfo node, NodeState newState) { 
verifyInControllerThread(); - systemStateGenerator.handleNewReportedNodeState(node, newState, this); + stateChangeHandler.handleNewReportedNodeState(stateVersionTracker.getVersionedClusterState(), node, newState, this); } @Override public void handleNewWantedNodeState(NodeInfo node, NodeState newState) { verifyInControllerThread(); wantedStateChanged = true; - systemStateGenerator.proposeNewNodeState(node, newState); + stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState); } @Override public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) { verifyInControllerThread(); - systemStateGenerator.handleUpdatedHostInfo(nodeInfo, newHostInfo); + stateVersionTracker.handleUpdatedHostInfo(stateChangeHandler.getHostnames(), nodeInfo, newHostInfo); } @Override public void handleNewNode(NodeInfo node) { verifyInControllerThread(); - systemStateGenerator.handleNewNode(node); + stateChangeHandler.handleNewNode(node); } @Override public void handleMissingNode(NodeInfo node) { verifyInControllerThread(); - systemStateGenerator.handleMissingNode(node, this); + stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this); } @Override public void handleNewRpcAddress(NodeInfo node) { verifyInControllerThread(); - systemStateGenerator.handleNewRpcAddress(node); + stateChangeHandler.handleNewRpcAddress(node); } @Override public void handleReturnedRpcAddress(NodeInfo node) { verifyInControllerThread(); - systemStateGenerator.handleReturnedRpcAddress(node); + stateChangeHandler.handleReturnedRpcAddress(node); } public void handleNewSystemState(com.yahoo.vdslib.state.ClusterState state) { @@ -370,7 +373,9 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd /** Called when all distributors have acked newest cluster state version. 
*/ public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException { - systemStateGenerator.handleAllDistributorsInSync(database, context); + Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values()); + stateChangeHandler.handleAllDistributorsInSync( + stateVersionTracker.getVersionedClusterState(), nodes, database, context); } private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) { @@ -409,17 +414,11 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout); stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod); stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS); - systemStateGenerator.setNodes(cluster.clusterInfo()); - systemStateGenerator.setMaxTransitionTime(options.maxTransitionTime); - systemStateGenerator.setMaxInitProgressTime(options.maxInitProgressTime); - systemStateGenerator.setMaxPrematureCrashes(options.maxPrematureCrashes); - systemStateGenerator.setStableStateTimePeriod(options.stableStateTimePeriod); - systemStateGenerator.setMinNodesUp(options.minDistributorNodesUp, options.minStorageNodesUp, - options.minRatioOfDistributorNodesUp, options.minRatioOfStorageNodesUp); - systemStateGenerator.setMinNodeRatioPerGroup(options.minNodeRatioPerGroup); - systemStateGenerator.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod); - systemStateGenerator.setDistributionBits(options.distributionBits); - systemStateGenerator.setDistribution(options.storageDistribution); + + // TODO: remove as many temporal parameter dependencies as possible here. Currently duplication of state. 
+ stateChangeHandler.reconfigureFromOptions(options); + stateChangeHandler.setStateChangedFlag(); // Always trigger state recomputation after reconfig + masterElectionHandler.setFleetControllerCount(options.fleetControllerCount); masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod); @@ -491,7 +490,7 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd didWork = database.doNextZooKeeperTask(databaseContext); didWork |= updateMasterElectionState(); didWork |= handleLeadershipEdgeTransitions(); - systemStateGenerator.setMaster(isMaster); + stateChangeHandler.setMaster(isMaster); // Process zero or more getNodeState responses that we have received. didWork |= stateGatherer.processResponses(this); @@ -510,10 +509,10 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd didWork |= processAnyPendingStatusPageRequest(); if (rpcServer != null) { - didWork |= rpcServer.handleRpcRequests(cluster, systemStateGenerator.getClusterState(), this, this); + didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this); } - processAllQueuedRemoteTasks(); + didWork |= processNextQueuedRemoteTask(); processingCycle = false; ++cycleCount; @@ -606,25 +605,52 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd } } - private void processAllQueuedRemoteTasks() { + private boolean processNextQueuedRemoteTask() { if ( ! 
remoteTasks.isEmpty()) { - RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context(); - context.cluster = cluster; - context.currentState = systemStateGenerator.getConsolidatedClusterState(); - context.masterInfo = masterElectionHandler; - context.nodeStateOrHostInfoChangeHandler = this; - context.nodeAddedOrRemovedListener = this; - for (RemoteClusterControllerTask task : remoteTasks) { - log.finest("Processing remote task " + task.getClass().getName()); - task.doRemoteFleetControllerTask(context); - task.notifyCompleted(); - log.finest("Done processing remote task " + task.getClass().getName()); - } - log.fine("Completed processing remote tasks"); - remoteTasks.clear(); + final RemoteClusterControllerTask.Context context = createRemoteTaskProcessingContext(); + final RemoteClusterControllerTask task = remoteTasks.poll(); + log.finest("Processing remote task " + task.getClass().getName()); + task.doRemoteFleetControllerTask(context); + task.notifyCompleted(); + log.finest("Done processing remote task " + task.getClass().getName()); + return true; } + return false; } + private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() { + final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context(); + context.cluster = cluster; + context.currentState = consolidatedClusterState(); + context.masterInfo = masterElectionHandler; + context.nodeStateOrHostInfoChangeHandler = this; + context.nodeAddedOrRemovedListener = this; + return context; + } + + /** + * A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are + * up or down even when the whole cluster is down. The regular, published cluster state is not + * normally updated to reflect node events when the cluster is down. 
+ */ + ClusterState consolidatedClusterState() { + final ClusterState publishedState = stateVersionTracker.getVersionedClusterState(); + if (publishedState.getClusterState() == State.UP) { + return publishedState; // Short-circuit; already represents latest node state + } + // Latest candidate state contains the most up to date state information, even if it may not + // have been published yet. + final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone(); + current.setVersion(publishedState.getVersion()); + return current; + } + + /* + System test observations: + - a node that stops normally (U -> S) then goes down erroneously triggers premature crash handling + - long time before content node state convergence (though this seems to be the case for legacy impl as well) + */ + private boolean resyncLocallyCachedState() throws InterruptedException { boolean didWork = false; // Let non-master state gatherers update wanted states once in a while, so states generated and shown are close to valid. @@ -637,31 +663,99 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd // Send getNodeState requests to zero or more nodes. didWork |= stateGatherer.sendMessages(cluster, communicator, this); - didWork |= systemStateGenerator.watchTimers(cluster, this); - didWork |= systemStateGenerator.notifyIfNewSystemState(cluster, this); + // Important: timer events must use a consolidated state, or they might trigger edge events multiple times. + didWork |= stateChangeHandler.watchTimers(cluster, consolidatedClusterState(), this); + + didWork |= recomputeClusterStateIfRequired(); if ( ! isStateGatherer) { if ( ! 
isMaster) { eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis())); // Update versions to use so what is shown is closer to what is reality on the master - systemStateGenerator.setLatestSystemStateVersion(database.getLatestSystemStateVersion()); + stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); } } isStateGatherer = true; return didWork; } + private boolean recomputeClusterStateIfRequired() { + if (mustRecomputeCandidateClusterState()) { + stateChangeHandler.unsetStateChangedFlag(); + final AnnotatedClusterState candidate = computeCurrentAnnotatedState(); + stateVersionTracker.updateLatestCandidateState(candidate); + + if (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish() + || stateVersionTracker.hasReceivedNewVersionFromZooKeeper()) + { + final long timeNowMs = timer.getCurrentTimeInMillis(); + final AnnotatedClusterState before = stateVersionTracker.getAnnotatedVersionedClusterState(); + + stateVersionTracker.promoteCandidateToVersionedState(timeNowMs); + emitEventsForAlteredStateEdges(before, stateVersionTracker.getAnnotatedVersionedClusterState(), timeNowMs); + handleNewSystemState(stateVersionTracker.getVersionedClusterState()); + return true; + } + } + return false; + } + + private AnnotatedClusterState computeCurrentAnnotatedState() { + ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options); + params.currentTimeInMilllis(timer.getCurrentTimeInMillis()) + .cluster(cluster) + .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits()); + return ClusterStateGenerator.generatedStateFrom(params); + } + + private void emitEventsForAlteredStateEdges(final AnnotatedClusterState fromState, + final AnnotatedClusterState toState, + final long timeNowMs) { + final List<Event> deltaEvents = 
EventDiffCalculator.computeEventDiff( + EventDiffCalculator.params() + .cluster(cluster) + .fromState(fromState) + .toState(toState) + .currentTimeMs(timeNowMs)); + for (Event event : deltaEvents) { + eventLog.add(event, isMaster); + } + + emitStateAppliedEvents(timeNowMs, fromState.getClusterState(), toState.getClusterState()); + } + + private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) { + eventLog.add(new ClusterEvent( + ClusterEvent.Type.SYSTEMSTATE, + "New cluster state version " + toClusterState.getVersion() + ". Change from last: " + + fromClusterState.getTextualDifference(toClusterState), + timeNowMs), isMaster); + + if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) { + eventLog.add(new ClusterEvent( + ClusterEvent.Type.SYSTEMSTATE, + "Altering distribution bits in system from " + + fromClusterState.getDistributionBitCount() + " to " + + toClusterState.getDistributionBitCount(), + timeNowMs), isMaster); + } + } + + private boolean mustRecomputeCandidateClusterState() { + return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper(); + } + private boolean handleLeadershipEdgeTransitions() throws InterruptedException { boolean didWork = false; if (masterElectionHandler.isMaster()) { if ( ! isMaster) { metricUpdater.becameMaster(); // If we just became master, restore wanted states from database - systemStateGenerator.setLatestSystemStateVersion(database.getLatestSystemStateVersion()); + stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion()); didWork = database.loadStartTimestamps(cluster); didWork |= database.loadWantedStates(databaseContext); eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. 
Bumped version to " - + systemStateGenerator.getClusterState().getVersion() + " to be in line.", timer.getCurrentTimeInMillis())); + + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis())); long currentTime = timer.getCurrentTimeInMillis(); firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast; log.log(LogLevel.DEBUG, "At time " + currentTime + " we set first system state broadcast time to be " @@ -693,6 +787,7 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd } catch (InterruptedException e) { log.log(LogLevel.DEBUG, "Event thread stopped by interrupt exception: " + e); } catch (Throwable t) { + t.printStackTrace(); log.log(LogLevel.ERROR, "Fatal error killed fleet controller", t); synchronized (monitor) { running = false; } System.exit(1); diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/GroupAvailabilityCalculator.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/GroupAvailabilityCalculator.java index 74b15b61ac3..e24e5f6914e 100644 --- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/GroupAvailabilityCalculator.java +++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/GroupAvailabilityCalculator.java @@ -10,6 +10,7 @@ import com.yahoo.vdslib.state.NodeState; import com.yahoo.vdslib.state.NodeType; import com.yahoo.vdslib.state.State; +import java.util.Collections; import java.util.HashSet; import java.util.Set; import java.util.stream.Stream; @@ -105,6 +106,9 @@ class GroupAvailabilityCalculator { } public Set<Integer> nodesThatShouldBeDown(ClusterState state) { + if (distribution == null) { // FIXME: for tests that don't set distribution properly! + return Collections.emptySet(); + } if (isFlatCluster(distribution.getRootGroup())) { // Implicit group takedown only applies to hierarchic cluster setups. 
return new HashSet<>(); diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/MasterElectionHandler.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/MasterElectionHandler.java index 6c48bdf12d0..1a48b088ca3 100644 --- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/MasterElectionHandler.java +++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/MasterElectionHandler.java @@ -240,7 +240,7 @@ public class MasterElectionHandler implements MasterInterface { .append(".</p>"); } else if (masterGoneFromZooKeeperTime + masterZooKeeperCooldownPeriod > timer.getCurrentTimeInMillis()) { long time = timer.getCurrentTimeInMillis() - masterGoneFromZooKeeperTime; - sb.append("<p>There is currently no master. Only " + (time / 1000) + " seconds have past since") + sb.append("<p>There is currently no master. Only " + (time / 1000) + " seconds have passed since") .append(" old master disappeared. At least " + (masterZooKeeperCooldownPeriod / 1000) + " must pass") .append(" before electing new master unless all possible master candidates are online.</p>"); } @@ -249,7 +249,7 @@ public class MasterElectionHandler implements MasterInterface { sb.append("<p>As we are number ").append(nextInLineCount) .append(" in line for taking over as master, we're gathering state from nodes.</p>"); sb.append("<p><font color=\"red\">As we are not the master, we don't know about nodes current system state" - + " or wanted states, so some statistics below are a bit incorrect. Look at status page on master " + + " or wanted states, so some statistics below may be stale. 
Look at status page on master " + "for updated data.</font></p>"); } if (index * 2 > totalCount) { diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeEvent.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeEvent.java index d9d83c705b1..944cbd02082 100644 --- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeEvent.java +++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeEvent.java @@ -45,4 +45,8 @@ public class NodeEvent implements Event { public String getCategory() { return type.toString(); } + + public Type getType() { + return type; + } } diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeInfo.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeInfo.java index c261a4bb194..87a32e1e088 100644 --- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeInfo.java +++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeInfo.java @@ -35,6 +35,18 @@ abstract public class NodeInfo implements Comparable<NodeInfo> { /** Whether this node has been configured to be retired and should therefore always return retired as its wanted state */ private boolean configuredRetired; + /** + * Node has been observed transitioning from Init to Down at least once during the last "premature crash count" + * period. Gets reset whenever the crash count is reset to zero after a period of stability. + * + * Flag can also be explicitly toggled by external code, such as if a reported node state + * handler discovers "reverse" init progress. This indicates a "silent" down edge and should be + * handled as such. + * + * It is an explicit choice that we only do this on an edge to Down (and not Stopping). Stopping implies + * an administrative action, not that the node itself is unstable. 
+ */ + private boolean recentlyObservedUnstableDuringInit; /** The time we set the current state last. */ private long nextAttemptTime; @@ -97,6 +109,7 @@ abstract public class NodeInfo implements Comparable<NodeInfo> { this.version = getLatestVersion(); this.connectionVersion = getLatestVersion(); this.configuredRetired = configuredRetired; + this.recentlyObservedUnstableDuringInit = false; this.rpcAddress = rpcAddress; this.lastSeenInSlobrok = null; this.nextAttemptTime = 0; @@ -132,7 +145,17 @@ abstract public class NodeInfo implements Comparable<NodeInfo> { public int getConnectionAttemptCount() { return connectionAttemptCount; } + public boolean recentlyObservedUnstableDuringInit() { + return recentlyObservedUnstableDuringInit; + } + public void setRecentlyObservedUnstableDuringInit(boolean unstable) { + recentlyObservedUnstableDuringInit = unstable; + } + public void setPrematureCrashCount(int count) { + if (count == 0) { + recentlyObservedUnstableDuringInit = false; + } if (prematureCrashCount != count) { prematureCrashCount = count; log.log(LogLevel.DEBUG, "Premature crash count on " + toString() + " set to " + count); @@ -213,6 +236,7 @@ abstract public class NodeInfo implements Comparable<NodeInfo> { public ContentCluster getCluster() { return cluster; } /** Returns true if the node is currently registered in slobrok */ + // FIXME why is this called "isRpcAddressOutdated" then??? 
public boolean isRpcAddressOutdated() { return lastSeenInSlobrok != null; } public Long getRpcAddressOutdatedTimestamp() { return lastSeenInSlobrok; } @@ -277,8 +301,10 @@ abstract public class NodeInfo implements Comparable<NodeInfo> { if (state.getState().equals(State.DOWN) && !reportedState.getState().oneOf("d")) { downStableStateTime = time; log.log(LogLevel.DEBUG, "Down stable state on " + toString() + " altered to " + time); - } - else if (state.getState().equals(State.UP) && !reportedState.getState().oneOf("u")) { + if (reportedState.getState() == State.INITIALIZING) { + recentlyObservedUnstableDuringInit = true; + } + } else if (state.getState().equals(State.UP) && !reportedState.getState().oneOf("u")) { upStableStateTime = time; log.log(LogLevel.DEBUG, "Up stable state on " + toString() + " altered to " + time); } @@ -403,7 +429,7 @@ abstract public class NodeInfo implements Comparable<NodeInfo> { public void setSystemStateVersionSent(ClusterState state) { if (state == null) throw new Error("Should not clear info for last version sent"); if (systemStateVersionSent.containsKey(state.getVersion())) { - throw new IllegalStateException("We have already sent cluster state version " + version + " to " + node); + throw new IllegalStateException("We have already sent cluster state version " + state.getVersion() + " to " + node); } systemStateVersionSent.put(state.getVersion(), state); } diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeStateReason.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeStateReason.java new file mode 100644 index 00000000000..da338626d5d --- /dev/null +++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeStateReason.java @@ -0,0 +1,10 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.vespa.clustercontroller.core; + +public enum NodeStateReason { + // FIXME some of these reasons may be unnecessary as they are reported implicitly by reported/wanted state changes + NODE_TOO_UNSTABLE, + WITHIN_MAINTENANCE_GRACE_PERIOD, + FORCED_INTO_MAINTENANCE, + GROUP_IS_DOWN +} diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/StateChangeHandler.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/StateChangeHandler.java new file mode 100644 index 00000000000..83ba274c422 --- /dev/null +++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/StateChangeHandler.java @@ -0,0 +1,530 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.clustercontroller.core; + +import com.yahoo.jrt.Spec; +import com.yahoo.log.LogLevel; +import com.yahoo.vdslib.distribution.ConfiguredNode; +import com.yahoo.vdslib.state.*; +import com.yahoo.vespa.clustercontroller.core.database.DatabaseHandler; +import com.yahoo.vespa.clustercontroller.core.listeners.NodeStateOrHostInfoChangeHandler; + +import java.util.*; +import java.util.logging.Logger; + +/** + * This class gets node state updates and timer events and uses these to decide + * whether a new cluster state should be generated. + * + * TODO refactor logic out into smaller, separate components. Still state duplication + * between ClusterStateGenerator and StateChangeHandler, especially for temporal + * state transition configuration parameters. 
+ */ +public class StateChangeHandler { + + private static Logger log = Logger.getLogger(StateChangeHandler.class.getName()); + + private final Timer timer; + private final EventLogInterface eventLog; + private boolean stateMayHaveChanged = false; + private boolean isMaster = false; + + private Map<NodeType, Integer> maxTransitionTime = new TreeMap<>(); + private int maxInitProgressTime = 5000; + private int maxPrematureCrashes = 4; + private long stableStateTimePeriod = 60 * 60 * 1000; + private Map<Integer, String> hostnames = new HashMap<>(); + private int maxSlobrokDisconnectGracePeriod = 1000; + private static final boolean disableUnstableNodes = true; + + /** + * @param metricUpdater may be null, in which case no metrics will be recorded. + */ + public StateChangeHandler(Timer timer, EventLogInterface eventLog, MetricUpdater metricUpdater) { + this.timer = timer; + this.eventLog = eventLog; + maxTransitionTime.put(NodeType.DISTRIBUTOR, 5000); + maxTransitionTime.put(NodeType.STORAGE, 5000); + } + + public void handleAllDistributorsInSync(final ClusterState currentState, + final Set<ConfiguredNode> nodes, + final DatabaseHandler database, + final DatabaseHandler.Context dbContext) throws InterruptedException { + int startTimestampsReset = 0; + log.log(LogLevel.DEBUG, String.format("handleAllDistributorsInSync invoked for state version %d", currentState.getVersion())); + for (NodeType nodeType : NodeType.getTypes()) { + for (ConfiguredNode configuredNode : nodes) { + final Node node = new Node(nodeType, configuredNode.index()); + final NodeInfo nodeInfo = dbContext.getCluster().getNodeInfo(node); + final NodeState nodeState = currentState.getNodeState(node); + if (nodeInfo != null && nodeState != null) { + if (nodeState.getStartTimestamp() > nodeInfo.getStartTimestamp()) { + if (log.isLoggable(LogLevel.DEBUG)) { + log.log(LogLevel.DEBUG, String.format("Storing away new start timestamp for node %s (%d)", + node, nodeState.getStartTimestamp())); + } + 
nodeInfo.setStartTimestamp(nodeState.getStartTimestamp()); + } + if (nodeState.getStartTimestamp() > 0) { + if (log.isLoggable(LogLevel.DEBUG)) { + log.log(LogLevel.DEBUG, String.format("Resetting timestamp in cluster state for node %s", node)); + } + ++startTimestampsReset; + } + } else if (log.isLoggable(LogLevel.DEBUG)) { + log.log(LogLevel.DEBUG, node + ": " + + (nodeInfo == null ? "null" : nodeInfo.getStartTimestamp()) + ", " + + (nodeState == null ? "null" : nodeState.getStartTimestamp())); + } + } + } + if (startTimestampsReset > 0) { + eventLog.add(new ClusterEvent(ClusterEvent.Type.SYSTEMSTATE, "Reset " + startTimestampsReset + + " start timestamps as all available distributors have seen newest cluster state.", + timer.getCurrentTimeInMillis())); + stateMayHaveChanged = true; + database.saveStartTimestamps(dbContext); + } else { + log.log(LogLevel.DEBUG, "Found no start timestamps to reset in cluster state."); + } + } + + public boolean stateMayHaveChanged() { + return stateMayHaveChanged; + } + + public void setStateChangedFlag() { stateMayHaveChanged = true; } + public void unsetStateChangedFlag() { + stateMayHaveChanged = false; + } + + public void setMaster(boolean isMaster) { + this.isMaster = isMaster; + } + + public void setMaxTransitionTime(Map<NodeType, Integer> map) { maxTransitionTime = map; } + public void setMaxInitProgressTime(int millisecs) { maxInitProgressTime = millisecs; } + public void setMaxSlobrokDisconnectGracePeriod(int millisecs) { + maxSlobrokDisconnectGracePeriod = millisecs; + } + public void setStableStateTimePeriod(long millisecs) { stableStateTimePeriod = millisecs; } + public void setMaxPrematureCrashes(int count) { maxPrematureCrashes = count; } + + // TODO nodeListener is only used via updateNodeInfoFromReportedState -> handlePrematureCrash + // TODO this will recursively invoke proposeNewNodeState, which will presumably (i.e. hopefully) be a no-op... 
/**
 * Handles a newly reported node state for the given node: logs the transition,
 * tracks the first failing connection attempt, emits REPORTED events, updates the
 * node's bookkeeping and flags that the cluster state may have to be regenerated.
 *
 * @param currentClusterState the currently published cluster state
 * @param node                the node the state was reported for
 * @param reportedState       the state the node reported (or was inferred to be in)
 * @param nodeListener        notified when a premature-crash edge changes the wanted state
 */
public void handleNewReportedNodeState(final ClusterState currentClusterState,
                                       final NodeInfo node,
                                       final NodeState reportedState,
                                       final NodeStateOrHostInfoChangeHandler nodeListener)
{
    final NodeState currentState = currentClusterState.getNodeState(node.getNode());
    // Unchanged state from a version-0 node is noise; log it at SPAM rather than DEBUG.
    final LogLevel level = (currentState.equals(reportedState) && node.getVersion() == 0) ? LogLevel.SPAM : LogLevel.DEBUG;
    if (log.isLoggable(level)) {
        log.log(level, String.format("Got nodestate reply from %s: %s (Current state is %s)",
                node, node.getReportedState().getTextualDifference(reportedState), currentState.toString(true)));
    }
    final long currentTime = timer.getCurrentTimeInMillis();

    if (reportedState.getState().equals(State.DOWN)) {
        node.setTimeOfFirstFailingConnectionAttempt(currentTime);
    }

    // *** LOGGING ONLY
    if ( ! reportedState.similarTo(node.getReportedState())) {
        if (reportedState.getState().equals(State.DOWN)) {
            eventLog.addNodeOnlyEvent(new NodeEvent(node, "Failed to get node state: " + reportedState.toString(true), NodeEvent.Type.REPORTED, currentTime), LogLevel.INFO);
        } else {
            eventLog.addNodeOnlyEvent(new NodeEvent(node, "Now reporting state " + reportedState.toString(true), NodeEvent.Type.REPORTED, currentTime), LogLevel.DEBUG);
        }
    }

    // Identical non-INITIALIZING reports carry no new information (INITIALIZING may
    // still differ in init progress, so it is always processed).
    if (reportedState.equals(node.getReportedState()) && ! reportedState.getState().equals(State.INITIALIZING)) {
        return;
    }

    updateNodeInfoFromReportedState(node, currentState, reportedState, nodeListener);

    if (reportedState.getMinUsedBits() != currentState.getMinUsedBits()) {
        final int oldCount = currentState.getMinUsedBits();
        final int newCount = reportedState.getMinUsedBits();
        log.log(LogLevel.DEBUG,
                String.format("Altering node state to reflect that min distribution bit count has changed from %d to %d",
                        oldCount, newCount));
        eventLog.add(new NodeEvent(node, String.format("Altered min distribution bit count from %d to %d", oldCount, newCount),
                NodeEvent.Type.CURRENT, currentTime), isMaster);
    } else if (log.isLoggable(LogLevel.DEBUG)) {
        log.log(LogLevel.DEBUG, String.format("Not altering state of %s in cluster state because new state is too similar: %s",
                node, currentState.getTextualDifference(reportedState)));
    }

    stateMayHaveChanged = true;
}

/** Records that a new node has appeared in slobrok and remembers its hostname. */
public void handleNewNode(NodeInfo node) {
    setHostName(node);
    String message = "Found new node " + node + " in slobrok at " + node.getRpcAddress();
    eventLog.add(new NodeEvent(node, message, NodeEvent.Type.REPORTED, timer.getCurrentTimeInMillis()), isMaster);
}

/**
 * Handles a node disappearing from slobrok. A node that was STOPPING is assumed to
 * have shut down normally and is immediately reported as DOWN; otherwise we wait to
 * see if it reappears.
 */
public void handleMissingNode(final ClusterState currentClusterState,
                              final NodeInfo node,
                              final NodeStateOrHostInfoChangeHandler nodeListener)
{
    removeHostName(node);

    final long timeNow = timer.getCurrentTimeInMillis();

    if (node.getLatestNodeStateRequestTime() != null) {
        eventLog.add(new NodeEvent(node, "Node is no longer in slobrok, but we still have a pending state request.", NodeEvent.Type.REPORTED, timeNow), isMaster);
    } else {
        eventLog.add(new NodeEvent(node, "Node is no longer in slobrok. No pending state request to node.", NodeEvent.Type.REPORTED, timeNow), isMaster);
    }

    if (node.getReportedState().getState().equals(State.STOPPING)) {
        log.log(LogLevel.DEBUG, "Node " + node.getNode() + " is no longer in slobrok. Was in stopping state, so assuming it has shut down normally. Setting node down");
        NodeState ns = node.getReportedState().clone();
        ns.setState(State.DOWN);
        handleNewReportedNodeState(currentClusterState, node, ns.clone(), nodeListener);
    } else {
        log.log(LogLevel.DEBUG, "Node " + node.getNode() + " no longer in slobrok was in state " + node.getReportedState() + ". Waiting to see if it reappears in slobrok");
    }

    stateMayHaveChanged = true;
}

/**
 * Propose a new state for a node. This may happen due to an administrator action, orchestration, or
 * a configuration change.
 *
 * If the newly proposed state differs from the state the node currently has in the system,
 * a cluster state regeneration will be triggered.
 */
public void proposeNewNodeState(final ClusterState currentClusterState, final NodeInfo node, final NodeState proposedState) {
    final NodeState currentState = currentClusterState.getNodeState(node.getNode());
    final NodeState currentReported = node.getReportedState();

    if (currentState.getState().equals(proposedState.getState())) {
        return;
    }
    stateMayHaveChanged = true;

    if (log.isLoggable(LogLevel.DEBUG)) {
        log.log(LogLevel.DEBUG, String.format("Got new wanted nodestate for %s: %s", node, currentState.getTextualDifference(proposedState)));
    }
    // Should be checked earlier before state was set in cluster
    assert(proposedState.getState().validWantedNodeState(node.getNode().getType()));
    long timeNow = timer.getCurrentTimeInMillis();
    // A wanted state "above" what the node itself reports cannot be forced; only log it.
    if (proposedState.above(currentReported)) {
        eventLog.add(new NodeEvent(node, String.format("Wanted state %s, but we cannot force node into that " +
                        "state yet as it is currently in %s", proposedState, currentReported),
                NodeEvent.Type.REPORTED, timeNow), isMaster);
        return;
    }
    if ( ! proposedState.similarTo(currentState)) {
        eventLog.add(new NodeEvent(node, String.format("Node state set to %s.", proposedState),
                NodeEvent.Type.WANTED, timeNow), isMaster);
    }
}

/** Records that a known node reappeared in slobrok with a different RPC address. */
public void handleNewRpcAddress(NodeInfo node) {
    setHostName(node);
    String message = "Node " + node + " has a new address in slobrok: " + node.getRpcAddress();
    eventLog.add(new NodeEvent(node, message, NodeEvent.Type.REPORTED, timer.getCurrentTimeInMillis()), isMaster);
}

/** Records that a node reappeared in slobrok with the same RPC address as before. */
public void handleReturnedRpcAddress(NodeInfo node) {
    setHostName(node);
    String message = "Node got back into slobrok with same address as before: " + node.getRpcAddress();
    eventLog.add(new NodeEvent(node, message, NodeEvent.Type.REPORTED, timer.getCurrentTimeInMillis()), isMaster);
}

// Remembers the node's hostname, parsed out of its slobrok RPC address spec.
private void setHostName(NodeInfo node) {
    String rpcAddress = node.getRpcAddress();
    if (rpcAddress == null) {
        // This may happen if we haven't seen the node in Slobrok yet.
        return;
    }

    Spec address = new Spec(rpcAddress);
    if (address.malformed()) {
        return;
    }

    hostnames.put(node.getNodeIndex(), address.host());
}

// Re-applies the tunables that may change on live reconfiguration.
void reconfigureFromOptions(FleetControllerOptions options) {
    setMaxPrematureCrashes(options.maxPrematureCrashes);
    setStableStateTimePeriod(options.stableStateTimePeriod);
    setMaxInitProgressTime(options.maxInitProgressTime);
    setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod);
    setMaxTransitionTime(options.maxTransitionTime);
}

private void removeHostName(NodeInfo node) {
    hostnames.remove(node.getNodeIndex());
}

/** Returns an unmodifiable view of node index -> hostname for nodes seen in slobrok. */
Map<Integer, String> getHostnames() {
    return Collections.unmodifiableMap(hostnames);
}

// TODO too many hidden behavior dependencies between this and the actually
// generated cluster state. Still a bit of a mine field...
// TODO remove all node state mutation from this function entirely in favor of ClusterStateGenerator!
//  `--> this will require adding more event edges and premature crash handling to it. Which is fine.
/**
 * Runs all time-dependent checks (slobrok disconnect grace, transition timeouts,
 * init-progress timeouts, crash-counter resets) for every node in the cluster.
 *
 * @return true if any timer fired, in which case the cluster state may have changed
 */
public boolean watchTimers(final ContentCluster cluster,
                           final ClusterState currentClusterState,
                           final NodeStateOrHostInfoChangeHandler nodeListener)
{
    boolean triggeredAnyTimers = false;
    final long currentTime = timer.getCurrentTimeInMillis();

    for(NodeInfo node : cluster.getNodeInfo()) {
        triggeredAnyTimers |= handleTimeDependentOpsForNode(currentClusterState, nodeListener, currentTime, node);
    }

    if (triggeredAnyTimers) {
        stateMayHaveChanged = true;
    }
    return triggeredAnyTimers;
}

// Evaluates every timer-driven edge for a single node. Returns true if any fired.
private boolean handleTimeDependentOpsForNode(final ClusterState currentClusterState,
                                              final NodeStateOrHostInfoChangeHandler nodeListener,
                                              final long currentTime,
                                              final NodeInfo node)
{
    final NodeState currentStateInSystem = currentClusterState.getNodeState(node.getNode());
    final NodeState lastReportedState = node.getReportedState();
    boolean triggeredAnyTimers = false;

    triggeredAnyTimers = reportDownIfOutdatedSlobrokNode(
            currentClusterState, nodeListener, currentTime, node, lastReportedState);

    if (nodeStillUnavailableAfterTransitionTimeExceeded(
            currentTime, node, currentStateInSystem, lastReportedState))
    {
        eventLog.add(new NodeEvent(node, String.format(
                "%d milliseconds without contact. Marking node down.",
                currentTime - node.getTransitionTime()),
                NodeEvent.Type.CURRENT, currentTime), isMaster);
        triggeredAnyTimers = true;
    }

    if (nodeInitProgressHasTimedOut(currentTime, node, currentStateInSystem, lastReportedState)) {
        eventLog.add(new NodeEvent(node, String.format(
                "%d milliseconds without initialize progress. Marking node down. " +
                "Premature crash count is now %d.",
                currentTime - node.getInitProgressTime(),
                node.getPrematureCrashCount() + 1),
                NodeEvent.Type.CURRENT, currentTime), isMaster);
        handlePrematureCrash(node, nodeListener);
        triggeredAnyTimers = true;
    }

    // A node that has stayed stably up (or stably down) long enough has its premature
    // crash count forgiven.
    if (mayResetCrashCounterOnStableUpNode(currentTime, node, lastReportedState)) {
        node.setPrematureCrashCount(0);
        log.log(LogLevel.DEBUG, "Resetting premature crash count on node " + node + " as it has been up for a long time.");
        triggeredAnyTimers = true;
    } else if (mayResetCrashCounterOnStableDownNode(currentTime, node, lastReportedState)) {
        node.setPrematureCrashCount(0);
        log.log(LogLevel.DEBUG, "Resetting premature crash count on node " + node + " as it has been down for a long time.");
        triggeredAnyTimers = true;
    }

    return triggeredAnyTimers;
}

// True when a storage node has been INITIALIZING without progress for longer than
// maxInitProgressTime (0 disables the check) while not already down or wanted down.
private boolean nodeInitProgressHasTimedOut(long currentTime, NodeInfo node, NodeState currentStateInSystem, NodeState lastReportedState) {
    return !currentStateInSystem.getState().equals(State.DOWN)
        && node.getWantedState().above(new NodeState(node.getNode().getType(), State.DOWN))
        && lastReportedState.getState().equals(State.INITIALIZING)
        && maxInitProgressTime != 0
        && node.getInitProgressTime() + maxInitProgressTime <= currentTime
        && node.getNode().getType().equals(NodeType.STORAGE);
}

// True when the node has a non-zero crash count (not exceeding the max) and has now
// been stably DOWN through the configured stable-state period.
private boolean mayResetCrashCounterOnStableDownNode(long currentTime, NodeInfo node, NodeState lastReportedState) {
    return node.getDownStableStateTime() + stableStateTimePeriod <= currentTime
        && lastReportedState.getState().equals(State.DOWN)
        && node.getPrematureCrashCount() <= maxPrematureCrashes
        && node.getPrematureCrashCount() != 0;
}

// True when the node has a non-zero crash count (not exceeding the max) and has now
// been stably UP through the configured stable-state period.
private boolean mayResetCrashCounterOnStableUpNode(long currentTime, NodeInfo node, NodeState lastReportedState) {
    return node.getUpStableStateTime() + stableStateTimePeriod <= currentTime
        && lastReportedState.getState().equals(State.UP)
        && node.getPrematureCrashCount() <= maxPrematureCrashes
        && node.getPrematureCrashCount() != 0;
}

// True when a node in MAINTENANCE (wanted above DOWN) has been unreachable past the
// configured per-node-type transition time, i.e. should be taken down for real.
private boolean nodeStillUnavailableAfterTransitionTimeExceeded(
        long currentTime,
        NodeInfo node,
        NodeState currentStateInSystem,
        NodeState lastReportedState)
{
    return currentStateInSystem.getState().equals(State.MAINTENANCE)
        && node.getWantedState().above(new NodeState(node.getNode().getType(), State.DOWN))
        && (lastReportedState.getState().equals(State.DOWN) || node.isRpcAddressOutdated())
        && node.getTransitionTime() + maxTransitionTime.get(node.getNode().getType()) < currentTime;
}

// Marks a node DOWN once it has been out of slobrok longer than the disconnect grace
// period, aborting pending state requests. Returns true if the node was taken down.
private boolean reportDownIfOutdatedSlobrokNode(ClusterState currentClusterState,
                                                NodeStateOrHostInfoChangeHandler nodeListener,
                                                long currentTime,
                                                NodeInfo node,
                                                NodeState lastReportedState)
{
    if (node.isRpcAddressOutdated()
        && !lastReportedState.getState().equals(State.DOWN)
        && node.getRpcAddressOutdatedTimestamp() + maxSlobrokDisconnectGracePeriod <= currentTime)
    {
        final String desc = String.format(
                "Set node down as it has been out of slobrok for %d ms which " +
                "is more than the max limit of %d ms.",
                currentTime - node.getRpcAddressOutdatedTimestamp(),
                maxSlobrokDisconnectGracePeriod);
        node.abortCurrentNodeStateRequests();
        NodeState state = lastReportedState.clone();
        state.setState(State.DOWN);
        if (!state.hasDescription()) {
            state.setDescription(desc);
        }
        eventLog.add(new NodeEvent(node, desc, NodeEvent.Type.CURRENT, currentTime), isMaster);
        handleNewReportedNodeState(currentClusterState, node, state.clone(), nodeListener);
        node.setReportedState(state, currentTime);
        return true;
    }
    return false;
}

// A STOPPING state whose description matches a SIGTERM or an explicit "controlled
// shutdown" marker is considered an orderly stop, not a crash.
private boolean isControlledShutdown(NodeState state) {
    return (state.getState() == State.STOPPING
            && (state.getDescription().contains("Received signal 15 (SIGTERM - Termination signal)")
                || state.getDescription().contains("controlled shutdown")));
}

/**
 * Modify a node's cross-state information in the cluster based on a newly arrived reported
 * state.
 *
 * @param node the node we are computing the state of
 * @param currentState the current state of the node
 * @param reportedState the new state reported by (or, in the case of down - inferred from) the node
 * @param nodeListener this listener is notified for some of the system state changes that this will return
 */
private void updateNodeInfoFromReportedState(final NodeInfo node,
                                             final NodeState currentState,
                                             final NodeState reportedState,
                                             final NodeStateOrHostInfoChangeHandler nodeListener) {
    final long timeNow = timer.getCurrentTimeInMillis();
    if (log.isLoggable(LogLevel.DEBUG)) {
        log.log(LogLevel.DEBUG, String.format("Finding new cluster state entry for %s switching state %s",
                node, currentState.getTextualDifference(reportedState)));
    }

    // Order matters here: a detected crash edge short-circuits the remaining checks.
    if (handleReportedNodeCrashEdge(node, currentState, reportedState, nodeListener, timeNow)) {
        return;
    }
    if (initializationProgressHasIncreased(currentState, reportedState)) {
        node.setInitProgressTime(timeNow);
        if (log.isLoggable(LogLevel.SPAM)) {
            log.log(LogLevel.SPAM, "Reset initialize timer on " + node + " to " + node.getInitProgressTime());
        }
    }
    if (handleImplicitCrashEdgeFromReverseInitProgress(node, currentState, reportedState, nodeListener, timeNow)) {
        return;
    }
    markNodeUnstableIfDownEdgeDuringInit(node, currentState, reportedState, nodeListener, timeNow);
}

// If we go down while initializing, mark node unstable, such that we don't mark it initializing again before it is up.
private void markNodeUnstableIfDownEdgeDuringInit(final NodeInfo node,
                                                  final NodeState currentState,
                                                  final NodeState reportedState,
                                                  final NodeStateOrHostInfoChangeHandler nodeListener,
                                                  final long timeNow) {
    if (currentState.getState().equals(State.INITIALIZING)
        && reportedState.getState().oneOf("ds")
        && !isControlledShutdown(reportedState))
    {
        eventLog.add(new NodeEvent(node, String.format("Stop or crash during initialization. " +
                        "Premature crash count is now %d.", node.getPrematureCrashCount() + 1),
                NodeEvent.Type.CURRENT, timeNow), isMaster);
        handlePrematureCrash(node, nodeListener);
    }
}

// TODO do we need this when we have startup timestamps? at least it's unit tested.
// TODO this seems fairly contrived...
// If we get reverse initialize progress, mark node unstable, such that we don't mark it initializing again before it is up.
private boolean handleImplicitCrashEdgeFromReverseInitProgress(final NodeInfo node,
                                                               final NodeState currentState,
                                                               final NodeState reportedState,
                                                               final NodeStateOrHostInfoChangeHandler nodeListener,
                                                               final long timeNow) {
    if (currentState.getState().equals(State.INITIALIZING) &&
        (reportedState.getState().equals(State.INITIALIZING) && reportedState.getInitProgress() < currentState.getInitProgress()))
    {
        eventLog.add(new NodeEvent(node, String.format(
                "Stop or crash during initialization detected from reverse initializing progress." +
                " Progress was %g but is now %g. Premature crash count is now %d.",
                currentState.getInitProgress(), reportedState.getInitProgress(),
                node.getPrematureCrashCount() + 1),
                NodeEvent.Type.CURRENT, timeNow), isMaster);
        node.setRecentlyObservedUnstableDuringInit(true);
        handlePrematureCrash(node, nodeListener);
        return true;
    }
    return false;
}

// Detects an up->down edge. If the node had not been stably up through the stable-state
// period (and this was not a controlled shutdown), it counts as a premature crash.
// Returns true only when the crash-count threshold was tripped and a wanted-state
// change was pushed; a plain down edge still returns false.
private boolean handleReportedNodeCrashEdge(NodeInfo node, NodeState currentState,
                                            NodeState reportedState, NodeStateOrHostInfoChangeHandler nodeListener,
                                            long timeNow) {
    if (nodeUpToDownEdge(node, currentState, reportedState)) {
        node.setTransitionTime(timeNow);
        if (node.getUpStableStateTime() + stableStateTimePeriod > timeNow && !isControlledShutdown(reportedState)) {
            log.log(LogLevel.DEBUG, "Stable state: " + node.getUpStableStateTime() + " + " + stableStateTimePeriod + " > " + timeNow);
            eventLog.add(new NodeEvent(node,
                    String.format("Stopped or possibly crashed after %d ms, which is before " +
                            "stable state time period. Premature crash count is now %d.",
                            timeNow - node.getUpStableStateTime(), node.getPrematureCrashCount() + 1),
                    NodeEvent.Type.CURRENT,
                    timeNow), isMaster);
            if (handlePrematureCrash(node, nodeListener)) {
                return true;
            }
        }
    }
    return false;
}

// True when a node reports INITIALIZING with progress beyond what the cluster state
// currently records (or the cluster state does not yet record INITIALIZING at all).
private boolean initializationProgressHasIncreased(NodeState currentState, NodeState reportedState) {
    return reportedState.getState().equals(State.INITIALIZING) &&
           (!currentState.getState().equals(State.INITIALIZING) ||
            reportedState.getInitProgress() > currentState.getInitProgress());
}

// An up/retired node reporting down/init/stopping is a down edge; a RETIRED wanted
// state additionally treats a report of INITIALIZING as such an edge.
private boolean nodeUpToDownEdge(NodeInfo node, NodeState currentState, NodeState reportedState) {
    return currentState.getState().oneOf("ur") && reportedState.getState().oneOf("dis")
        && (node.getWantedState().getState().equals(State.RETIRED) || !reportedState.getState().equals(State.INITIALIZING));
}

// Increments the node's premature crash count; once it exceeds maxPrematureCrashes the
// node is administratively wanted DOWN and the listener is told about the new wanted
// state. Returns true iff the node was disabled by this call.
private boolean handlePrematureCrash(NodeInfo node, NodeStateOrHostInfoChangeHandler changeListener) {
    node.setPrematureCrashCount(node.getPrematureCrashCount() + 1);
    if (disableUnstableNodes && node.getPrematureCrashCount() > maxPrematureCrashes) {
        NodeState wantedState = new NodeState(node.getNode().getType(), State.DOWN)
                .setDescription("Disabled by fleet controller as it prematurely shut down " + node.getPrematureCrashCount() + " times in a row");
        NodeState oldState = node.getWantedState();
        node.setWantedState(wantedState);
        if ( ! oldState.equals(wantedState)) {
            changeListener.handleNewWantedNodeState(node, wantedState);
        }
        return true;
    }
    return false;
}

}
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/StateVersionTracker.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/StateVersionTracker.java
new file mode 100644
index 00000000000..f5a67ca9434
--- /dev/null
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/StateVersionTracker.java
@@ -0,0 +1,140 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.clustercontroller.core;

import com.yahoo.vdslib.state.ClusterState;
import com.yahoo.vespa.clustercontroller.core.hostinfo.HostInfo;

import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

/**
 * Keeps track of the active cluster state and handles the transition edges between
 * one state to the next. In particular, it ensures that states have strictly increasing
 * version numbers.
 *
 * Wraps ClusterStateView to ensure its knowledge of available nodes stays up to date.
 */
public class StateVersionTracker {

    // We always increment the version _before_ publishing, so the effective first cluster
    // state version when starting from 1 will be 2. This matches legacy behavior and a bunch
    // of existing tests expect it.
    private int currentVersion = 1;
    private int lastZooKeeperVersion = 0;

    // The lowest published distribution bit count for the lifetime of this controller.
    // TODO this mirrors legacy behavior, but should be moved into stable ZK state.
    private int lowestObservedDistributionBits = 16;

    private ClusterState currentUnversionedState = ClusterState.emptyState();
    private AnnotatedClusterState latestCandidateState = AnnotatedClusterState.emptyState();
    private AnnotatedClusterState currentClusterState = latestCandidateState;

    private final MetricUpdater metricUpdater;
    private ClusterStateView clusterStateView;

    // Most recent state first; trimmed to maxHistoryEntryCount on each new entry.
    private final LinkedList<ClusterStateHistoryEntry> clusterStateHistory = new LinkedList<>();
    private int maxHistoryEntryCount = 50;

    StateVersionTracker(final MetricUpdater metricUpdater) {
        this.metricUpdater = metricUpdater;
        clusterStateView = ClusterStateView.create(currentUnversionedState, metricUpdater);
    }

    // Adopts the version persisted in ZooKeeper, clamped to at least 1 so that the
    // first published version is always >= 2 (see comment on currentVersion).
    void setVersionRetrievedFromZooKeeper(final int version) {
        this.currentVersion = Math.max(1, version);
        this.lastZooKeeperVersion = this.currentVersion;
    }

    /**
     * Sets limit on how many cluster states can be kept in the in-memory queue. Once
     * the list exceeds this limit, the oldest state is repeatedly removed until the limit
     * is no longer exceeded.
     *
     * Takes effect upon the next invocation of promoteCandidateToVersionedState().
     */
    void setMaxHistoryEntryCount(final int maxHistoryEntryCount) {
        this.maxHistoryEntryCount = maxHistoryEntryCount;
    }

    int getCurrentVersion() {
        return this.currentVersion;
    }

    // True until promoteCandidateToVersionedState() pushes currentVersion past the
    // version last read from ZooKeeper.
    boolean hasReceivedNewVersionFromZooKeeper() {
        return currentVersion <= lastZooKeeperVersion;
    }

    int getLowestObservedDistributionBits() {
        return lowestObservedDistributionBits;
    }

    AnnotatedClusterState getAnnotatedVersionedClusterState() {
        return currentClusterState;
    }

    public ClusterState getVersionedClusterState() {
        return currentClusterState.getClusterState();
    }

    // Candidate states must arrive unversioned (version 0); versioning happens only
    // at promotion time.
    public void updateLatestCandidateState(final AnnotatedClusterState candidate) {
        assert(latestCandidateState.getClusterState().getVersion() == 0);
        latestCandidateState = candidate;
    }

    /**
     * Returns the last state provided to updateLatestCandidateState, which _may or may not_ be
     * a published state. Primary use case for this function is a caller which is interested in
     * changes that may not be reflected in the published state. The best example of this would
     * be node state changes when a cluster is marked as Down.
     */
    public AnnotatedClusterState getLatestCandidateState() {
        return latestCandidateState;
    }

    public List<ClusterStateHistoryEntry> getClusterStateHistory() {
        return Collections.unmodifiableList(clusterStateHistory);
    }

    // Init-progress-only differences are deliberately ignored to avoid republishing
    // a new version for every progress tick.
    boolean candidateChangedEnoughFromCurrentToWarrantPublish() {
        return !currentUnversionedState.similarToIgnoringInitProgress(latestCandidateState.getClusterState());
    }

    // Stamps the latest candidate with the next (strictly increasing) version, makes it
    // the current state and records it in the history.
    void promoteCandidateToVersionedState(final long currentTimeMs) {
        final int newVersion = currentVersion + 1;
        updateStatesForNewVersion(latestCandidateState, newVersion);
        currentVersion = newVersion;

        recordCurrentStateInHistoryAtTime(currentTimeMs);
    }

    private void updateStatesForNewVersion(final AnnotatedClusterState newState, final int newVersion) {
        currentClusterState = new AnnotatedClusterState(
                newState.getClusterState().clone(), // Because we mutate version below
                newState.getClusterStateReason(),
                newState.getNodeStateReasons());
        currentClusterState.getClusterState().setVersion(newVersion);
        currentUnversionedState = newState.getClusterState().clone();
        lowestObservedDistributionBits = Math.min(
                lowestObservedDistributionBits,
                newState.getClusterState().getDistributionBitCount());
        // TODO should this take place in updateLatestCandidateState instead? I.e. does it require a consolidated state?
        clusterStateView = ClusterStateView.create(currentClusterState.getClusterState(), metricUpdater);
    }

    private void recordCurrentStateInHistoryAtTime(final long currentTimeMs) {
        clusterStateHistory.addFirst(new ClusterStateHistoryEntry(
                currentClusterState.getClusterState(), currentTimeMs));
        while (clusterStateHistory.size() > maxHistoryEntryCount) {
            clusterStateHistory.removeLast();
        }
    }

    void handleUpdatedHostInfo(final Map<Integer, String> hostnames, final NodeInfo node, final HostInfo hostInfo) {
        // TODO the wiring here isn't unit tested. Need mockable integration points.
+ clusterStateView.handleUpdatedHostInfo(hostnames, node, hostInfo); + } + +} diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/SystemStateGenerator.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/SystemStateGenerator.java deleted file mode 100644 index 7edff399633..00000000000 --- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/SystemStateGenerator.java +++ /dev/null @@ -1,941 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.vespa.clustercontroller.core; - -import com.yahoo.jrt.Spec; -import com.yahoo.log.LogLevel; -import com.yahoo.vdslib.distribution.ConfiguredNode; -import com.yahoo.vdslib.distribution.Distribution; -import com.yahoo.vdslib.state.*; -import com.yahoo.vespa.clustercontroller.core.database.DatabaseHandler; -import com.yahoo.vespa.clustercontroller.core.hostinfo.HostInfo; -import com.yahoo.vespa.clustercontroller.core.listeners.NodeStateOrHostInfoChangeHandler; -import com.yahoo.vespa.clustercontroller.core.listeners.SystemStateListener; - -import java.util.*; -import java.util.logging.Logger; -import java.text.ParseException; -import java.util.stream.Collectors; - -/** - * This class get node state updates and uses them to decide the cluster state. - */ -// TODO: Remove all current state from this and make it rely on state from ClusterInfo instead -// TODO: Do this ASAP! SystemStateGenerator should ideally behave as a pure function! 
-public class SystemStateGenerator { - - private static Logger log = Logger.getLogger(SystemStateGenerator.class.getName()); - - private final Timer timer; - private final EventLogInterface eventLog; - private ClusterStateView currentClusterStateView; - private ClusterStateView nextClusterStateView; - private Distribution distribution; - private boolean nextStateViewChanged = false; - private boolean isMaster = false; - - private Map<NodeType, Integer> maxTransitionTime = new TreeMap<>(); - private int maxInitProgressTime = 5000; - private int maxPrematureCrashes = 4; - private long stableStateTimePeriod = 60 * 60 * 1000; - private static final int maxHistorySize = 50; - private Set<ConfiguredNode> nodes; - private Map<Integer, String> hostnames = new HashMap<>(); - private int minDistributorNodesUp = 1; - private int minStorageNodesUp = 1; - private double minRatioOfDistributorNodesUp = 0.50; - private double minRatioOfStorageNodesUp = 0.50; - private double minNodeRatioPerGroup = 0.0; - private int maxSlobrokDisconnectGracePeriod = 1000; - private int idealDistributionBits = 16; - private static final boolean disableUnstableNodes = true; - - private final LinkedList<SystemStateHistoryEntry> systemStateHistory = new LinkedList<>(); - - /** - * @param metricUpdater may be null, in which case no metrics will be recorded. 
- */ - public SystemStateGenerator(Timer timer, EventLogInterface eventLog, MetricUpdater metricUpdater) { - try { - currentClusterStateView = ClusterStateView.create("", metricUpdater); - nextClusterStateView = ClusterStateView.create("", metricUpdater); - } catch (ParseException e) { - throw new RuntimeException("Parsing empty string should always work"); - } - this.timer = timer; - this.eventLog = eventLog; - maxTransitionTime.put(NodeType.DISTRIBUTOR, 5000); - maxTransitionTime.put(NodeType.STORAGE, 5000); - } - - public void handleAllDistributorsInSync(DatabaseHandler database, - DatabaseHandler.Context dbContext) throws InterruptedException { - int startTimestampsReset = 0; - for (NodeType nodeType : NodeType.getTypes()) { - for (ConfiguredNode configuredNode : nodes) { - Node node = new Node(nodeType, configuredNode.index()); - NodeInfo nodeInfo = dbContext.getCluster().getNodeInfo(node); - NodeState nodeState = nextClusterStateView.getClusterState().getNodeState(node); - if (nodeInfo != null && nodeState != null) { - if (nodeState.getStartTimestamp() > nodeInfo.getStartTimestamp()) { - log.log(LogLevel.DEBUG, "Storing away new start timestamp for node " + node); - nodeInfo.setStartTimestamp(nodeState.getStartTimestamp()); - } - if (nodeState.getStartTimestamp() > 0) { - log.log(LogLevel.DEBUG, "Resetting timestamp in cluster state for node " + node); - nodeState.setStartTimestamp(0); - nextClusterStateView.getClusterState().setNodeState(node, nodeState); - ++startTimestampsReset; - } - } else { - log.log(LogLevel.DEBUG, node + ": " + - (nodeInfo == null ? "null" : nodeInfo.getStartTimestamp()) + ", " + - (nodeState == null ? 
"null" : nodeState.getStartTimestamp())); - } - } - } - if (startTimestampsReset > 0) { - eventLog.add(new ClusterEvent(ClusterEvent.Type.SYSTEMSTATE, "Reset " + startTimestampsReset + - " start timestamps as all available distributors have seen newest cluster state.", timer.getCurrentTimeInMillis())); - nextStateViewChanged = true; - database.saveStartTimestamps(dbContext); - } else { - log.log(LogLevel.DEBUG, "Found no start timestamps to reset in cluster state."); - } - } - - public void setMaxTransitionTime(Map<NodeType, Integer> map) { maxTransitionTime = map; } - public void setMaxInitProgressTime(int millisecs) { maxInitProgressTime = millisecs; } - public void setMaxPrematureCrashes(int count) { maxPrematureCrashes = count; } - public void setStableStateTimePeriod(long millisecs) { stableStateTimePeriod = millisecs; } - - public ClusterStateView currentClusterStateView() { return currentClusterStateView; } - - /** Returns an immutable list of the historical states this has generated */ - public List<SystemStateHistoryEntry> systemStateHistory() { - return Collections.unmodifiableList(systemStateHistory); - } - - public void setMinNodesUp(int minDistNodes, int minStorNodes, double minDistRatio, double minStorRatio) { - minDistributorNodesUp = minDistNodes; - minStorageNodesUp = minStorNodes; - minRatioOfDistributorNodesUp = minDistRatio; - minRatioOfStorageNodesUp = minStorRatio; - nextStateViewChanged = true; - } - - public void setMinNodeRatioPerGroup(double upRatio) { - this.minNodeRatioPerGroup = upRatio; - nextStateViewChanged = true; - } - - /** Sets the nodes of this and attempts to keep the node state in sync */ - public void setNodes(ClusterInfo newClusterInfo) { - this.nodes = new HashSet<>(newClusterInfo.getConfiguredNodes().values()); - - for (ConfiguredNode node : this.nodes) { - NodeInfo newNodeInfo = newClusterInfo.getStorageNodeInfo(node.index()); - NodeState currentState = currentClusterStateView.getClusterState().getNodeState(new 
Node(NodeType.STORAGE, node.index())); - if (currentState.getState() == State.RETIRED || currentState.getState() == State.UP) { // then correct to configured state - proposeNewNodeState(newNodeInfo, new NodeState(NodeType.STORAGE, node.retired() ? State.RETIRED : State.UP)); - } - } - - // Ensure that any nodes that have been removed from the config are also - // promptly removed from the next (and subsequent) generated cluster states. - pruneAllNodesNotContainedInConfig(); - - nextStateViewChanged = true; - } - - private void pruneAllNodesNotContainedInConfig() { - Set<Integer> configuredIndices = this.nodes.stream().map(ConfiguredNode::index).collect(Collectors.toSet()); - final ClusterState candidateNextState = nextClusterStateView.getClusterState(); - pruneNodesNotContainedInConfig(candidateNextState, configuredIndices, NodeType.DISTRIBUTOR); - pruneNodesNotContainedInConfig(candidateNextState, configuredIndices, NodeType.STORAGE); - } - - public void setDistribution(Distribution distribution) { - this.distribution = distribution; - nextStateViewChanged = true; - } - - public void setMaster(boolean isMaster) { - this.isMaster = isMaster; - } - public void setMaxSlobrokDisconnectGracePeriod(int millisecs) { maxSlobrokDisconnectGracePeriod = millisecs; } - - public void setDistributionBits(int bits) { - if (bits == idealDistributionBits) return; - idealDistributionBits = bits; - int currentDistributionBits = calculateMinDistributionBitCount(); - if (currentDistributionBits != nextClusterStateView.getClusterState().getDistributionBitCount()) { - nextClusterStateView.getClusterState().setDistributionBits(currentDistributionBits); - nextStateViewChanged = true; - } - } - - public int getDistributionBits() { return idealDistributionBits; } - - public int calculateMinDistributionBitCount() { - int currentDistributionBits = idealDistributionBits; - int minNode = -1; - for (ConfiguredNode node : nodes) { - NodeState ns = 
nextClusterStateView.getClusterState().getNodeState(new Node(NodeType.STORAGE, node.index())); - if (ns.getState().oneOf("iur")) { - if (ns.getMinUsedBits() < currentDistributionBits) { - currentDistributionBits = ns.getMinUsedBits(); - minNode = node.index(); - } - } - } - if (minNode == -1) { - log.log(LogLevel.DEBUG, "Distribution bit count should still be default as all available nodes have at least split to " + idealDistributionBits + " bits"); - } else { - log.log(LogLevel.DEBUG, "Distribution bit count is limited to " + currentDistributionBits + " due to storage node " + minNode); - } - return currentDistributionBits; - } - - public ClusterState getClusterState() { return currentClusterStateView.getClusterState(); } - - /** - * Return the current cluster state, but if the cluster is down, modify the node states with the - * actual node states from the temporary next state. - */ - public ClusterState getConsolidatedClusterState() { - ClusterState currentState = currentClusterStateView.getClusterState(); - if (currentState.getClusterState().equals(State.UP)) { - return currentState; - } - - ClusterState nextState = nextClusterStateView.getClusterState(); - if (!currentState.getClusterState().equals(nextState.getClusterState())) { - log.warning("Expected current cluster state object to have same global state as the under creation instance."); - } - ClusterState state = nextState.clone(); - state.setVersion(currentState.getVersion()); - state.setOfficial(false); - return state; - } - - private Optional<Event> getDownDueToTooFewNodesEvent(ClusterState nextClusterState) { - int upStorageCount = 0, upDistributorCount = 0; - int dcount = nodes.size(); - int scount = nodes.size(); - for (NodeType type : NodeType.getTypes()) { - for (ConfiguredNode node : nodes) { - NodeState ns = nextClusterState.getNodeState(new Node(type, node.index())); - if (ns.getState() == State.UP || ns.getState() == State.RETIRED || ns.getState() == State.INITIALIZING) { - if 
(type.equals(NodeType.STORAGE)) - ++upStorageCount; - else - ++upDistributorCount; - } - } - } - - long timeNow = timer.getCurrentTimeInMillis(); - if (upStorageCount < minStorageNodesUp) { - return Optional.of(new ClusterEvent(ClusterEvent.Type.SYSTEMSTATE, - "Less than " + minStorageNodesUp + " storage nodes available (" + upStorageCount + "). Setting cluster state down.", - timeNow)); - } - if (upDistributorCount < minDistributorNodesUp) { - return Optional.of(new ClusterEvent(ClusterEvent.Type.SYSTEMSTATE, - "Less than " + minDistributorNodesUp + " distributor nodes available (" + upDistributorCount + "). Setting cluster state down.", - timeNow)); - } - if (minRatioOfStorageNodesUp * scount > upStorageCount) { - return Optional.of(new ClusterEvent(ClusterEvent.Type.SYSTEMSTATE, - "Less than " + (100 * minRatioOfStorageNodesUp) + " % of storage nodes are available (" - + upStorageCount + "/" + scount + "). Setting cluster state down.", - timeNow)); - } - if (minRatioOfDistributorNodesUp * dcount > upDistributorCount) { - return Optional.of(new ClusterEvent(ClusterEvent.Type.SYSTEMSTATE, - "Less than " + (100 * minRatioOfDistributorNodesUp) + " % of distributor nodes are available (" - + upDistributorCount + "/" + dcount + "). Setting cluster state down.", - timeNow)); - } - return Optional.empty(); - } - - private static Node storageNode(int index) { - return new Node(NodeType.STORAGE, index); - } - - private void performImplicitStorageNodeStateTransitions(ClusterState candidateState, ContentCluster cluster) { - if (distribution == null) { - return; // FIXME due to tests that don't bother setting distr config! Never happens in prod. - } - // First clear the states of any nodes that according to reported/wanted state alone - // should have their states cleared. 
We might still take these down again based on the - // decisions of the group availability calculator, but this way we ensure that groups - // that no longer should be down will have their nodes implicitly made available again. - // TODO this will be void once SystemStateGenerator has been rewritten to be stateless. - final Set<Integer> clearedNodes = clearDownStateForStorageNodesThatCanBeUp(candidateState, cluster); - - final GroupAvailabilityCalculator calc = new GroupAvailabilityCalculator.Builder() - .withMinNodeRatioPerGroup(minNodeRatioPerGroup) - .withDistribution(distribution) - .build(); - final Set<Integer> nodesToTakeDown = calc.nodesThatShouldBeDown(candidateState); - markNodesAsDownDueToGroupUnavailability(cluster, candidateState, nodesToTakeDown, clearedNodes); - - clearedNodes.removeAll(nodesToTakeDown); - logEventsForNodesThatWereTakenUp(clearedNodes, cluster); - } - - private void logEventsForNodesThatWereTakenUp(Set<Integer> newlyUpNodes, ContentCluster cluster) { - newlyUpNodes.forEach(i -> { - final NodeInfo info = cluster.getNodeInfo(storageNode(i)); // Should always be non-null here. - // TODO the fact that this only happens for group up events is implementation specific - // should generalize this if we get other such events. 
- eventLog.addNodeOnlyEvent(new NodeEvent(info, - "Group availability restored; taking node back up", - NodeEvent.Type.CURRENT, timer.getCurrentTimeInMillis()), LogLevel.INFO); - }); - } - - private void markNodesAsDownDueToGroupUnavailability(ContentCluster cluster, - ClusterState candidateState, - Set<Integer> nodesToTakeDown, - Set<Integer> clearedNodes) - { - for (Integer idx : nodesToTakeDown) { - final Node node = storageNode(idx); - NodeState newState = new NodeState(NodeType.STORAGE, State.DOWN); - newState.setDescription("group node availability below configured threshold"); - candidateState.setNodeState(node, newState); - - logNodeGroupDownEdgeEventOnce(clearedNodes, node, cluster); - } - } - - private void logNodeGroupDownEdgeEventOnce(Set<Integer> clearedNodes, Node node, ContentCluster cluster) { - final NodeInfo nodeInfo = cluster.getNodeInfo(node); - // If clearedNodes contains the index it means we're just re-downing a node - // that was previously down. If this is the case, we'd cause a duplicate - // event if we logged it now as well. - if (nodeInfo != null && !clearedNodes.contains(node.getIndex())) { - eventLog.addNodeOnlyEvent(new NodeEvent(nodeInfo, - "Setting node down as the total availability of its group is " + - "below the configured threshold", - NodeEvent.Type.CURRENT, timer.getCurrentTimeInMillis()), LogLevel.INFO); - } - } - - private NodeState baselineNodeState(NodeInfo info) { - NodeState reported = info.getReportedState(); - NodeState wanted = info.getWantedState(); - - final NodeState baseline = reported.clone(); - if (wanted.getState() != State.UP) { - baseline.setDescription(wanted.getDescription()); - if (reported.above(wanted)) { - baseline.setState(wanted.getState()); - } - } - // Don't reintroduce start timestamp to the node's state if it has already been - // observed by all distributors. This matches how handleNewReportedNodeState() sets timestamps. - // TODO make timestamp semantics clearer. 
Non-obvious what the two different timestamp stores imply. - // For posterity: reported.getStartTimestamp() is the start timestamp the node itself has stated. - // info.getStartTimestamp() is the timestamp written as having been observed by all distributors - // (which is done in handleAllDistributorsInSync()). - if (reported.getStartTimestamp() <= info.getStartTimestamp()) { - baseline.setStartTimestamp(0); - } - - return baseline; - } - - // Returns set of nodes whose state was cleared - private Set<Integer> clearDownStateForStorageNodesThatCanBeUp( - ClusterState candidateState, ContentCluster cluster) - { - final int nodeCount = candidateState.getNodeCount(NodeType.STORAGE); - final Set<Integer> clearedNodes = new HashSet<>(); - for (int i = 0; i < nodeCount; ++i) { - final Node node = storageNode(i); - final NodeInfo info = cluster.getNodeInfo(node); - final NodeState currentState = candidateState.getNodeState(node); - if (mayClearCurrentNodeState(currentState, info)) { - candidateState.setNodeState(node, baselineNodeState(info)); - clearedNodes.add(i); - } - } - return clearedNodes; - } - - private boolean mayClearCurrentNodeState(NodeState currentState, NodeInfo info) { - if (currentState.getState() != State.DOWN) { - return false; - } - if (info == null) { - // Nothing known about node in cluster info; we definitely don't want it - // to be taken up at this point. - return false; - } - // There exists an edge in watchTimers where a node in Maintenance is implicitly - // transitioned into Down without being Down in either reported or wanted states - // iff isRpcAddressOutdated() is true. To avoid getting into an edge where we - // inadvertently clear this state because its reported/wanted states seem fine, - // we must also check if that particular edge could have happened. I.e. whether - // the node's RPC address is marked as outdated. 
- // It also makes sense in general to not allow taking a node back up automatically - // if its RPC connectivity appears to be bad. - if (info.isRpcAddressOutdated()) { - return false; - } - // Rationale: we can only enter this statement if the _current_ (generated) state - // of the node is Down. Aside from the group take-down logic, there should not exist - // any other edges in the cluster controller state transition logic where a node - // may be set Down while both its reported state and wanted state imply that a better - // state should already have been chosen. Consequently we allow the node to have its - // Down-state cleared. - return (info.getReportedState().getState() != State.DOWN - && !info.getWantedState().getState().oneOf("d")); - } - - private ClusterStateView createNextVersionOfClusterStateView(ContentCluster cluster) { - // If you change this method, see *) in notifyIfNewSystemState - ClusterStateView candidateClusterStateView = nextClusterStateView.cloneForNewState(); - ClusterState candidateClusterState = candidateClusterStateView.getClusterState(); - - int currentDistributionBits = calculateMinDistributionBitCount(); - if (currentDistributionBits != nextClusterStateView.getClusterState().getDistributionBitCount()) { - candidateClusterState.setDistributionBits(currentDistributionBits); - } - performImplicitStorageNodeStateTransitions(candidateClusterState, cluster); - - return candidateClusterStateView; - } - - private void pruneNodesNotContainedInConfig(ClusterState candidateClusterState, - Set<Integer> configuredIndices, - NodeType nodeType) - { - final int nodeCount = candidateClusterState.getNodeCount(nodeType); - for (int i = 0; i < nodeCount; ++i) { - final Node node = new Node(nodeType, i); - final NodeState currentState = candidateClusterState.getNodeState(node); - if (!configuredIndices.contains(i) && !currentState.getState().equals(State.DOWN)) { - log.log(LogLevel.INFO, "Removing node " + node + " from state as it is no longer 
present in config"); - candidateClusterState.setNodeState(node, new NodeState(nodeType, State.DOWN)); - } - } - } - - private void recordNewClusterStateHasBeenChosen( - ClusterState currentClusterState, ClusterState newClusterState, Event clusterEvent) { - long timeNow = timer.getCurrentTimeInMillis(); - - if (!currentClusterState.getClusterState().equals(State.UP) && - newClusterState.getClusterState().equals(State.UP)) { - eventLog.add(new ClusterEvent(ClusterEvent.Type.SYSTEMSTATE, - "Enough nodes available for system to become up.", timeNow), isMaster); - } else if (currentClusterState.getClusterState().equals(State.UP) && - ! newClusterState.getClusterState().equals(State.UP)) { - assert(clusterEvent != null); - eventLog.add(clusterEvent, isMaster); - } - - if (newClusterState.getDistributionBitCount() != currentClusterState.getDistributionBitCount()) { - eventLog.add(new ClusterEvent( - ClusterEvent.Type.SYSTEMSTATE, - "Altering distribution bits in system from " - + currentClusterState.getDistributionBitCount() + " to " + - currentClusterState.getDistributionBitCount(), - timeNow), isMaster); - } - - eventLog.add(new ClusterEvent( - ClusterEvent.Type.SYSTEMSTATE, - "New cluster state version " + newClusterState.getVersion() + ". 
Change from last: " + - currentClusterState.getTextualDifference(newClusterState), - timeNow), isMaster); - - log.log(LogLevel.DEBUG, "Created new cluster state version: " + newClusterState.toString(true)); - systemStateHistory.addFirst(new SystemStateHistoryEntry(newClusterState, timeNow)); - if (systemStateHistory.size() > maxHistorySize) { - systemStateHistory.removeLast(); - } - } - - private void mergeIntoNextClusterState(ClusterState sourceState) { - final ClusterState nextState = nextClusterStateView.getClusterState(); - final int nodeCount = sourceState.getNodeCount(NodeType.STORAGE); - for (int i = 0; i < nodeCount; ++i) { - final Node node = storageNode(i); - final NodeState stateInSource = sourceState.getNodeState(node); - final NodeState stateInTarget = nextState.getNodeState(node); - if (stateInSource.getState() != stateInTarget.getState()) { - nextState.setNodeState(node, stateInSource); - } - } - } - - public boolean notifyIfNewSystemState(ContentCluster cluster, SystemStateListener stateListener) { - if ( ! nextStateViewChanged) return false; - - ClusterStateView newClusterStateView = createNextVersionOfClusterStateView(cluster); - - ClusterState newClusterState = newClusterStateView.getClusterState(); - // Creating the next version of the state may implicitly take down nodes, so our checks - // for taking the entire cluster down must happen _after_ this - Optional<Event> clusterDown = getDownDueToTooFewNodesEvent(newClusterState); - newClusterState.setClusterState(clusterDown.isPresent() ? State.DOWN : State.UP); - - if (newClusterState.similarTo(currentClusterStateView.getClusterState())) { - log.log(LogLevel.DEBUG, - "State hasn't changed enough to warrant new cluster state. Not creating new state: " + - currentClusterStateView.getClusterState().getTextualDifference(newClusterState)); - return false; - } - - // Update the version of newClusterState now. This cannot be done prior to similarTo(), - // since it makes the cluster states different. 
From now on, the new cluster state is immutable. - newClusterState.setVersion(currentClusterStateView.getClusterState().getVersion() + 1); - - recordNewClusterStateHasBeenChosen(currentClusterStateView.getClusterState(), - newClusterStateView.getClusterState(), clusterDown.orElse(null)); - - // *) Ensure next state is still up to date. - // This should make nextClusterStateView a deep-copy of currentClusterStateView. - // If more than the distribution bits and state are deep-copied in - // createNextVersionOfClusterStateView(), we need to add corresponding statements here. - // This seems like a hack... - nextClusterStateView.getClusterState().setDistributionBits(newClusterState.getDistributionBitCount()); - nextClusterStateView.getClusterState().setClusterState(newClusterState.getClusterState()); - mergeIntoNextClusterState(newClusterState); - - currentClusterStateView = newClusterStateView; - nextStateViewChanged = false; - - stateListener.handleNewSystemState(currentClusterStateView.getClusterState()); - - return true; - } - - public void setLatestSystemStateVersion(int version) { - currentClusterStateView.getClusterState().setVersion(Math.max(1, version)); - nextStateViewChanged = true; - } - - private void setNodeState(NodeInfo node, NodeState newState) { - NodeState oldState = nextClusterStateView.getClusterState().getNodeState(node.getNode()); - - // Correct UP to RETIRED if the node wants to be retired - if (newState.above(node.getWantedState())) - newState.setState(node.getWantedState().getState()); - - // Keep old description if a new one is not set and we're not going up or in initializing mode - if ( ! 
newState.getState().oneOf("ui") && oldState.hasDescription()) { - newState.setDescription(oldState.getDescription()); - } - - // Keep disk information if not set in new state - if (newState.getDiskCount() == 0 && oldState.getDiskCount() != 0) { - newState.setDiskCount(oldState.getDiskCount()); - for (int i=0; i<oldState.getDiskCount(); ++i) { - newState.setDiskState(i, oldState.getDiskState(i)); - } - } - if (newState.equals(oldState)) { - return; - } - - eventLog.add(new NodeEvent(node, "Altered node state in cluster state from '" + oldState.toString(true) - + "' to '" + newState.toString(true) + "'.", - NodeEvent.Type.CURRENT, timer.getCurrentTimeInMillis()), isMaster); - nextClusterStateView.getClusterState().setNodeState(node.getNode(), newState); - nextStateViewChanged = true; - } - - public void handleNewReportedNodeState(NodeInfo node, NodeState reportedState, NodeStateOrHostInfoChangeHandler nodeListener) { - ClusterState nextState = nextClusterStateView.getClusterState(); - NodeState currentState = nextState.getNodeState(node.getNode()); - log.log(currentState.equals(reportedState) && node.getVersion() == 0 ? LogLevel.SPAM : LogLevel.DEBUG, - "Got nodestate reply from " + node + ": " - + node.getReportedState().getTextualDifference(reportedState) + " (Current state is " + currentState.toString(true) + ")"); - long currentTime = timer.getCurrentTimeInMillis(); - if (reportedState.getState().equals(State.DOWN)) { - node.setTimeOfFirstFailingConnectionAttempt(currentTime); - } - if ( ! 
reportedState.similarTo(node.getReportedState())) { - if (reportedState.getState().equals(State.DOWN)) { - eventLog.addNodeOnlyEvent(new NodeEvent(node, "Failed to get node state: " + reportedState.toString(true), NodeEvent.Type.REPORTED, currentTime), LogLevel.INFO); - } else { - eventLog.addNodeOnlyEvent(new NodeEvent(node, "Now reporting state " + reportedState.toString(true), NodeEvent.Type.REPORTED, currentTime), LogLevel.DEBUG); - } - } - if (reportedState.equals(node.getReportedState()) && ! reportedState.getState().equals(State.INITIALIZING)) - return; - - NodeState alteredState = decideNodeStateGivenReportedState(node, currentState, reportedState, nodeListener); - if (alteredState != null) { - ClusterState clusterState = currentClusterStateView.getClusterState(); - - if (alteredState.above(node.getWantedState())) { - log.log(LogLevel.DEBUG, "Cannot set node in state " + alteredState.getState() + " when wanted state is " + node.getWantedState()); - alteredState.setState(node.getWantedState().getState()); - } - if (reportedState.getStartTimestamp() > node.getStartTimestamp()) { - alteredState.setStartTimestamp(reportedState.getStartTimestamp()); - } else { - alteredState.setStartTimestamp(0); - } - if (!alteredState.similarTo(currentState)) { - setNodeState(node, alteredState); - } else if (!alteredState.equals(currentState)) { - if (currentState.getState().equals(State.INITIALIZING) && alteredState.getState().equals(State.INITIALIZING) && - Math.abs(currentState.getInitProgress() - alteredState.getInitProgress()) > 0.000000001) - { - log.log(LogLevel.DEBUG, "Only silently updating init progress for " + node + " in cluster state because new " - + "state is too similar to tag new version: " + currentState.getTextualDifference(alteredState)); - currentState.setInitProgress(alteredState.getInitProgress()); - nextState.setNodeState(node.getNode(), currentState); - - NodeState currentNodeState = clusterState.getNodeState(node.getNode()); - if 
(currentNodeState.getState().equals(State.INITIALIZING)) { - currentNodeState.setInitProgress(alteredState.getInitProgress()); - clusterState.setNodeState(node.getNode(), currentNodeState); - } - } else if (alteredState.getMinUsedBits() != currentState.getMinUsedBits()) { - log.log(LogLevel.DEBUG, "Altering node state to reflect that min distribution bit count have changed from " - + currentState.getMinUsedBits() + " to " + alteredState.getMinUsedBits()); - int oldCount = currentState.getMinUsedBits(); - currentState.setMinUsedBits(alteredState.getMinUsedBits()); - nextState.setNodeState(node.getNode(), currentState); - int minDistBits = calculateMinDistributionBitCount(); - if (minDistBits < nextState.getDistributionBitCount() - || (nextState.getDistributionBitCount() < this.idealDistributionBits && minDistBits >= this.idealDistributionBits)) - { - // If this will actually affect global cluster state. - eventLog.add(new NodeEvent(node, "Altered min distribution bit count from " + oldCount - + " to " + currentState.getMinUsedBits() + ". Updated cluster state.", NodeEvent.Type.CURRENT, currentTime), isMaster); - nextStateViewChanged = true; - } else { - log.log(LogLevel.DEBUG, "Altered min distribution bit count from " + oldCount - + " to " + currentState.getMinUsedBits() + ". 
No effect for cluster state with ideal " + this.idealDistributionBits - + ", new " + minDistBits + ", old " + nextState.getDistributionBitCount() + " though."); - clusterState.setNodeState(node.getNode(), currentState); - } - } else { - log.log(LogLevel.DEBUG, "Not altering state of " + node + " in cluster state because new state is too similar: " - + currentState.getTextualDifference(alteredState)); - } - } else if (alteredState.getDescription().contains("Listing buckets")) { - currentState.setDescription(alteredState.getDescription()); - nextState.setNodeState(node.getNode(), currentState); - NodeState currentNodeState = clusterState.getNodeState(node.getNode()); - currentNodeState.setDescription(alteredState.getDescription()); - clusterState.setNodeState(node.getNode(), currentNodeState); - } - } - } - - public void handleNewNode(NodeInfo node) { - setHostName(node); - String message = "Found new node " + node + " in slobrok at " + node.getRpcAddress(); - eventLog.add(new NodeEvent(node, message, NodeEvent.Type.REPORTED, timer.getCurrentTimeInMillis()), isMaster); - } - - public void handleMissingNode(NodeInfo node, NodeStateOrHostInfoChangeHandler nodeListener) { - removeHostName(node); - - long timeNow = timer.getCurrentTimeInMillis(); - - if (node.getLatestNodeStateRequestTime() != null) { - eventLog.add(new NodeEvent(node, "Node is no longer in slobrok, but we still have a pending state request.", NodeEvent.Type.REPORTED, timeNow), isMaster); - } else { - eventLog.add(new NodeEvent(node, "Node is no longer in slobrok. No pending state request to node.", NodeEvent.Type.REPORTED, timeNow), isMaster); - } - if (node.getReportedState().getState().equals(State.STOPPING)) { - log.log(LogLevel.DEBUG, "Node " + node.getNode() + " is no longer in slobrok. Was in stopping state, so assuming it has shut down normally. 
Setting node down"); - NodeState ns = node.getReportedState().clone(); - ns.setState(State.DOWN); - handleNewReportedNodeState(node, ns.clone(), nodeListener); - node.setReportedState(ns, timer.getCurrentTimeInMillis()); // Must reset it to null to get connection attempts counted - } else { - log.log(LogLevel.DEBUG, "Node " + node.getNode() + " no longer in slobrok was in state " + node.getReportedState() + ". Waiting to see if it reappears in slobrok"); - } - } - - /** - * Propose a new state for a node. This may happen due to an administrator action, orchestration, or - * a configuration change. - */ - public void proposeNewNodeState(NodeInfo node, NodeState proposedState) { - NodeState currentState = nextClusterStateView.getClusterState().getNodeState(node.getNode()); - NodeState currentReported = node.getReportedState(); // TODO: Is there a reason to have both of this and the above? - - NodeState newCurrentState = currentReported.clone(); - - newCurrentState.setState(proposedState.getState()).setDescription(proposedState.getDescription()); - - if (currentState.getState().equals(newCurrentState.getState())) return; - - log.log(LogLevel.DEBUG, "Got new wanted nodestate for " + node + ": " + currentState.getTextualDifference(proposedState)); - // Should be checked earlier before state was set in cluster - assert(newCurrentState.getState().validWantedNodeState(node.getNode().getType())); - long timeNow = timer.getCurrentTimeInMillis(); - if (newCurrentState.above(currentReported)) { - eventLog.add(new NodeEvent(node, "Wanted state " + newCurrentState + ", but we cannot force node into that state yet as it is currently in " + currentReported, NodeEvent.Type.REPORTED, timeNow), isMaster); - return; - } - if ( ! 
newCurrentState.similarTo(currentState)) { - eventLog.add(new NodeEvent(node, "Node state set to " + newCurrentState + ".", NodeEvent.Type.WANTED, timeNow), isMaster); - } - setNodeState(node, newCurrentState); - } - - public void handleNewRpcAddress(NodeInfo node) { - setHostName(node); - String message = "Node " + node + " has a new address in slobrok: " + node.getRpcAddress(); - eventLog.add(new NodeEvent(node, message, NodeEvent.Type.REPORTED, timer.getCurrentTimeInMillis()), isMaster); - } - - public void handleReturnedRpcAddress(NodeInfo node) { - setHostName(node); - String message = "Node got back into slobrok with same address as before: " + node.getRpcAddress(); - eventLog.add(new NodeEvent(node, message, NodeEvent.Type.REPORTED, timer.getCurrentTimeInMillis()), isMaster); - } - - private void setHostName(NodeInfo node) { - String rpcAddress = node.getRpcAddress(); - if (rpcAddress == null) { - // This may happen if we haven't seen the node in Slobrok yet. - return; - } - - Spec address = new Spec(rpcAddress); - if (address.malformed()) { - return; - } - - hostnames.put(node.getNodeIndex(), address.host()); - } - - private void removeHostName(NodeInfo node) { - hostnames.remove(node.getNodeIndex()); - } - - public boolean watchTimers(ContentCluster cluster, NodeStateOrHostInfoChangeHandler nodeListener) { - boolean triggeredAnyTimers = false; - long currentTime = timer.getCurrentTimeInMillis(); - for(NodeInfo node : cluster.getNodeInfo()) { - NodeState currentStateInSystem = nextClusterStateView.getClusterState().getNodeState(node.getNode()); - NodeState lastReportedState = node.getReportedState(); - - // If we haven't had slobrok contact in a given amount of time and node is still not considered down, - // mark it down. 
- if (node.isRpcAddressOutdated() - && !lastReportedState.getState().equals(State.DOWN) - && node.getRpcAddressOutdatedTimestamp() + maxSlobrokDisconnectGracePeriod <= currentTime) - { - StringBuilder sb = new StringBuilder().append("Set node down as it has been out of slobrok for ") - .append(currentTime - node.getRpcAddressOutdatedTimestamp()).append(" ms which is more than the max limit of ") - .append(maxSlobrokDisconnectGracePeriod).append(" ms."); - node.abortCurrentNodeStateRequests(); - NodeState state = lastReportedState.clone(); - state.setState(State.DOWN); - if (!state.hasDescription()) state.setDescription(sb.toString()); - eventLog.add(new NodeEvent(node, sb.toString(), NodeEvent.Type.CURRENT, currentTime), isMaster); - handleNewReportedNodeState(node, state.clone(), nodeListener); - node.setReportedState(state, currentTime); - triggeredAnyTimers = true; - } - - // If node is still unavailable after transition time, mark it down - if (currentStateInSystem.getState().equals(State.MAINTENANCE) - && ( ! nextStateViewChanged || ! this.nextClusterStateView.getClusterState().getNodeState(node.getNode()).getState().equals(State.DOWN)) - && node.getWantedState().above(new NodeState(node.getNode().getType(), State.DOWN)) - && (lastReportedState.getState().equals(State.DOWN) || node.isRpcAddressOutdated()) - && node.getTransitionTime() + maxTransitionTime.get(node.getNode().getType()) < currentTime) - { - eventLog.add(new NodeEvent(node, (currentTime - node.getTransitionTime()) - + " milliseconds without contact. Marking node down.", NodeEvent.Type.CURRENT, currentTime), isMaster); - NodeState newState = new NodeState(node.getNode().getType(), State.DOWN).setDescription( - (currentTime - node.getTransitionTime()) + " ms without contact. Too long to keep in maintenance. 
Marking node down"); - // Keep old description if there is one as it is likely closer to the cause of the problem - if (currentStateInSystem.hasDescription()) newState.setDescription(currentStateInSystem.getDescription()); - setNodeState(node, newState); - triggeredAnyTimers = true; - } - - // If node hasn't increased its initializing progress within initprogresstime, mark it down. - if (!currentStateInSystem.getState().equals(State.DOWN) - && node.getWantedState().above(new NodeState(node.getNode().getType(), State.DOWN)) - && lastReportedState.getState().equals(State.INITIALIZING) - && maxInitProgressTime != 0 - && node.getInitProgressTime() + maxInitProgressTime <= currentTime - && node.getNode().getType().equals(NodeType.STORAGE)) - { - eventLog.add(new NodeEvent(node, (currentTime - node.getInitProgressTime()) + " milliseconds " - + "without initialize progress. Marking node down." - + " Premature crash count is now " + (node.getPrematureCrashCount() + 1) + ".", NodeEvent.Type.CURRENT, currentTime), isMaster); - NodeState newState = new NodeState(node.getNode().getType(), State.DOWN).setDescription( - (currentTime - node.getInitProgressTime()) + " ms without initialize progress. 
Assuming node has deadlocked."); - setNodeState(node, newState); - handlePrematureCrash(node, nodeListener); - triggeredAnyTimers = true; - } - if (node.getUpStableStateTime() + stableStateTimePeriod <= currentTime - && lastReportedState.getState().equals(State.UP) - && node.getPrematureCrashCount() <= maxPrematureCrashes - && node.getPrematureCrashCount() != 0) - { - node.setPrematureCrashCount(0); - log.log(LogLevel.DEBUG, "Resetting premature crash count on node " + node + " as it has been up for a long time."); - triggeredAnyTimers = true; - } else if (node.getDownStableStateTime() + stableStateTimePeriod <= currentTime - && lastReportedState.getState().equals(State.DOWN) - && node.getPrematureCrashCount() <= maxPrematureCrashes - && node.getPrematureCrashCount() != 0) - { - node.setPrematureCrashCount(0); - log.log(LogLevel.DEBUG, "Resetting premature crash count on node " + node + " as it has been down for a long time."); - triggeredAnyTimers = true; - } - } - return triggeredAnyTimers; - } - - private boolean isControlledShutdown(NodeState state) { - return (state.getState() == State.STOPPING && (state.getDescription().contains("Received signal 15 (SIGTERM - Termination signal)") - || state.getDescription().contains("controlled shutdown"))); - } - - /** - * Decide the state assigned to a new node given the state it reported - * - * @param node the node we are computing the state of - * @param currentState the current state of the node - * @param reportedState the new state reported by (or, in the case of down - inferred from) the node - * @param nodeListener this listener is notified for some of the system state changes that this will return - * @return the node node state, or null to keep the nodes current state - */ - private NodeState decideNodeStateGivenReportedState(NodeInfo node, NodeState currentState, NodeState reportedState, - NodeStateOrHostInfoChangeHandler nodeListener) { - long timeNow = timer.getCurrentTimeInMillis(); - - 
log.log(LogLevel.DEBUG, "Finding new cluster state entry for " + node + " switching state " + currentState.getTextualDifference(reportedState)); - - // Set nodes in maintenance if 1) down, or 2) initializing but set retired, to avoid migrating data - // to the retired node while it is initializing - if (currentState.getState().oneOf("ur") && reportedState.getState().oneOf("dis") - && (node.getWantedState().getState().equals(State.RETIRED) || !reportedState.getState().equals(State.INITIALIZING))) - { - long currentTime = timer.getCurrentTimeInMillis(); - node.setTransitionTime(currentTime); - if (node.getUpStableStateTime() + stableStateTimePeriod > currentTime && !isControlledShutdown(reportedState)) { - log.log(LogLevel.DEBUG, "Stable state: " + node.getUpStableStateTime() + " + " + stableStateTimePeriod + " > " + currentTime); - eventLog.add(new NodeEvent(node, - "Stopped or possibly crashed after " + (currentTime - node.getUpStableStateTime()) - + " ms, which is before stable state time period." - + " Premature crash count is now " + (node.getPrematureCrashCount() + 1) + ".", - NodeEvent.Type.CURRENT, - timeNow), isMaster); - if (handlePrematureCrash(node, nodeListener)) return null; - } - if (maxTransitionTime.get(node.getNode().getType()) != 0) { - return new NodeState(node.getNode().getType(), State.MAINTENANCE).setDescription(reportedState.getDescription()); - } - } - - // If we got increasing initialization progress, reset initialize timer - if (reportedState.getState().equals(State.INITIALIZING) && - (!currentState.getState().equals(State.INITIALIZING) || - reportedState.getInitProgress() > currentState.getInitProgress())) - { - node.setInitProgressTime(timer.getCurrentTimeInMillis()); - log.log(LogLevel.DEBUG, "Reset initialize timer on " + node + " to " + node.getInitProgressTime()); - } - - // If we get reverse initialize progress, mark node unstable, such that we don't mark it initializing again before it is up. 
- if (currentState.getState().equals(State.INITIALIZING) && - (reportedState.getState().equals(State.INITIALIZING) && reportedState.getInitProgress() < currentState.getInitProgress())) - { - eventLog.add(new NodeEvent(node, "Stop or crash during initialization detected from reverse initializing progress." - + " Progress was " + currentState.getInitProgress() + " but is now " + reportedState.getInitProgress() + "." - + " Premature crash count is now " + (node.getPrematureCrashCount() + 1) + ".", - NodeEvent.Type.CURRENT, timeNow), isMaster); - return (handlePrematureCrash(node, nodeListener) ? null : new NodeState(node.getNode().getType(), State.DOWN).setDescription( - "Got reverse intialize progress. Assuming node have prematurely crashed")); - } - - // If we go down while initializing, mark node unstable, such that we don't mark it initializing again before it is up. - if (currentState.getState().equals(State.INITIALIZING) && reportedState.getState().oneOf("ds") && !isControlledShutdown(reportedState)) - { - eventLog.add(new NodeEvent(node, "Stop or crash during initialization." - + " Premature crash count is now " + (node.getPrematureCrashCount() + 1) + ".", - NodeEvent.Type.CURRENT, timeNow), isMaster); - return (handlePrematureCrash(node, nodeListener) ? null : new NodeState(node.getNode().getType(), State.DOWN).setDescription(reportedState.getDescription())); - } - - // Ignore further unavailable states when node is set in maintenance - if (currentState.getState().equals(State.MAINTENANCE) && reportedState.getState().oneOf("dis")) - { - if (node.getWantedState().getState().equals(State.RETIRED) || !reportedState.getState().equals(State.INITIALIZING) - || reportedState.getInitProgress() <= NodeState.getListingBucketsInitProgressLimit() + 0.00001) { - log.log(LogLevel.DEBUG, "Ignoring down and initializing reports while in maintenance mode on " + node + "."); - return null; - } - } - - // Hide initializing state if node has been unstable. 
(Not for distributors as these own buckets while initializing) - if ((currentState.getState().equals(State.DOWN) || currentState.getState().equals(State.UP)) && - reportedState.getState().equals(State.INITIALIZING) && node.getPrematureCrashCount() > 0 && - !node.isDistributor()) - { - log.log(LogLevel.DEBUG, "Not setting " + node + " initializing again as it crashed prematurely earlier."); - return new NodeState(node.getNode().getType(), State.DOWN).setDescription("Not setting node back up as it failed prematurely at last attempt"); - } - // Hide initializing state in cluster state if initialize progress is so low that we haven't listed buckets yet - if (!node.isDistributor() && reportedState.getState().equals(State.INITIALIZING) && - reportedState.getInitProgress() <= NodeState.getListingBucketsInitProgressLimit() + 0.00001) - { - log.log(LogLevel.DEBUG, "Not setting " + node + " initializing in cluster state quite yet, as initializing progress still indicate it is listing buckets."); - return new NodeState(node.getNode().getType(), State.DOWN).setDescription("Listing buckets. Progress " + (100 * reportedState.getInitProgress()) + " %."); - } - return reportedState.clone(); - } - - public boolean handlePrematureCrash(NodeInfo node, NodeStateOrHostInfoChangeHandler changeListener) { - node.setPrematureCrashCount(node.getPrematureCrashCount() + 1); - if (disableUnstableNodes && node.getPrematureCrashCount() > maxPrematureCrashes) { - NodeState wantedState = new NodeState(node.getNode().getType(), State.DOWN) - .setDescription("Disabled by fleet controller as it prematurely shut down " + node.getPrematureCrashCount() + " times in a row"); - NodeState oldState = node.getWantedState(); - node.setWantedState(wantedState); - if ( ! 
oldState.equals(wantedState)) { - changeListener.handleNewWantedNodeState(node, wantedState); - } - return true; - } - return false; - } - - public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo hostInfo) { - // Only pass the host info to the latest cluster state view. - currentClusterStateView.handleUpdatedHostInfo(hostnames, nodeInfo, hostInfo); - } - - public class SystemStateHistoryEntry { - - private final ClusterState state; - private final long time; - - SystemStateHistoryEntry(ClusterState state, long time) { - this.state = state; - this.time = time; - } - - public ClusterState state() { return state; } - - public long time() { return time; } - - } - -} diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/DatabaseHandler.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/DatabaseHandler.java index a21ed994d5d..c4e7c6897e1 100644 --- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/DatabaseHandler.java +++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/DatabaseHandler.java @@ -248,6 +248,8 @@ public class DatabaseHandler { log.log(LogLevel.DEBUG, "Fleetcontroller " + nodeIndex + ": Attempting to store last system state version " + pendingStore.lastSystemStateVersion + " into zookeeper."); + // TODO guard version write with a CaS predicated on the version we last read/wrote. + // TODO Drop leadership status if there is a mismatch, as it implies we're racing with another leader. 
if (database.storeLatestSystemStateVersion(pendingStore.lastSystemStateVersion)) { currentlyStored.lastSystemStateVersion = pendingStore.lastSystemStateVersion; pendingStore.lastSystemStateVersion = null; diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/MasterDataGatherer.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/MasterDataGatherer.java index cd9c66d18f0..f952f842151 100644 --- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/MasterDataGatherer.java +++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/MasterDataGatherer.java @@ -51,7 +51,7 @@ public class MasterDataGatherer { public void process(WatchedEvent watchedEvent) { switch (watchedEvent.getType()) { case NodeChildrenChanged: // Fleetcontrollers have either connected or disconnected to ZooKeeper - log.log(LogLevel.INFO, "Fleetcontroller " + nodeIndex + ": A change occured in the list of registered fleetcontrollers. Requesting new information"); + log.log(LogLevel.INFO, "Fleetcontroller " + nodeIndex + ": A change occurred in the list of registered fleetcontrollers. Requesting new information"); session.getChildren(zooKeeperRoot + "indexes", this, childListener, null); break; case NodeDataChanged: // A fleetcontroller has changed what node it is voting for @@ -160,7 +160,7 @@ public class MasterDataGatherer { } } - /** Calling restart, ignores what we currently know and starts another circly. Typically called after reconnecting to ZooKeeperServer. */ + /** Calling restart, ignores what we currently know and starts another cycle. Typically called after reconnecting to ZooKeeperServer. 
*/ public void restart() { synchronized (nextMasterData) { masterData = new TreeMap<Integer, Integer>(); diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RpcServer.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RpcServer.java index 46fb18180e5..9619a15de3c 100644 --- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RpcServer.java +++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RpcServer.java @@ -100,8 +100,7 @@ public class RpcServer { register = new Register(supervisor, slist, new Spec(InetAddress.getLocalHost().getHostName(), acceptor.port()), slobrokBackOffPolicy); } else { - register = new Register(supervisor, slist, - InetAddress.getLocalHost().getHostName(), acceptor.port()); + register = new Register(supervisor, slist, InetAddress.getLocalHost().getHostName(), acceptor.port()); } register.registerName(getSlobrokName()); } diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/ClusterStateRequestHandler.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/ClusterStateRequestHandler.java index 6de9205bbe3..9428370faf5 100644 --- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/ClusterStateRequestHandler.java +++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/ClusterStateRequestHandler.java @@ -2,19 +2,19 @@ package com.yahoo.vespa.clustercontroller.core.status; import com.yahoo.vdslib.state.ClusterState; +import com.yahoo.vespa.clustercontroller.core.StateVersionTracker; import com.yahoo.vespa.clustercontroller.core.status.statuspage.StatusPageResponse; import com.yahoo.vespa.clustercontroller.core.status.statuspage.StatusPageServer; -import com.yahoo.vespa.clustercontroller.core.SystemStateGenerator; public class ClusterStateRequestHandler implements 
StatusPageServer.RequestHandler { - private final SystemStateGenerator systemStateGenerator; + private final StateVersionTracker stateVersionTracker; - public ClusterStateRequestHandler(SystemStateGenerator systemStateGenerator) { - this.systemStateGenerator = systemStateGenerator; + public ClusterStateRequestHandler(StateVersionTracker stateVersionTracker) { + this.stateVersionTracker = stateVersionTracker; } @Override public StatusPageResponse handle(StatusPageServer.HttpRequest request) { - ClusterState cs = systemStateGenerator.getClusterState(); + ClusterState cs = stateVersionTracker.getVersionedClusterState(); StatusPageResponse response = new StatusPageResponse(); response.setContentType("text/plain"); diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/LegacyIndexPageRequestHandler.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/LegacyIndexPageRequestHandler.java index 85db0ac0ef9..ec75ba3532d 100644 --- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/LegacyIndexPageRequestHandler.java +++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/LegacyIndexPageRequestHandler.java @@ -17,21 +17,22 @@ public class LegacyIndexPageRequestHandler implements StatusPageServer.RequestHa private final Timer timer; private final ContentCluster cluster; private final MasterElectionHandler masterElectionHandler; - private final SystemStateGenerator systemStateGenerator; + private final StateVersionTracker stateVersionTracker; private final EventLog eventLog; private final long startedTime; private final RunDataExtractor data; private boolean showLocalSystemStatesInLog = true; public LegacyIndexPageRequestHandler(Timer timer, boolean showLocalSystemStatesInLog, ContentCluster cluster, - MasterElectionHandler masterElectionHandler, SystemStateGenerator systemStateGenerator, + MasterElectionHandler masterElectionHandler, + 
StateVersionTracker stateVersionTracker, EventLog eventLog, long startedTime, RunDataExtractor data) { this.timer = timer; this.showLocalSystemStatesInLog = showLocalSystemStatesInLog; this.cluster = cluster; this.masterElectionHandler = masterElectionHandler; - this.systemStateGenerator = systemStateGenerator; + this.stateVersionTracker = stateVersionTracker; this.eventLog = eventLog; this.startedTime = startedTime; this.data = data; @@ -63,7 +64,7 @@ public class LegacyIndexPageRequestHandler implements StatusPageServer.RequestHa new VdsClusterHtmlRendrer(), content, timer, - systemStateGenerator.getClusterState(), + stateVersionTracker.getVersionedClusterState(), data.getOptions().storageDistribution, data.getOptions(), eventLog, @@ -71,7 +72,7 @@ public class LegacyIndexPageRequestHandler implements StatusPageServer.RequestHa // Overview of current config data.getOptions().writeHtmlState(content, request); // Current cluster state and cluster state history - writeHtmlState(systemStateGenerator, content, request); + writeHtmlState(stateVersionTracker, content, request); } else { // Overview of current config data.getOptions().writeHtmlState(content, request); @@ -84,7 +85,7 @@ public class LegacyIndexPageRequestHandler implements StatusPageServer.RequestHa return response; } - public void writeHtmlState(SystemStateGenerator systemStateGenerator, StringBuilder sb, StatusPageServer.HttpRequest request) { + public void writeHtmlState(StateVersionTracker stateVersionTracker, StringBuilder sb, StatusPageServer.HttpRequest request) { boolean showLocal = showLocalSystemStatesInLog; if (request.hasQueryParameter("showlocal")) { showLocal = true; @@ -93,9 +94,9 @@ public class LegacyIndexPageRequestHandler implements StatusPageServer.RequestHa } sb.append("<h2 id=\"clusterstates\">Cluster states</h2>\n") - .append("<p>Current cluster state:<br><code>").append(systemStateGenerator.currentClusterStateView().toString()).append("</code></p>\n"); + .append("<p>Current cluster 
state:<br><code>").append(stateVersionTracker.getVersionedClusterState().toString()).append("</code></p>\n"); - if ( ! systemStateGenerator.systemStateHistory().isEmpty()) { + if ( ! stateVersionTracker.getClusterStateHistory().isEmpty()) { TimeZone tz = TimeZone.getTimeZone("UTC"); sb.append("<h3 id=\"clusterstatehistory\">Cluster state history</h3>\n"); if (showLocal) { @@ -106,10 +107,10 @@ public class LegacyIndexPageRequestHandler implements StatusPageServer.RequestHa .append(" <th>Cluster state</th>\n") .append("</tr>\n"); // Write cluster state history in reverse order (newest on top) - Iterator<SystemStateGenerator.SystemStateHistoryEntry> stateIterator = systemStateGenerator.systemStateHistory().iterator(); - SystemStateGenerator.SystemStateHistoryEntry current = null; + Iterator<ClusterStateHistoryEntry> stateIterator = stateVersionTracker.getClusterStateHistory().iterator(); + ClusterStateHistoryEntry current = null; while (stateIterator.hasNext()) { - SystemStateGenerator.SystemStateHistoryEntry nextEntry = stateIterator.next(); + ClusterStateHistoryEntry nextEntry = stateIterator.next(); if (nextEntry.state().isOfficial() || showLocal) { if (current != null) writeClusterStateEntry(current, nextEntry, sb, tz); current = nextEntry; @@ -120,7 +121,7 @@ public class LegacyIndexPageRequestHandler implements StatusPageServer.RequestHa } } - private void writeClusterStateEntry(SystemStateGenerator.SystemStateHistoryEntry entry, SystemStateGenerator.SystemStateHistoryEntry last, StringBuilder sb, TimeZone tz) { + private void writeClusterStateEntry(ClusterStateHistoryEntry entry, ClusterStateHistoryEntry last, StringBuilder sb, TimeZone tz) { sb.append("<tr><td>").append(RealTimer.printDate(entry.time(), tz)) .append("</td><td>").append(entry.state().isOfficial() ? 
"" : "<font color=\"grey\">"); sb.append(entry.state()); diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/StaticResourceRequestHandler.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/StaticResourceRequestHandler.java deleted file mode 100644 index fa8128753f6..00000000000 --- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/StaticResourceRequestHandler.java +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.vespa.clustercontroller.core.status; - -import com.yahoo.vespa.clustercontroller.core.status.statuspage.StatusPageResponse; -import com.yahoo.vespa.clustercontroller.core.status.statuspage.StatusPageServer; - -import java.io.ByteArrayOutputStream; -import java.io.InputStream; -import java.io.IOException; - -/** - * HTTP request handler for serving a single JAR resource as if it were - * a regular file hosted on the server. Always serves the content verbatim - * (i.e. as a byte stream), specifying a Content-Type provided when creating - * the handler. 
- * - * @author <a href="mailto:vekterli@yahoo-inc.com">Tor Brede Vekterli</a> - * @since 5.28 - */ -public class StaticResourceRequestHandler implements StatusPageServer.RequestHandler { - private final byte[] resourceData; - private final String contentType; - - public StaticResourceRequestHandler(String resourcePath, - String contentType) - throws IOException - { - this.resourceData = loadResource(resourcePath); - this.contentType = contentType; - } - - private byte[] loadResource(String resourcePath) throws IOException { - InputStream resourceStream = getClass().getClassLoader().getResourceAsStream(resourcePath); - if (resourceStream == null) { - throw new IOException("No resource with path '" + resourcePath + "' could be found"); - } - return readStreamData(resourceStream); - } - - @Override - public StatusPageResponse handle(StatusPageServer.HttpRequest request) { - final StatusPageResponse response = new StatusPageResponse(); - response.setClientCachingEnabled(true); - response.setContentType(contentType); - try { - response.getOutputStream().write(resourceData); - } catch (IOException e) { - response.setResponseCode(StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR); - } - return response; - } - - private byte[] readStreamData(InputStream resourceStream) throws IOException { - final byte[] buf = new byte[4096]; - final ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); - while (true) { - int read = resourceStream.read(buf); - if (read < 0) { - break; - } - outputStream.write(buf, 0, read); - } - outputStream.close(); - return outputStream.toByteArray(); - } -} diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFixture.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFixture.java index aca26000931..3eda886e721 100644 --- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFixture.java +++ 
b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFixture.java @@ -3,21 +3,18 @@ package com.yahoo.vespa.clustercontroller.core; import com.yahoo.vdslib.distribution.ConfiguredNode; import com.yahoo.vdslib.distribution.Distribution; +import com.yahoo.vdslib.state.ClusterState; import com.yahoo.vdslib.state.Node; import com.yahoo.vdslib.state.NodeState; import com.yahoo.vdslib.state.NodeType; import com.yahoo.vdslib.state.State; import com.yahoo.vespa.clustercontroller.core.listeners.NodeStateOrHostInfoChangeHandler; -import com.yahoo.vespa.clustercontroller.core.mocks.TestEventLog; import com.yahoo.vespa.clustercontroller.utils.util.NoMetricReporter; -import com.yahoo.vespa.config.content.StorDistributionConfig; import java.util.Collection; import java.util.List; import java.util.Map; import java.util.TreeMap; -import java.util.stream.Collectors; -import java.util.stream.IntStream; import static org.mockito.Mockito.mock; @@ -26,98 +23,163 @@ class ClusterFixture { public final Distribution distribution; public final FakeTimer timer; public final EventLogInterface eventLog; - public final SystemStateGenerator generator; + public final StateChangeHandler nodeStateChangeHandler; + public final ClusterStateGenerator.Params params = new ClusterStateGenerator.Params(); - public ClusterFixture(ContentCluster cluster, Distribution distribution) { + ClusterFixture(ContentCluster cluster, Distribution distribution) { this.cluster = cluster; this.distribution = distribution; this.timer = new FakeTimer(); this.eventLog = mock(EventLogInterface.class); - this.generator = createGeneratorForFixtureCluster(); + this.nodeStateChangeHandler = createNodeStateChangeHandlerForCluster(); + this.params.cluster(this.cluster); } - public SystemStateGenerator createGeneratorForFixtureCluster() { + StateChangeHandler createNodeStateChangeHandlerForCluster() { final int controllerIndex = 0; MetricUpdater metricUpdater = new MetricUpdater(new 
NoMetricReporter(), controllerIndex); - SystemStateGenerator generator = new SystemStateGenerator(timer, eventLog, metricUpdater); - generator.setNodes(cluster.clusterInfo()); - generator.setDistribution(distribution); - return generator; + return new StateChangeHandler(timer, eventLog, metricUpdater); } - public void bringEntireClusterUp() { + ClusterFixture bringEntireClusterUp() { cluster.clusterInfo().getConfiguredNodes().forEach((idx, node) -> { reportStorageNodeState(idx, State.UP); reportDistributorNodeState(idx, State.UP); }); + return this; } - public void reportStorageNodeState(final int index, State state) { - final Node node = new Node(NodeType.STORAGE, index); - final NodeState nodeState = new NodeState(NodeType.STORAGE, state); - nodeState.setDescription("mockdesc"); + ClusterFixture markEntireClusterDown() { + cluster.clusterInfo().getConfiguredNodes().forEach((idx, node) -> { + reportStorageNodeState(idx, State.DOWN); + reportDistributorNodeState(idx, State.DOWN); + }); + return this; + } + + private void doReportNodeState(final Node node, final NodeState nodeState) { + final ClusterState stateBefore = rawGeneratedClusterState(); + NodeStateOrHostInfoChangeHandler handler = mock(NodeStateOrHostInfoChangeHandler.class); NodeInfo nodeInfo = cluster.getNodeInfo(node); - generator.handleNewReportedNodeState(nodeInfo, nodeState, handler); + nodeStateChangeHandler.handleNewReportedNodeState(stateBefore, nodeInfo, nodeState, handler); nodeInfo.setReportedState(nodeState, timer.getCurrentTimeInMillis()); } - public void reportStorageNodeState(final int index, NodeState nodeState) { + ClusterFixture reportStorageNodeState(final int index, State state, String description) { final Node node = new Node(NodeType.STORAGE, index); - final NodeInfo nodeInfo = cluster.getNodeInfo(node); - final long mockTime = 1234; - NodeStateOrHostInfoChangeHandler changeListener = mock(NodeStateOrHostInfoChangeHandler.class); - generator.handleNewReportedNodeState(nodeInfo, 
nodeState, changeListener); - nodeInfo.setReportedState(nodeState, mockTime); + final NodeState nodeState = new NodeState(NodeType.STORAGE, state); + nodeState.setDescription(description); + doReportNodeState(node, nodeState); + return this; } - public void reportDistributorNodeState(final int index, State state) { + ClusterFixture reportStorageNodeState(final int index, State state) { + return reportStorageNodeState(index, state, "mockdesc"); + } + + ClusterFixture reportStorageNodeState(final int index, NodeState nodeState) { + doReportNodeState(new Node(NodeType.STORAGE, index), nodeState); + return this; + } + + ClusterFixture reportDistributorNodeState(final int index, State state) { final Node node = new Node(NodeType.DISTRIBUTOR, index); final NodeState nodeState = new NodeState(NodeType.DISTRIBUTOR, state); - NodeStateOrHostInfoChangeHandler handler = mock(NodeStateOrHostInfoChangeHandler.class); + doReportNodeState(node, nodeState); + return this; + } + + ClusterFixture reportDistributorNodeState(final int index, NodeState nodeState) { + doReportNodeState(new Node(NodeType.DISTRIBUTOR, index), nodeState); + return this; + } + + private void doProposeWantedState(final Node node, final NodeState nodeState, String description) { + final ClusterState stateBefore = rawGeneratedClusterState(); + + nodeState.setDescription(description); NodeInfo nodeInfo = cluster.getNodeInfo(node); + nodeInfo.setWantedState(nodeState); - generator.handleNewReportedNodeState(nodeInfo, nodeState, handler); - nodeInfo.setReportedState(nodeState, timer.getCurrentTimeInMillis()); + nodeStateChangeHandler.proposeNewNodeState(stateBefore, nodeInfo, nodeState); } - public void proposeStorageNodeWantedState(final int index, State state) { + ClusterFixture proposeStorageNodeWantedState(final int index, State state, String description) { final Node node = new Node(NodeType.STORAGE, index); final NodeState nodeState = new NodeState(NodeType.STORAGE, state); + doProposeWantedState(node, 
nodeState, description); + return this; + } + + ClusterFixture proposeStorageNodeWantedState(final int index, State state) { + return proposeStorageNodeWantedState(index, state, "mockdesc"); + } + + ClusterFixture proposeDistributorWantedState(final int index, State state) { + final ClusterState stateBefore = rawGeneratedClusterState(); + final Node node = new Node(NodeType.DISTRIBUTOR, index); + final NodeState nodeState = new NodeState(NodeType.DISTRIBUTOR, state); nodeState.setDescription("mockdesc"); NodeInfo nodeInfo = cluster.getNodeInfo(node); nodeInfo.setWantedState(nodeState); - generator.proposeNewNodeState(nodeInfo, nodeState); + nodeStateChangeHandler.proposeNewNodeState(stateBefore, nodeInfo, nodeState); + return this; + } + ClusterFixture disableAutoClusterTakedown() { + setMinNodesUp(0, 0, 0.0, 0.0); + return this; } - public void disableAutoClusterTakedown() { - generator.setMinNodesUp(0, 0, 0.0, 0.0); + ClusterFixture setMinNodesUp(int minDistNodes, int minStorNodes, double minDistRatio, double minStorRatio) { + params.minStorageNodesUp(minStorNodes) + .minDistributorNodesUp(minDistNodes) + .minRatioOfStorageNodesUp(minStorRatio) + .minRatioOfDistributorNodesUp(minDistRatio); + return this; } - public void disableTransientMaintenanceModeOnDown() { - Map<NodeType, Integer> maxTransitionTime = new TreeMap<>(); - maxTransitionTime.put(NodeType.DISTRIBUTOR, 0); - maxTransitionTime.put(NodeType.STORAGE, 0); - generator.setMaxTransitionTime(maxTransitionTime); + ClusterFixture setMinNodeRatioPerGroup(double upRatio) { + params.minNodeRatioPerGroup(upRatio); + return this; } - public void enableTransientMaintenanceModeOnDown(final int transitionTime) { + static Map<NodeType, Integer> buildTransitionTimeMap(int distributorTransitionTime, int storageTransitionTime) { Map<NodeType, Integer> maxTransitionTime = new TreeMap<>(); - maxTransitionTime.put(NodeType.DISTRIBUTOR, transitionTime); - maxTransitionTime.put(NodeType.STORAGE, transitionTime); - 
generator.setMaxTransitionTime(maxTransitionTime); + maxTransitionTime.put(NodeType.DISTRIBUTOR, distributorTransitionTime); + maxTransitionTime.put(NodeType.STORAGE, storageTransitionTime); + return maxTransitionTime; } - public String generatedClusterState() { - return generator.getClusterState().toString(); + void disableTransientMaintenanceModeOnDown() { + this.params.transitionTimes(0); } - public String verboseGeneratedClusterState() { return generator.getClusterState().toString(true); } + void enableTransientMaintenanceModeOnDown(final int transitionTimeMs) { + this.params.transitionTimes(transitionTimeMs); + } + + AnnotatedClusterState annotatedGeneratedClusterState() { + params.currentTimeInMilllis(timer.getCurrentTimeInMillis()); + return ClusterStateGenerator.generatedStateFrom(params); + } - public static ClusterFixture forFlatCluster(int nodeCount) { + ClusterState rawGeneratedClusterState() { + return annotatedGeneratedClusterState().getClusterState(); + } + + String generatedClusterState() { + return annotatedGeneratedClusterState().getClusterState().toString(); + } + + String verboseGeneratedClusterState() { + return annotatedGeneratedClusterState().getClusterState().toString(true); + } + + static ClusterFixture forFlatCluster(int nodeCount) { Collection<ConfiguredNode> nodes = DistributionBuilder.buildConfiguredNodes(nodeCount); Distribution distribution = DistributionBuilder.forFlatCluster(nodeCount); @@ -126,11 +188,27 @@ class ClusterFixture { return new ClusterFixture(cluster, distribution); } - public static ClusterFixture forHierarchicCluster(DistributionBuilder.GroupBuilder root) { + static ClusterFixture forHierarchicCluster(DistributionBuilder.GroupBuilder root) { List<ConfiguredNode> nodes = DistributionBuilder.buildConfiguredNodes(root.totalNodeCount()); Distribution distribution = DistributionBuilder.forHierarchicCluster(root); ContentCluster cluster = new ContentCluster("foo", nodes, distribution, 0, 0.0); return new 
ClusterFixture(cluster, distribution); } + + ClusterStateGenerator.Params generatorParams() { + return new ClusterStateGenerator.Params().cluster(cluster); + } + + ContentCluster cluster() { + return this.cluster; + } + + static Node storageNode(int index) { + return new Node(NodeType.STORAGE, index); + } + + static Node distributorNode(int index) { + return new Node(NodeType.DISTRIBUTOR, index); + } } diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterStateGeneratorTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterStateGeneratorTest.java new file mode 100644 index 00000000000..b9b97c27949 --- /dev/null +++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterStateGeneratorTest.java @@ -0,0 +1,895 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.clustercontroller.core; + +import com.yahoo.vdslib.distribution.ConfiguredNode; +import com.yahoo.vdslib.state.DiskState; +import com.yahoo.vdslib.state.Node; +import com.yahoo.vdslib.state.NodeState; +import com.yahoo.vdslib.state.NodeType; +import com.yahoo.vdslib.state.State; +import org.junit.Test; + +import java.util.List; +import java.util.Optional; + +import static com.yahoo.vespa.clustercontroller.core.matchers.HasStateReasonForNode.hasStateReasonForNode; +import static com.yahoo.vespa.clustercontroller.core.ClusterFixture.storageNode; + +import static org.hamcrest.core.IsEqual.equalTo; +import static org.hamcrest.core.Is.is; +import static org.junit.Assert.assertThat; + +public class ClusterStateGeneratorTest { + + private static AnnotatedClusterState generateFromFixtureWithDefaultParams(ClusterFixture fixture) { + final ClusterStateGenerator.Params params = new ClusterStateGenerator.Params(); + params.cluster = fixture.cluster; + params.transitionTimes = ClusterFixture.buildTransitionTimeMap(0, 0); + 
params.currentTimeInMillis = 0; + return ClusterStateGenerator.generatedStateFrom(params); + } + + @Test + public void cluster_with_all_nodes_reported_down_has_state_down() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(6).markEntireClusterDown(); + final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture); + + assertThat(state.getClusterState().getClusterState(), is(State.DOWN)); + // The returned message in this case depends on which "is cluster down?" check + // kicks in first. Currently, the minimum storage node count does. + assertThat(state.getClusterStateReason(), equalTo(Optional.of(ClusterStateReason.TOO_FEW_STORAGE_NODES_AVAILABLE))); + } + + @Test + public void cluster_with_all_nodes_up_state_correct_distributor_and_storage_count() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(6).bringEntireClusterUp(); + final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture); + + assertThat(state.toString(), equalTo("distributor:6 storage:6")); + } + + @Test + public void distributor_reported_states_reflected_in_generated_state() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(9) + .bringEntireClusterUp() + .reportDistributorNodeState(2, State.DOWN) + .reportDistributorNodeState(4, State.STOPPING); + final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture); + + assertThat(state.toString(), equalTo("distributor:9 .2.s:d .4.s:s storage:9")); + } + + // NOTE: initializing state tested separately since it involves init progress state info + @Test + public void storage_reported_states_reflected_in_generated_state() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(9) + .bringEntireClusterUp() + .reportStorageNodeState(0, State.DOWN) + .reportStorageNodeState(4, State.STOPPING); + final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture); + + assertThat(state.toString(), equalTo("distributor:9 storage:9 .0.s:d 
.4.s:s")); + } + + @Test + public void storage_reported_disk_state_included_in_generated_state() { + final NodeState stateWithDisks = new NodeState(NodeType.STORAGE, State.UP); + stateWithDisks.setDiskCount(7); + stateWithDisks.setDiskState(5, new DiskState(State.DOWN)); + + final ClusterFixture fixture = ClusterFixture.forFlatCluster(9) + .bringEntireClusterUp() + .reportStorageNodeState(2, stateWithDisks); + final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture); + + assertThat(state.toString(), equalTo("distributor:9 storage:9 .2.d:7 .2.d.5.s:d")); + } + + @Test + public void worse_distributor_wanted_state_overrides_reported_state() { + // Maintenance mode is illegal for distributors and therefore not tested + final ClusterFixture fixture = ClusterFixture.forFlatCluster(7) + .bringEntireClusterUp() + .proposeDistributorWantedState(5, State.DOWN) // Down worse than Up + .reportDistributorNodeState(2, State.STOPPING) + .proposeDistributorWantedState(2, State.DOWN); // Down worse than Stopping + final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture); + + assertThat(state.toString(), equalTo("distributor:7 .2.s:d .5.s:d storage:7")); + } + + @Test + public void worse_storage_wanted_state_overrides_reported_state() { + // Does not test all maintenance mode overrides; see maintenance_mode_overrides_reported_state + // for that. 
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(7) + .bringEntireClusterUp() + .reportStorageNodeState(2, State.STOPPING) + .proposeStorageNodeWantedState(2, State.MAINTENANCE) // Maintenance worse than Stopping + .proposeStorageNodeWantedState(4, State.RETIRED) // Retired is "worse" than Up + .proposeStorageNodeWantedState(5, State.DOWN); // Down worse than Up + final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture); + + assertThat(state.toString(), equalTo("distributor:7 storage:7 .2.s:m .4.s:r .5.s:d")); + } + + @Test + public void better_distributor_wanted_state_does_not_override_reported_state() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(7) + .bringEntireClusterUp() + .reportDistributorNodeState(0, State.DOWN) + .proposeDistributorWantedState(0, State.UP); + final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture); + + assertThat(state.toString(), equalTo("distributor:7 .0.s:d storage:7")); + } + + @Test + public void better_storage_wanted_state_does_not_override_reported_state() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(7) + .bringEntireClusterUp() + .reportStorageNodeState(1, State.DOWN) + .proposeStorageNodeWantedState(1, State.UP) + .reportStorageNodeState(2, State.DOWN) + .proposeStorageNodeWantedState(2, State.RETIRED); + final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture); + + assertThat(state.toString(), equalTo("distributor:7 storage:7 .1.s:d .2.s:d")); + } + + /** + * If we let a Retired node be published as Initializing when it is in init state, we run + * the risk of having both feed and merge ops be sent towards it, which is not what we want. + * Consequently we pretend such nodes are never in init state and just transition them + * directly from Maintenance -> Up. 
+ */ + @Test + public void retired_node_in_init_state_is_set_to_maintenance() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(3) + .bringEntireClusterUp() + .reportStorageNodeState(1, State.INITIALIZING) + .proposeStorageNodeWantedState(1, State.RETIRED); + final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture); + + assertThat(state.toString(), equalTo("distributor:3 storage:3 .1.s:m")); + } + + /** + * A storage node will report itself as being in initializing mode immediately when + * starting up. It can only accept external operations once it has finished listing + * the set of buckets (but not necessarily their contents). As a consequence of this, + * we have to map reported init state while bucket listing mode to Down. This will + * prevent clients from thinking they can use the node and prevent distributors form + * trying to fetch yet non-existent bucket sets from it. + * + * Detecting the bucket-listing stage is currently done by inspecting its init progress + * value and triggering on a sufficiently low value. + */ + @Test + public void storage_node_in_init_mode_while_listing_buckets_is_marked_down() { + final NodeState initWhileListingBuckets = new NodeState(NodeType.STORAGE, State.INITIALIZING); + initWhileListingBuckets.setInitProgress(0.0); + + final ClusterFixture fixture = ClusterFixture.forFlatCluster(3) + .bringEntireClusterUp() + .reportStorageNodeState(1, initWhileListingBuckets); + + final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture); + assertThat(state.toString(), equalTo("distributor:3 storage:3 .1.s:d")); + } + + /** + * Implicit down while reported as init should not kick into effect if the Wanted state + * is set to Maintenance. 
+ */ + @Test + public void implicit_down_while_listing_buckets_does_not_override_wanted_state() { + final NodeState initWhileListingBuckets = new NodeState(NodeType.STORAGE, State.INITIALIZING); + initWhileListingBuckets.setInitProgress(0.0); + + final ClusterFixture fixture = ClusterFixture.forFlatCluster(3) + .bringEntireClusterUp() + .reportStorageNodeState(1, initWhileListingBuckets) + .proposeStorageNodeWantedState(1, State.MAINTENANCE); + + final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture); + assertThat(state.toString(), equalTo("distributor:3 storage:3 .1.s:m")); + } + + @Test + public void distributor_nodes_in_init_mode_are_not_mapped_to_down() { + final NodeState initWhileListingBuckets = new NodeState(NodeType.DISTRIBUTOR, State.INITIALIZING); + initWhileListingBuckets.setInitProgress(0.0); + + final ClusterFixture fixture = ClusterFixture.forFlatCluster(3) + .bringEntireClusterUp() + .reportDistributorNodeState(1, initWhileListingBuckets); + + final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture); + assertThat(state.toString(), equalTo("distributor:3 .1.s:i .1.i:0.0 storage:3")); + } + + /** + * Maintenance mode overrides all reported states, even Down. 
+ */ + @Test + public void maintenance_mode_wanted_state_overrides_reported_state() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(7) + .bringEntireClusterUp() + .proposeStorageNodeWantedState(0, State.MAINTENANCE) + .reportStorageNodeState(2, State.STOPPING) + .proposeStorageNodeWantedState(2, State.MAINTENANCE) + .reportStorageNodeState(3, State.DOWN) + .proposeStorageNodeWantedState(3, State.MAINTENANCE) + .reportStorageNodeState(4, State.INITIALIZING) + .proposeStorageNodeWantedState(4, State.MAINTENANCE); + final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture); + + assertThat(state.toString(), equalTo("distributor:7 storage:7 .0.s:m .2.s:m .3.s:m .4.s:m")); + } + + @Test + public void wanted_state_description_carries_over_to_generated_state() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(7) + .bringEntireClusterUp() + .proposeStorageNodeWantedState(1, State.MAINTENANCE, "foo") + .proposeStorageNodeWantedState(2, State.DOWN, "bar") + .proposeStorageNodeWantedState(3, State.RETIRED, "baz"); + final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture); + + // We have to use toString(true) to get verbose printing including the descriptions, + // as these are omitted by default. 
+ assertThat(state.toString(true), equalTo("distributor:7 storage:7 .1.s:m .1.m:foo " + + ".2.s:d .2.m:bar .3.s:r .3.m:baz")); + } + + @Test + public void reported_disk_state_not_hidden_by_wanted_state() { + final NodeState stateWithDisks = new NodeState(NodeType.STORAGE, State.UP); + stateWithDisks.setDiskCount(5); + stateWithDisks.setDiskState(3, new DiskState(State.DOWN)); + + final ClusterFixture fixture = ClusterFixture.forFlatCluster(9) + .bringEntireClusterUp() + .reportStorageNodeState(2, stateWithDisks) + .proposeStorageNodeWantedState(2, State.RETIRED) + .reportStorageNodeState(3, stateWithDisks) + .proposeStorageNodeWantedState(3, State.MAINTENANCE); + final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture); + + // We do not publish disk states for nodes in Down state. This differs from how the + // legacy controller did things, but such states cannot be counted on for ideal state + // calculations either way. In particular, reported disk states are not persisted and + // only exist transiently in the cluster controller's memory. A controller restart is + // sufficient to clear all disk states that have been incidentally remembered for now + // downed nodes. + // The keen reader may choose to convince themselves of this independently by reading the + // code in com.yahoo.vdslib.distribution.Distribution#getIdealStorageNodes and observing + // how disk states for nodes that are in a down-state are never considered. 
+ assertThat(state.toString(), equalTo("distributor:9 storage:9 .2.s:r .2.d:5 .2.d.3.s:d " + + ".3.s:m .3.d:5 .3.d.3.s:d")); + } + + @Test + public void config_retired_mode_is_reflected_in_generated_state() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(5).bringEntireClusterUp(); + List<ConfiguredNode> nodes = DistributionBuilder.buildConfiguredNodes(5); + nodes.set(2, new ConfiguredNode(2, true)); + fixture.cluster.setNodes(nodes); + + final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture); + + assertThat(state.toString(), equalTo("distributor:5 storage:5 .2.s:r")); + } + + private void do_test_change_within_node_transition_time_window_generates_maintenance(State reportedState) { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(5).bringEntireClusterUp(); + final ClusterStateGenerator.Params params = fixture.generatorParams() + .currentTimeInMilllis(10_000) + .transitionTimes(2000); + + fixture.reportStorageNodeState(1, reportedState); + final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 1)); + // Node 1 transitioned to reported `reportedState` at time 9000ms after epoch. This means that according to the + // above transition time config, it should remain in generated maintenance mode until time 11000ms, + // at which point it should finally transition to generated state Down. 
+ nodeInfo.setTransitionTime(9000); + { + final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params); + assertThat(state.toString(), equalTo("distributor:5 storage:5 .1.s:m")); + } + + nodeInfo.setTransitionTime(10999); + { + final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params); + assertThat(state.toString(), equalTo("distributor:5 storage:5 .1.s:m")); + } + } + + @Test + public void reported_down_node_within_transition_time_has_maintenance_generated_state() { + do_test_change_within_node_transition_time_window_generates_maintenance(State.DOWN); + } + + @Test + public void reported_stopping_node_within_transition_time_has_maintenance_generated_state() { + do_test_change_within_node_transition_time_window_generates_maintenance(State.STOPPING); + } + + @Test + public void reported_node_down_after_transition_time_has_down_generated_state() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(5).bringEntireClusterUp(); + final ClusterStateGenerator.Params params = fixture.generatorParams() + .currentTimeInMilllis(11_000) + .transitionTimes(2000); + + fixture.reportStorageNodeState(1, State.DOWN); + final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 1)); + nodeInfo.setTransitionTime(9000); + + final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params); + assertThat(state.toString(), equalTo("distributor:5 storage:5 .1.s:d")); + } + + @Test + public void distributor_nodes_are_not_implicitly_transitioned_to_maintenance_mode() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(5).bringEntireClusterUp(); + final ClusterStateGenerator.Params params = fixture.generatorParams() + .currentTimeInMilllis(10_000) + .transitionTimes(2000); + + fixture.reportDistributorNodeState(2, State.DOWN); + final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.DISTRIBUTOR, 2)); + nodeInfo.setTransitionTime(9000); + + final 
AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params); + assertThat(state.toString(), equalTo("distributor:5 .2.s:d storage:5")); + } + + @Test + public void transient_maintenance_mode_does_not_override_wanted_down_state() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(5).bringEntireClusterUp(); + final ClusterStateGenerator.Params params = fixture.generatorParams() + .currentTimeInMilllis(10_000) + .transitionTimes(2000); + + fixture.proposeStorageNodeWantedState(2, State.DOWN); + fixture.reportStorageNodeState(2, State.DOWN); + final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 2)); + nodeInfo.setTransitionTime(9000); + + final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params); + // Should _not_ be in maintenance mode, since we explicitly want it to stay down. + assertThat(state.toString(), equalTo("distributor:5 storage:5 .2.s:d")); + } + + @Test + public void reported_down_retired_node_within_transition_time_transitions_to_maintenance() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(5).bringEntireClusterUp(); + final ClusterStateGenerator.Params params = fixture.generatorParams() + .currentTimeInMilllis(10_000) + .transitionTimes(2000); + + fixture.proposeStorageNodeWantedState(2, State.RETIRED); + fixture.reportStorageNodeState(2, State.DOWN); + final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 2)); + nodeInfo.setTransitionTime(9000); + + final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params); + assertThat(state.toString(), equalTo("distributor:5 storage:5 .2.s:m")); + } + + @Test + public void crash_count_exceeding_limit_marks_node_as_down() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(5).bringEntireClusterUp(); + final ClusterStateGenerator.Params params = fixture.generatorParams().maxPrematureCrashes(10); + + final NodeInfo nodeInfo = 
fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 3)); + nodeInfo.setPrematureCrashCount(11); + + final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params); + assertThat(state.toString(), equalTo("distributor:5 storage:5 .3.s:d")); + } + + @Test + public void crash_count_not_exceeding_limit_does_not_mark_node_as_down() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(5).bringEntireClusterUp(); + final ClusterStateGenerator.Params params = fixture.generatorParams().maxPrematureCrashes(10); + + final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 3)); + nodeInfo.setPrematureCrashCount(10); // "Max crashes" range is inclusive + + final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params); + assertThat(state.toString(), equalTo("distributor:5 storage:5")); + } + + @Test + public void exceeded_crash_count_does_not_override_wanted_maintenance_state() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(5) + .bringEntireClusterUp() + .proposeStorageNodeWantedState(1, State.MAINTENANCE); + final ClusterStateGenerator.Params params = fixture.generatorParams().maxPrematureCrashes(10); + + final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 1)); + nodeInfo.setPrematureCrashCount(11); + + final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params); + assertThat(state.toString(), equalTo("distributor:5 storage:5 .1.s:m")); + } + + // Stopping -> Down is expected and does not indicate an unstable node. 
+ @Test + public void transition_from_controlled_stop_to_down_does_not_add_to_crash_counter() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(2) + .bringEntireClusterUp() + .reportStorageNodeState(1, State.STOPPING, "controlled shutdown") // urgh, string matching logic + .reportStorageNodeState(1, State.DOWN); + final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 1)); + assertThat(nodeInfo.getPrematureCrashCount(), equalTo(0)); + } + + @Test + public void non_observed_storage_node_start_timestamp_is_included_in_state() { + final NodeState nodeState = new NodeState(NodeType.STORAGE, State.UP); + // A reported state timestamp that is not yet marked as observed in the NodeInfo + // for the same node is considered not observed by other nodes and must therefore + // be included in the generated cluster state + nodeState.setStartTimestamp(5000); + + final ClusterFixture fixture = ClusterFixture.forFlatCluster(5) + .bringEntireClusterUp() + .reportStorageNodeState(0, nodeState); + + final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture); + assertThat(state.toString(), equalTo("distributor:5 storage:5 .0.t:5000")); + } + + @Test + public void non_observed_distributor_start_timestamp_is_included_in_state() { + final NodeState nodeState = new NodeState(NodeType.DISTRIBUTOR, State.UP); + nodeState.setStartTimestamp(6000); + + final ClusterFixture fixture = ClusterFixture.forFlatCluster(5) + .bringEntireClusterUp() + .reportDistributorNodeState(1, nodeState); + + final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture); + assertThat(state.toString(), equalTo("distributor:5 .1.t:6000 storage:5")); + } + + @Test + public void fully_observed_storage_node_timestamp_not_included_in_state() { + final NodeState nodeState = new NodeState(NodeType.STORAGE, State.UP); + nodeState.setStartTimestamp(5000); + + final ClusterFixture fixture = ClusterFixture.forFlatCluster(5) + 
.bringEntireClusterUp() + .reportStorageNodeState(0, nodeState); + + final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 0)); + nodeInfo.setStartTimestamp(5000); + + final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture); + assertThat(state.toString(), equalTo("distributor:5 storage:5")); + } + + @Test + public void fully_observed_distributor_timestamp_not_included_in_state() { + final NodeState nodeState = new NodeState(NodeType.DISTRIBUTOR, State.UP); + nodeState.setStartTimestamp(6000); + + final ClusterFixture fixture = ClusterFixture.forFlatCluster(5) + .bringEntireClusterUp() + .reportDistributorNodeState(0, nodeState); + + final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.DISTRIBUTOR, 0)); + nodeInfo.setStartTimestamp(6000); + + final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture); + assertThat(state.toString(), equalTo("distributor:5 storage:5")); + } + + @Test + public void cluster_down_if_less_than_min_count_of_storage_nodes_available() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(3) + .bringEntireClusterUp() + .reportStorageNodeState(0, State.DOWN) + .reportStorageNodeState(2, State.DOWN); + final ClusterStateGenerator.Params params = fixture.generatorParams().minStorageNodesUp(2); + + final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params); + assertThat(state.toString(), equalTo("cluster:d distributor:3 storage:2 .0.s:d")); + assertThat(state.getClusterStateReason(), equalTo(Optional.of(ClusterStateReason.TOO_FEW_STORAGE_NODES_AVAILABLE))); + } + + @Test + public void cluster_not_down_if_more_than_min_count_of_storage_nodes_are_available() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(3) + .bringEntireClusterUp() + .reportStorageNodeState(0, State.DOWN); + final ClusterStateGenerator.Params params = fixture.generatorParams().minStorageNodesUp(2); + + final AnnotatedClusterState 
state = ClusterStateGenerator.generatedStateFrom(params); + assertThat(state.toString(), equalTo("distributor:3 storage:3 .0.s:d")); + assertThat(state.getClusterStateReason(), equalTo(Optional.empty())); + } + + @Test + public void cluster_down_if_less_than_min_count_of_distributors_available() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(3) + .bringEntireClusterUp() + .reportDistributorNodeState(0, State.DOWN) + .reportDistributorNodeState(2, State.DOWN); + final ClusterStateGenerator.Params params = fixture.generatorParams().minDistributorNodesUp(2); + + final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params); + assertThat(state.toString(), equalTo("cluster:d distributor:2 .0.s:d storage:3")); + assertThat(state.getClusterStateReason(), equalTo(Optional.of(ClusterStateReason.TOO_FEW_DISTRIBUTOR_NODES_AVAILABLE))); + } + + @Test + public void cluster_not_down_if_more_than_min_count_of_distributors_are_available() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(3) + .bringEntireClusterUp() + .reportDistributorNodeState(0, State.DOWN); + final ClusterStateGenerator.Params params = fixture.generatorParams().minDistributorNodesUp(2); + + final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params); + assertThat(state.toString(), equalTo("distributor:3 .0.s:d storage:3")); + assertThat(state.getClusterStateReason(), equalTo(Optional.empty())); + } + + @Test + public void maintenance_mode_counted_as_down_for_cluster_availability() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(3) + .bringEntireClusterUp() + .reportStorageNodeState(0, State.DOWN) + .proposeStorageNodeWantedState(2, State.MAINTENANCE); + final ClusterStateGenerator.Params params = fixture.generatorParams().minStorageNodesUp(2); + + final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params); + assertThat(state.toString(), equalTo("cluster:d distributor:3 storage:3 .0.s:d 
.2.s:m")); + } + + @Test + public void init_and_retired_counted_as_up_for_cluster_availability() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(3) + .bringEntireClusterUp() + .reportStorageNodeState(0, State.INITIALIZING) + .proposeStorageNodeWantedState(1, State.RETIRED); + // Any node being treated as down should take down the cluster here + final ClusterStateGenerator.Params params = fixture.generatorParams().minStorageNodesUp(3); + + final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params); + assertThat(state.toString(), equalTo("distributor:3 storage:3 .0.s:i .0.i:1.0 .1.s:r")); + } + + @Test + public void cluster_down_if_less_than_min_ratio_of_storage_nodes_available() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(3) + .bringEntireClusterUp() + .reportStorageNodeState(0, State.DOWN) + .reportStorageNodeState(2, State.DOWN); + final ClusterStateGenerator.Params params = fixture.generatorParams().minRatioOfStorageNodesUp(0.5); + + // TODO de-dupe a lot of these tests? + final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params); + assertThat(state.toString(), equalTo("cluster:d distributor:3 storage:2 .0.s:d")); + assertThat(state.getClusterStateReason(), equalTo(Optional.of(ClusterStateReason.TOO_LOW_AVAILABLE_STORAGE_NODE_RATIO))); + } + + @Test + public void cluster_not_down_if_more_than_min_ratio_of_storage_nodes_available() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(3) + .bringEntireClusterUp() + .reportStorageNodeState(0, State.DOWN); + // Min node ratio is inclusive, i.e. 0.5 of 2 nodes is enough for cluster to be up. 
+ final ClusterStateGenerator.Params params = fixture.generatorParams().minRatioOfStorageNodesUp(0.5); + + final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params); + assertThat(state.toString(), equalTo("distributor:3 storage:3 .0.s:d")); + assertThat(state.getClusterStateReason(), equalTo(Optional.empty())); + } + + @Test + public void cluster_down_if_less_than_min_ratio_of_distributors_available() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(3) + .bringEntireClusterUp() + .reportDistributorNodeState(0, State.DOWN) + .reportDistributorNodeState(2, State.DOWN); + final ClusterStateGenerator.Params params = fixture.generatorParams().minRatioOfDistributorNodesUp(0.5); + + // TODO de-dupe a lot of these tests? + final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params); + assertThat(state.toString(), equalTo("cluster:d distributor:2 .0.s:d storage:3")); + assertThat(state.getClusterStateReason(), equalTo(Optional.of(ClusterStateReason.TOO_LOW_AVAILABLE_DISTRIBUTOR_NODE_RATIO))); + } + + @Test + public void cluster_not_down_if_more_than_min_ratio_of_distributors_available() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(3) + .bringEntireClusterUp() + .reportDistributorNodeState(0, State.DOWN); + final ClusterStateGenerator.Params params = fixture.generatorParams().minRatioOfDistributorNodesUp(0.5); + + final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params); + assertThat(state.toString(), equalTo("distributor:3 .0.s:d storage:3")); + assertThat(state.getClusterStateReason(), equalTo(Optional.empty())); + } + + @Test + public void group_nodes_are_marked_down_if_group_availability_too_low() { + final ClusterFixture fixture = ClusterFixture + .forHierarchicCluster(DistributionBuilder.withGroups(3).eachWithNodeCount(3)) + .bringEntireClusterUp() + .reportStorageNodeState(4, State.DOWN); + final ClusterStateGenerator.Params params = 
fixture.generatorParams().minNodeRatioPerGroup(0.68); + + // Node 4 is down, which is more than 32% of nodes down in group #2. Nodes 3,5 should be implicitly + // marked down as it is in the same group. + final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params); + assertThat(state.toString(), equalTo("distributor:9 storage:9 .3.s:d .4.s:d .5.s:d")); + } + + @Test + public void group_nodes_are_not_marked_down_if_group_availability_sufficiently_high() { + final ClusterFixture fixture = ClusterFixture + .forHierarchicCluster(DistributionBuilder.withGroups(3).eachWithNodeCount(3)) + .bringEntireClusterUp() + .reportStorageNodeState(4, State.DOWN); + final ClusterStateGenerator.Params params = fixture.generatorParams().minNodeRatioPerGroup(0.65); + + final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params); + assertThat(state.toString(), equalTo("distributor:9 storage:9 .4.s:d")); // No other nodes down implicitly + } + + @Test + public void implicitly_downed_group_nodes_receive_a_state_description() { + final ClusterFixture fixture = ClusterFixture + .forHierarchicCluster(DistributionBuilder.withGroups(2).eachWithNodeCount(2)) + .bringEntireClusterUp() + .reportStorageNodeState(3, State.DOWN); + final ClusterStateGenerator.Params params = fixture.generatorParams().minNodeRatioPerGroup(0.51); + + final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params); + assertThat(state.toString(true), equalTo("distributor:4 storage:4 " + + ".2.s:d .2.m:group\\x20node\\x20availability\\x20below\\x20configured\\x20threshold " + + ".3.s:d .3.m:mockdesc")); // Preserve description for non-implicitly taken down node + } + + @Test + public void implicitly_downed_group_nodes_are_annotated_with_group_reason() { + final ClusterFixture fixture = ClusterFixture + .forHierarchicCluster(DistributionBuilder.withGroups(2).eachWithNodeCount(2)) + .bringEntireClusterUp() + .reportStorageNodeState(3, State.DOWN); + 
final ClusterStateGenerator.Params params = fixture.generatorParams().minNodeRatioPerGroup(0.51); + + final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params); + assertThat(state.getNodeStateReasons(), + hasStateReasonForNode(storageNode(2), NodeStateReason.GROUP_IS_DOWN)); + } + + @Test + public void maintenance_nodes_in_downed_group_are_not_affected() { + final ClusterFixture fixture = ClusterFixture + .forHierarchicCluster(DistributionBuilder.withGroups(3).eachWithNodeCount(3)) + .bringEntireClusterUp() + .proposeStorageNodeWantedState(3, State.MAINTENANCE) + .reportStorageNodeState(4, State.DOWN); + final ClusterStateGenerator.Params params = fixture.generatorParams().minNodeRatioPerGroup(0.68); + + final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params); + // 4 is down by itself, 5 is down implicitly and 3 should happily stay in Maintenance mode. + // Side note: most special cases for when a node should and should not be affected by group + // down edges are covered in GroupAvailabilityCalculatorTest and GroupAutoTakedownTest. + // We test this case explicitly since it's an assurance that code integration works as expected. + assertThat(state.toString(), equalTo("distributor:9 storage:9 .3.s:m .4.s:d .5.s:d")); + } + + /** + * Cluster-wide distribution bit count cannot be higher than the lowest split bit + * count reported by the set of storage nodes. This is because the distribution bit + * directly impacts which level of the bucket tree is considered the root level, + * and any buckets caught over this level would not be accessible in the data space. 
+ */ + @Test + public void distribution_bits_bounded_by_reported_min_bits_from_storage_node() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(3) + .bringEntireClusterUp() + .reportStorageNodeState(1, new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(7)); + + final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture); + assertThat(state.toString(), equalTo("bits:7 distributor:3 storage:3")); + } + + @Test + public void distribution_bits_bounded_by_lowest_reporting_storage_node() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(3) + .bringEntireClusterUp() + .reportStorageNodeState(0, new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(6)) + .reportStorageNodeState(1, new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(5)); + + final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture); + assertThat(state.toString(), equalTo("bits:5 distributor:3 storage:3")); + } + + @Test + public void distribution_bits_bounded_by_config_parameter() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(3).bringEntireClusterUp(); + + final ClusterStateGenerator.Params params = fixture.generatorParams().idealDistributionBits(12); + final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params); + assertThat(state.toString(), equalTo("bits:12 distributor:3 storage:3")); + } + + // TODO do we really want this behavior? It's the legacy one, but it seems... dangerous.. Especially for maintenance + // TODO We generally want to avoid distribution bit decreases if at all possible, since "collapsing" + // the top-level bucket space can cause data loss on timestamp collisions across super buckets. 
+ @Test + public void distribution_bit_not_influenced_by_nodes_down_or_in_maintenance() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(3) + .bringEntireClusterUp() + .reportStorageNodeState(0, new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(7)) + .reportStorageNodeState(1, new NodeState(NodeType.STORAGE, State.DOWN).setMinUsedBits(6)) + .reportStorageNodeState(2, new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(5)) + .proposeStorageNodeWantedState(2, State.MAINTENANCE); + + final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture); + assertThat(state.toString(), equalTo("bits:7 distributor:3 storage:3 .1.s:d .2.s:m")); + } + + private String do_test_distribution_bit_watermark(int lowestObserved, int node0MinUsedBits) { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(3) + .bringEntireClusterUp() + .reportStorageNodeState(0, new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(node0MinUsedBits)); + + final ClusterStateGenerator.Params params = fixture.generatorParams() + .highestObservedDistributionBitCount(8) // TODO is this even needed for our current purposes? + .lowestObservedDistributionBitCount(lowestObserved); + + return ClusterStateGenerator.generatedStateFrom(params).toString(); + } + + /** + * Distribution bit increases should not take place incrementally. Doing so would + * let e.g. a transition from 10 bits to 20 bits cause 10 interim full re-distributions. 
+ */ + @Test + public void published_distribution_bit_bound_by_low_watermark_when_nodes_report_less_than_config_bits() { + assertThat(do_test_distribution_bit_watermark(5, 5), + equalTo("bits:5 distributor:3 storage:3")); + assertThat(do_test_distribution_bit_watermark(5, 6), + equalTo("bits:5 distributor:3 storage:3")); + assertThat(do_test_distribution_bit_watermark(5, 15), + equalTo("bits:5 distributor:3 storage:3")); + } + + @Test + public void published_state_jumps_to_configured_ideal_bits_when_all_nodes_report_it() { + // Note: the rest of the mocked nodes always report 16 bits by default + assertThat(do_test_distribution_bit_watermark(5, 16), + equalTo("distributor:3 storage:3")); // "bits:16" implied + } + + private String do_test_storage_node_with_no_init_progress(State wantedState) { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(3) + .bringEntireClusterUp() + .reportStorageNodeState(0, new NodeState(NodeType.STORAGE, State.INITIALIZING).setInitProgress(0.5)) + .proposeStorageNodeWantedState(0, wantedState); + + final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 0)); + nodeInfo.setInitProgressTime(10_000); + + final ClusterStateGenerator.Params params = fixture.generatorParams() + .maxInitProgressTime(1000) + .currentTimeInMilllis(11_000); + final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params); + return state.toString(); + } + + @Test + public void storage_node_with_no_init_progress_within_timeout_is_marked_down() { + assertThat(do_test_storage_node_with_no_init_progress(State.UP), + equalTo("distributor:3 storage:3 .0.s:d")); + } + + /** + * As per usual, we shouldn't transition implicitly to Down if Maintenance is set + * as the wanted state. 
+ */ + @Test + public void maintenance_wanted_state_overrides_storage_node_with_no_init_progress() { + assertThat(do_test_storage_node_with_no_init_progress(State.MAINTENANCE), + equalTo("distributor:3 storage:3 .0.s:m")); + } + + /** + * Legacy behavior: if a node has crashed (i.e. transition into Down) at least once + * while in Init mode, its subsequent init mode will not be made public. + * This means the node will remain in a Down-state until it has finished + * initializing. This is presumably because unstable nodes may not be able to finish + * their init stage and would otherwise pop in and out of the cluster state. + */ + @Test + public void unstable_init_storage_node_has_init_state_substituted_by_down() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(5) + .bringEntireClusterUp() + .reportStorageNodeState(0, State.INITIALIZING) + .reportStorageNodeState(0, State.DOWN) // Init -> Down triggers unstable init flag + .reportStorageNodeState(0, new NodeState(NodeType.STORAGE, State.INITIALIZING).setInitProgress(0.5)); + + final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture); + assertThat(state.toString(), equalTo("distributor:5 storage:5 .0.s:d")); + } + + @Test + public void storage_node_with_crashes_but_not_unstable_init_does_not_have_init_state_substituted_by_down() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(5) + .bringEntireClusterUp() + .reportStorageNodeState(0, new NodeState(NodeType.STORAGE, State.INITIALIZING).setInitProgress(0.5)); + final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 0)); + nodeInfo.setPrematureCrashCount(5); + + final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture); + assertThat(state.toString(), equalTo("distributor:5 storage:5 .0.s:i .0.i:0.5")); + } + + /** + * The generated state must be considered over the Reported state when deciding whether + * to override it with the Wanted state. 
Otherwise, an unstable retired node could have + * its generated state be Retired instead of Down. We want it to stay down instead of + * potentially contributing additional instability to the cluster. + */ + @Test + public void unstable_retired_node_should_be_marked_down() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(5) + .bringEntireClusterUp() + .proposeStorageNodeWantedState(3, State.RETIRED); + final ClusterStateGenerator.Params params = fixture.generatorParams().maxPrematureCrashes(10); + + final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 3)); + nodeInfo.setPrematureCrashCount(11); + + final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params); + assertThat(state.toString(), equalTo("distributor:5 storage:5 .3.s:d")); + } + + @Test + public void generator_params_can_inherit_values_from_controller_options() { + FleetControllerOptions options = new FleetControllerOptions("foocluster"); + options.maxPrematureCrashes = 1; + options.minStorageNodesUp = 2; + options.minDistributorNodesUp = 3; + options.minRatioOfStorageNodesUp = 0.4; + options.minRatioOfDistributorNodesUp = 0.5; + options.minNodeRatioPerGroup = 0.6; + options.distributionBits = 7; + options.maxTransitionTime = ClusterStateGenerator.Params.buildTransitionTimeMap(1000, 2000); + final ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options); + assertThat(params.maxPrematureCrashes, equalTo(options.maxPrematureCrashes)); + assertThat(params.minStorageNodesUp, equalTo(options.minStorageNodesUp)); + assertThat(params.minDistributorNodesUp, equalTo(options.minDistributorNodesUp)); + assertThat(params.minRatioOfStorageNodesUp, equalTo(options.minRatioOfStorageNodesUp)); + assertThat(params.minRatioOfDistributorNodesUp, equalTo(options.minRatioOfDistributorNodesUp)); + assertThat(params.minNodeRatioPerGroup, equalTo(options.minNodeRatioPerGroup)); + assertThat(params.transitionTimes, 
equalTo(options.maxTransitionTime)); + } + + @Test + public void configured_zero_init_progress_time_disables_auto_init_to_down_feature() { + final ClusterFixture fixture = ClusterFixture.forFlatCluster(3) + .bringEntireClusterUp() + .reportStorageNodeState(0, new NodeState(NodeType.STORAGE, State.INITIALIZING).setInitProgress(0.5)); + + final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 0)); + nodeInfo.setInitProgressTime(10_000); + + final ClusterStateGenerator.Params params = fixture.generatorParams() + .maxInitProgressTime(0) + .currentTimeInMilllis(11_000); + final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params); + assertThat(state.toString(), equalTo("distributor:3 storage:3 .0.s:i .0.i:0.5")); + } + +} diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DistributionBitCountTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DistributionBitCountTest.java index 1adb0dcad7d..74661147085 100644 --- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DistributionBitCountTest.java +++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DistributionBitCountTest.java @@ -74,13 +74,14 @@ public class DistributionBitCountTest extends FleetControllerTest { nodes.get(3).setNodeState(new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(11)); ClusterState startState = waitForState("version:\\d+ bits:11 distributor:10 storage:10"); - ClusterState state = waitForClusterStateIncludingNodesWithMinUsedBits(11, 2); nodes.get(1).setNodeState(new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(12)); - assertEquals(state + "->" + fleetController.getSystemState(), startState.getVersion(), fleetController.getSystemState().getVersion()); + assertEquals(startState + "->" + fleetController.getSystemState(), + startState.getVersion(), fleetController.getSystemState().getVersion()); for (int i = 
0; i < 10; ++i) { - nodes.get(i).setNodeState(new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(17)); + // nodes is array of [distr.0, stor.0, distr.1, stor.1, ...] and we just want the storage nodes + nodes.get(i*2 + 1).setNodeState(new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(17)); } assertEquals(startState.getVersion() + 1, waitForState("version:\\d+ bits:17 distributor:10 storage:10").getVersion()); } diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/EventDiffCalculatorTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/EventDiffCalculatorTest.java new file mode 100644 index 00000000000..2a5b3adcfe7 --- /dev/null +++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/EventDiffCalculatorTest.java @@ -0,0 +1,319 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.clustercontroller.core; + +import static com.yahoo.vespa.clustercontroller.core.matchers.EventForNode.eventForNode; +import static com.yahoo.vespa.clustercontroller.core.matchers.NodeEventWithDescription.nodeEventWithDescription; +import static com.yahoo.vespa.clustercontroller.core.matchers.ClusterEventWithDescription.clusterEventWithDescription; +import static com.yahoo.vespa.clustercontroller.core.matchers.EventTypeIs.eventTypeIs; +import static com.yahoo.vespa.clustercontroller.core.matchers.EventTimeIs.eventTimeIs; +import static org.hamcrest.CoreMatchers.allOf; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.junit.Assert.assertThat; +import static org.hamcrest.CoreMatchers.hasItem; + +import static com.yahoo.vespa.clustercontroller.core.ClusterFixture.storageNode; +import static com.yahoo.vespa.clustercontroller.core.ClusterFixture.distributorNode; + +import com.yahoo.vdslib.state.ClusterState; +import com.yahoo.vdslib.state.Node; +import org.junit.Test; + +import 
java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; + +public class EventDiffCalculatorTest { + + private static Map<Node, NodeStateReason> emptyNodeStateReasons() { + return Collections.emptyMap(); + } + + private static class EventFixture { + final ClusterFixture clusterFixture; + // TODO could reasonably put shared state into a common class to avoid dupes for both before/after + Optional<ClusterStateReason> clusterReasonBefore = Optional.empty(); + Optional<ClusterStateReason> clusterReasonAfter = Optional.empty(); + ClusterState clusterStateBefore = ClusterState.emptyState(); + ClusterState clusterStateAfter = ClusterState.emptyState(); + final Map<Node, NodeStateReason> nodeReasonsBefore = new HashMap<>(); + final Map<Node, NodeStateReason> nodeReasonsAfter = new HashMap<>(); + long currentTimeMs = 0; + + EventFixture(int nodeCount) { + this.clusterFixture = ClusterFixture.forFlatCluster(nodeCount); + } + + EventFixture clusterStateBefore(String stateStr) { + clusterStateBefore = ClusterState.stateFromString(stateStr); + return this; + } + EventFixture clusterStateAfter(String stateStr) { + clusterStateAfter = ClusterState.stateFromString(stateStr); + return this; + } + EventFixture storageNodeReasonBefore(int index, NodeStateReason reason) { + nodeReasonsBefore.put(storageNode(index), reason); + return this; + } + EventFixture storageNodeReasonAfter(int index, NodeStateReason reason) { + nodeReasonsAfter.put(storageNode(index), reason); + return this; + } + EventFixture clusterReasonBefore(ClusterStateReason reason) { + this.clusterReasonBefore = Optional.of(reason); + return this; + } + EventFixture clusterReasonAfter(ClusterStateReason reason) { + this.clusterReasonAfter = Optional.of(reason); + return this; + } + EventFixture currentTimeMs(long timeMs) { + this.currentTimeMs = timeMs; + return this; + } + + List<Event> computeEventDiff() { + final AnnotatedClusterState stateBefore = 
new AnnotatedClusterState( + clusterStateBefore, clusterReasonBefore, nodeReasonsBefore); + final AnnotatedClusterState stateAfter = new AnnotatedClusterState( + clusterStateAfter, clusterReasonAfter, nodeReasonsAfter); + + return EventDiffCalculator.computeEventDiff( + EventDiffCalculator.params() + .cluster(clusterFixture.cluster()) + .fromState(stateBefore) + .toState(stateAfter) + .currentTimeMs(currentTimeMs)); + } + + static EventFixture createForNodes(int nodeCount) { + return new EventFixture(nodeCount); + } + + } + + @Test + public void single_storage_node_state_transition_emits_altered_node_state_event() { + final EventFixture fixture = EventFixture.createForNodes(3) + .clusterStateBefore("distributor:3 storage:3") + .clusterStateAfter("distributor:3 storage:3 .0.s:d"); + + final List<Event> events = fixture.computeEventDiff(); + assertThat(events.size(), equalTo(1)); + assertThat(events, hasItem(allOf( + eventForNode(storageNode(0)), + eventTypeIs(NodeEvent.Type.CURRENT), + nodeEventWithDescription("Altered node state in cluster state from 'U' to 'D'")))); + } + + @Test + public void single_distributor_node_state_transition_emits_altered_node_state_event() { + final EventFixture fixture = EventFixture.createForNodes(3) + .clusterStateBefore("distributor:3 storage:3") + .clusterStateAfter("distributor:3 .1.s:d storage:3"); + + final List<Event> events = fixture.computeEventDiff(); + assertThat(events.size(), equalTo(1)); + assertThat(events, hasItem(allOf( + eventForNode(distributorNode(1)), + eventTypeIs(NodeEvent.Type.CURRENT), + nodeEventWithDescription("Altered node state in cluster state from 'U' to 'D'")))); + } + + @Test + public void node_state_change_event_is_tagged_with_given_time() { + final EventFixture fixture = EventFixture.createForNodes(3) + .clusterStateBefore("distributor:3 storage:3") + .clusterStateAfter("distributor:3 storage:3 .0.s:d") + .currentTimeMs(123456); + + final List<Event> events = fixture.computeEventDiff(); + 
assertThat(events.size(), equalTo(1)); + assertThat(events, hasItem(eventTimeIs(123456))); + } + + @Test + public void multiple_node_state_transitions_emit_multiple_node_state_events() { + final EventFixture fixture = EventFixture.createForNodes(3) + .clusterStateBefore("distributor:3 storage:3 .1.s:d") + .clusterStateAfter("distributor:3 .2.s:d storage:3 .0.s:r"); + + final List<Event> events = fixture.computeEventDiff(); + assertThat(events.size(), equalTo(3)); + assertThat(events, hasItem(allOf( + eventForNode(distributorNode(2)), + nodeEventWithDescription("Altered node state in cluster state from 'U' to 'D'")))); + assertThat(events, hasItem(allOf( + eventForNode(storageNode(0)), + nodeEventWithDescription("Altered node state in cluster state from 'U' to 'R'")))); + assertThat(events, hasItem(allOf( + eventForNode(storageNode(1)), + nodeEventWithDescription("Altered node state in cluster state from 'D' to 'U'")))); + } + + @Test + public void no_emitted_node_state_event_when_node_state_not_changed() { + final EventFixture fixture = EventFixture.createForNodes(3) + .clusterStateBefore("distributor:3 storage:3") + .clusterStateAfter("distributor:3 storage:3"); + + final List<Event> events = fixture.computeEventDiff(); + assertThat(events.size(), equalTo(0)); + } + + @Test + public void node_down_edge_with_group_down_reason_has_separate_event_emitted() { + // We sneakily use a flat cluster here but still use a 'group down' reason. Differ doesn't currently care. 
+ final EventFixture fixture = EventFixture.createForNodes(3) + .clusterStateBefore("distributor:3 storage:3") + .clusterStateAfter("distributor:3 storage:3 .1.s:d") + .storageNodeReasonAfter(1, NodeStateReason.GROUP_IS_DOWN); + + final List<Event> events = fixture.computeEventDiff(); + assertThat(events.size(), equalTo(2)); + // Both the regular edge event and the group down event is emitted + assertThat(events, hasItem(allOf( + eventForNode(storageNode(1)), + nodeEventWithDescription("Altered node state in cluster state from 'U' to 'D'")))); + assertThat(events, hasItem(allOf( + eventForNode(storageNode(1)), + eventTypeIs(NodeEvent.Type.CURRENT), + nodeEventWithDescription("Group node availability is below configured threshold")))); + } + + @Test + public void group_down_to_group_down_does_not_emit_new_event() { + final EventFixture fixture = EventFixture.createForNodes(3) + .clusterStateBefore("distributor:3 storage:3 .1.s:d") + .clusterStateAfter("distributor:3 storage:3 .1.s:m") + .storageNodeReasonBefore(1, NodeStateReason.GROUP_IS_DOWN) + .storageNodeReasonAfter(1, NodeStateReason.GROUP_IS_DOWN); + + final List<Event> events = fixture.computeEventDiff(); + assertThat(events.size(), equalTo(1)); + // Should not get a group availability event since nothing has changed in this regard + assertThat(events, hasItem(allOf( + eventForNode(storageNode(1)), + nodeEventWithDescription("Altered node state in cluster state from 'D' to 'M'")))); + } + + @Test + public void group_down_to_clear_reason_emits_group_up_event() { + final EventFixture fixture = EventFixture.createForNodes(3) + .clusterStateBefore("distributor:3 storage:3 .2.s:d") + .clusterStateAfter("distributor:3 storage:3") + .storageNodeReasonBefore(2, NodeStateReason.GROUP_IS_DOWN); // But no after-reason. 
+ + final List<Event> events = fixture.computeEventDiff(); + assertThat(events.size(), equalTo(2)); + assertThat(events, hasItem(allOf( + eventForNode(storageNode(2)), + nodeEventWithDescription("Altered node state in cluster state from 'D' to 'U'")))); + assertThat(events, hasItem(allOf( + eventForNode(storageNode(2)), + eventTypeIs(NodeEvent.Type.CURRENT), + nodeEventWithDescription("Group node availability has been restored")))); + } + + @Test + public void cluster_up_edge_emits_sufficient_node_availability_event() { + final EventFixture fixture = EventFixture.createForNodes(3) + .clusterStateBefore("cluster:d distributor:3 storage:3") + .clusterStateAfter("distributor:3 storage:3"); + + final List<Event> events = fixture.computeEventDiff(); + assertThat(events.size(), equalTo(1)); + assertThat(events, hasItem( + clusterEventWithDescription("Enough nodes available for system to become up"))); + } + + @Test + public void cluster_down_event_without_reason_annotation_emits_generic_down_event() { + final EventFixture fixture = EventFixture.createForNodes(3) + .clusterStateBefore("distributor:3 storage:3") + .clusterStateAfter("cluster:d distributor:3 storage:3"); + + final List<Event> events = fixture.computeEventDiff(); + assertThat(events.size(), equalTo(1)); + assertThat(events, hasItem( + clusterEventWithDescription("Cluster is down"))); + } + + @Test + public void cluster_event_is_tagged_with_given_time() { + final EventFixture fixture = EventFixture.createForNodes(3) + .clusterStateBefore("distributor:3 storage:3") + .clusterStateAfter("cluster:d distributor:3 storage:3") + .currentTimeMs(56789); + + final List<Event> events = fixture.computeEventDiff(); + assertThat(events.size(), equalTo(1)); + assertThat(events, hasItem(eventTimeIs(56789))); + } + + @Test + public void no_event_emitted_for_cluster_down_to_down_edge() { + final EventFixture fixture = EventFixture.createForNodes(3) + .clusterStateBefore("cluster:d distributor:3 storage:3") + 
.clusterStateAfter("cluster:d distributor:3 storage:3"); + + final List<Event> events = fixture.computeEventDiff(); + assertThat(events.size(), equalTo(0)); + } + + @Test + public void too_few_storage_nodes_cluster_down_reason_emits_corresponding_event() { + final EventFixture fixture = EventFixture.createForNodes(3) + .clusterStateBefore("distributor:3 storage:3") + .clusterStateAfter("cluster:d distributor:3 storage:3") + .clusterReasonAfter(ClusterStateReason.TOO_FEW_STORAGE_NODES_AVAILABLE); + + final List<Event> events = fixture.computeEventDiff(); + assertThat(events.size(), equalTo(1)); + // TODO(?) these messages currently don't include the current configured limits + assertThat(events, hasItem( + clusterEventWithDescription("Too few storage nodes available in cluster. Setting cluster state down"))); + } + + @Test + public void too_few_distributor_nodes_cluster_down_reason_emits_corresponding_event() { + final EventFixture fixture = EventFixture.createForNodes(3) + .clusterStateBefore("distributor:3 storage:3") + .clusterStateAfter("cluster:d distributor:3 storage:3") + .clusterReasonAfter(ClusterStateReason.TOO_FEW_DISTRIBUTOR_NODES_AVAILABLE); + + final List<Event> events = fixture.computeEventDiff(); + assertThat(events.size(), equalTo(1)); + assertThat(events, hasItem( + clusterEventWithDescription("Too few distributor nodes available in cluster. Setting cluster state down"))); + } + + @Test + public void too_low_storage_node_ratio_cluster_down_reason_emits_corresponding_event() { + final EventFixture fixture = EventFixture.createForNodes(3) + .clusterStateBefore("distributor:3 storage:3") + .clusterStateAfter("cluster:d distributor:3 storage:3") + .clusterReasonAfter(ClusterStateReason.TOO_LOW_AVAILABLE_STORAGE_NODE_RATIO); + + final List<Event> events = fixture.computeEventDiff(); + assertThat(events.size(), equalTo(1)); + assertThat(events, hasItem( + clusterEventWithDescription("Too low ratio of available storage nodes. 
Setting cluster state down"))); + } + + @Test + public void too_low_distributor_node_ratio_cluster_down_reason_emits_corresponding_event() { + final EventFixture fixture = EventFixture.createForNodes(3) + .clusterStateBefore("distributor:3 storage:3") + .clusterStateAfter("cluster:d distributor:3 storage:3") + .clusterReasonAfter(ClusterStateReason.TOO_LOW_AVAILABLE_DISTRIBUTOR_NODE_RATIO); + + final List<Event> events = fixture.computeEventDiff(); + assertThat(events.size(), equalTo(1)); + assertThat(events, hasItem( + clusterEventWithDescription("Too low ratio of available distributor nodes. Setting cluster state down"))); + } + +} diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerTest.java index f4b3e648f63..d0aa0bceba9 100644 --- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerTest.java +++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerTest.java @@ -6,13 +6,11 @@ import com.yahoo.jrt.slobrok.server.Slobrok; import com.yahoo.log.LogLevel; import com.yahoo.log.LogSetup; import com.yahoo.vdslib.distribution.ConfiguredNode; -import com.yahoo.vdslib.distribution.Distribution; import com.yahoo.vdslib.state.ClusterState; import com.yahoo.vdslib.state.Node; import com.yahoo.vdslib.state.NodeState; import com.yahoo.vdslib.state.NodeType; import com.yahoo.vespa.clustercontroller.core.database.DatabaseHandler; -import com.yahoo.vespa.clustercontroller.core.hostinfo.HostInfo; import com.yahoo.vespa.clustercontroller.core.rpc.RPCCommunicator; import com.yahoo.vespa.clustercontroller.core.rpc.RpcServer; import com.yahoo.vespa.clustercontroller.core.rpc.SlobrokClient; @@ -150,7 +148,7 @@ public abstract class FleetControllerTest implements Waiter { } RpcServer rpcServer = new RpcServer(timer, timer, options.clusterName, 
options.fleetControllerIndex, options.slobrokBackOffPolicy); DatabaseHandler database = new DatabaseHandler(timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer); - SystemStateGenerator stateGenerator = new SystemStateGenerator(timer, log, metricUpdater); + StateChangeHandler stateGenerator = new StateChangeHandler(timer, log, metricUpdater); SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer); MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer); FleetController controller = new FleetController(timer, log, cluster, stateGatherer, communicator, status, rpcServer, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options); diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/GroupAutoTakedownTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/GroupAutoTakedownTest.java index be60fba234a..a7307e0180a 100644 --- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/GroupAutoTakedownTest.java +++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/GroupAutoTakedownTest.java @@ -9,19 +9,22 @@ import com.yahoo.vdslib.state.NodeType; import com.yahoo.vdslib.state.State; import com.yahoo.vespa.clustercontroller.core.database.DatabaseHandler; import com.yahoo.vespa.clustercontroller.core.listeners.NodeStateOrHostInfoChangeHandler; -import com.yahoo.vespa.clustercontroller.core.listeners.SystemStateListener; + +import static com.yahoo.vespa.clustercontroller.core.matchers.EventForNode.eventForNode; +import static com.yahoo.vespa.clustercontroller.core.matchers.NodeEventWithDescription.nodeEventWithDescription; import org.junit.Test; -import org.mockito.ArgumentMatcher; import java.util.HashSet; +import java.util.List; import java.util.Set; import static 
org.hamcrest.core.AllOf.allOf; +import static org.hamcrest.core.IsCollectionContaining.hasItem; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.mockito.Matchers.any; -import static org.mockito.Matchers.argThat; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -43,26 +46,29 @@ public class GroupAutoTakedownTest { } private static void setSharedFixtureOptions(ClusterFixture fixture, double minNodeRatioPerGroup) { - fixture.generator.setMinNodeRatioPerGroup(minNodeRatioPerGroup); + fixture.setMinNodeRatioPerGroup(minNodeRatioPerGroup); fixture.disableTransientMaintenanceModeOnDown(); fixture.disableAutoClusterTakedown(); fixture.bringEntireClusterUp(); } private String stateAfterStorageTransition(ClusterFixture fixture, final int index, final State state) { - transitionStoreNodeToState(fixture, index, state); + transitionStorageNodeToState(fixture, index, state); return fixture.generatedClusterState(); } private String verboseStateAfterStorageTransition(ClusterFixture fixture, final int index, final State state) { - transitionStoreNodeToState(fixture, index, state); + transitionStorageNodeToState(fixture, index, state); return fixture.verboseGeneratedClusterState(); } - private void transitionStoreNodeToState(ClusterFixture fixture, int index, State state) { + private void transitionStorageNodeToState(ClusterFixture fixture, int index, State state) { fixture.reportStorageNodeState(index, state); - SystemStateListener listener = mock(SystemStateListener.class); - assertTrue(fixture.generator.notifyIfNewSystemState(fixture.cluster, listener)); + } + + private AnnotatedClusterState annotatedStateAfterStorageTransition(ClusterFixture fixture, final int index, final State state) { + transitionStorageNodeToState(fixture, index, state); + return 
fixture.annotatedGeneratedClusterState(); } /** @@ -74,12 +80,9 @@ public class GroupAutoTakedownTest { public void config_does_not_apply_to_flat_hierarchy_clusters() { ClusterFixture fixture = createFixtureForAllUpFlatCluster(5, 0.99); - SystemStateListener listener = mock(SystemStateListener.class); - // First invocation; generates initial state and clears "new state" flag - assertTrue(fixture.generator.notifyIfNewSystemState(fixture.cluster, listener)); - assertEquals("version:1 distributor:5 storage:5", fixture.generatedClusterState()); + assertEquals("distributor:5 storage:5", fixture.generatedClusterState()); - assertEquals("version:2 distributor:5 storage:5 .1.s:d", + assertEquals("distributor:5 storage:5 .1.s:d", stateAfterStorageTransition(fixture, 1, State.DOWN)); } @@ -88,15 +91,13 @@ public class GroupAutoTakedownTest { ClusterFixture fixture = createFixtureForAllUpHierarchicCluster( DistributionBuilder.withGroups(3).eachWithNodeCount(2), 0.51); - SystemStateListener listener = mock(SystemStateListener.class); - assertTrue(fixture.generator.notifyIfNewSystemState(fixture.cluster, listener)); - assertEquals("version:1 distributor:6 storage:6", fixture.generatedClusterState()); + assertEquals("distributor:6 storage:6", fixture.generatedClusterState()); // Same group as node 4 - assertEquals("version:2 distributor:6 storage:4", + assertEquals("distributor:6 storage:4", stateAfterStorageTransition(fixture, 5, State.DOWN)); // Same group as node 1 - assertEquals("version:3 distributor:6 storage:4 .0.s:d .1.s:d", + assertEquals("distributor:6 storage:4 .0.s:d .1.s:d", stateAfterStorageTransition(fixture, 0, State.DOWN)); } @@ -106,11 +107,11 @@ public class GroupAutoTakedownTest { DistributionBuilder.withGroups(3).eachWithNodeCount(2), 0.51); // Group #2 -> down - assertEquals("version:1 distributor:6 storage:4", + assertEquals("distributor:6 storage:4", stateAfterStorageTransition(fixture, 5, State.DOWN)); // Group #2 -> back up again - 
assertEquals("version:2 distributor:6 storage:6", + assertEquals("distributor:6 storage:6", stateAfterStorageTransition(fixture, 5, State.UP)); } @@ -119,16 +120,12 @@ public class GroupAutoTakedownTest { ClusterFixture fixture = createFixtureForAllUpHierarchicCluster( DistributionBuilder.withGroups(3).eachWithNodeCount(2), 0.51); - assertEquals("version:1 distributor:6 storage:4", + assertEquals("distributor:6 storage:4", stateAfterStorageTransition(fixture, 5, State.DOWN)); // 4, 5 in same group; this should not cause a new state since it's already implicitly down fixture.reportStorageNodeState(4, State.DOWN); - - SystemStateListener listener = mock(SystemStateListener.class); - assertFalse(fixture.generator.notifyIfNewSystemState(fixture.cluster, listener)); - - assertEquals("version:1 distributor:6 storage:4", fixture.generatedClusterState()); + assertEquals("distributor:6 storage:4", fixture.generatedClusterState()); } @Test @@ -139,7 +136,7 @@ public class GroupAutoTakedownTest { // Nodes 6 and 7 are taken down implicitly and should have a message reflecting this. // Node 8 is taken down by the fixture and gets a fixture-assigned message that // we should _not_ lose/overwrite. - assertEquals("version:1 distributor:9 storage:9 .6.s:d " + + assertEquals("distributor:9 storage:9 .6.s:d " + ".6.m:group\\x20node\\x20availability\\x20below\\x20configured\\x20threshold " + ".7.s:d " + ".7.m:group\\x20node\\x20availability\\x20below\\x20configured\\x20threshold " + @@ -151,12 +148,12 @@ public class GroupAutoTakedownTest { public void legacy_cluster_wide_availabilty_ratio_is_computed_after_group_takedowns() { ClusterFixture fixture = createFixtureForAllUpHierarchicCluster( DistributionBuilder.withGroups(3).eachWithNodeCount(2), 0.51); - fixture.generator.setMinNodesUp(5, 5, 0.51, 0.51); + fixture.setMinNodesUp(5, 5, 0.51, 0.51); // Taking down a node in a group forces the entire group down, which leaves us with // only 4 content nodes (vs. 
minimum of 5 as specified above). The entire cluster // should be marked as down in this case. - assertEquals("version:1 cluster:d distributor:6 storage:4", + assertEquals("cluster:d distributor:6 storage:4", stateAfterStorageTransition(fixture, 5, State.DOWN)); } @@ -165,16 +162,12 @@ public class GroupAutoTakedownTest { ClusterFixture fixture = createFixtureForAllUpHierarchicCluster( DistributionBuilder.withGroups(3).eachWithNodeCount(3), 0.99); - NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 5)); - fixture.generator.proposeNewNodeState(nodeInfo, new NodeState(NodeType.STORAGE, State.MAINTENANCE)); - SystemStateListener listener = mock(SystemStateListener.class); - assertTrue(fixture.generator.notifyIfNewSystemState(fixture.cluster, listener)); - + fixture.proposeStorageNodeWantedState(5, State.MAINTENANCE); // Maintenance not counted as down, so group still up - assertEquals("version:1 distributor:9 storage:9 .5.s:m", fixture.generatedClusterState()); + assertEquals("distributor:9 storage:9 .5.s:m", fixture.generatedClusterState()); // Group goes down, but maintenance node should still be in maintenance - assertEquals("version:2 distributor:9 storage:9 .3.s:d .4.s:d .5.s:m", + assertEquals("distributor:9 storage:9 .3.s:d .4.s:d .5.s:m", stateAfterStorageTransition(fixture, 4, State.DOWN)); } @@ -186,51 +179,16 @@ public class GroupAutoTakedownTest { // Our timers are mocked, so taking down node 4 will deterministically transition to // a transient maintenance mode. Group should not be taken down here. - assertEquals("version:1 distributor:9 storage:9 .4.s:m", + assertEquals("distributor:9 storage:9 .4.s:m", stateAfterStorageTransition(fixture, 4, State.DOWN)); // However, once grace period expires the group should be taken down. 
fixture.timer.advanceTime(1001); NodeStateOrHostInfoChangeHandler changeListener = mock(NodeStateOrHostInfoChangeHandler.class); - fixture.generator.watchTimers(fixture.cluster, changeListener); - SystemStateListener stateListener = mock(SystemStateListener.class); - assertTrue(fixture.generator.notifyIfNewSystemState(fixture.cluster, stateListener)); - - assertEquals("version:2 distributor:9 storage:9 .3.s:d .4.s:d .5.s:d", fixture.generatedClusterState()); - } - - private static class NodeEventWithDescription extends ArgumentMatcher<NodeEvent> { - private final String expected; - - NodeEventWithDescription(String expected) { - this.expected = expected; - } - - @Override - public boolean matches(Object o) { - return expected.equals(((NodeEvent)o).getDescription()); - } - } + fixture.nodeStateChangeHandler.watchTimers( + fixture.cluster, fixture.annotatedGeneratedClusterState().getClusterState(), changeListener); - private static NodeEventWithDescription nodeEventWithDescription(String description) { - return new NodeEventWithDescription(description); - } - - private static class EventForNode extends ArgumentMatcher<NodeEvent> { - private final Node expected; - - EventForNode(Node expected) { - this.expected = expected; - } - - @Override - public boolean matches(Object o) { - return ((NodeEvent)o).getNode().getNode().equals(expected); - } - } - - private static EventForNode eventForNode(Node expected) { - return new EventForNode(expected); + assertEquals("distributor:9 storage:9 .3.s:d .4.s:d .5.s:d", fixture.generatedClusterState()); } private static Node contentNode(int index) { @@ -242,13 +200,14 @@ public class GroupAutoTakedownTest { ClusterFixture fixture = createFixtureForAllUpHierarchicCluster( DistributionBuilder.withGroups(3).eachWithNodeCount(2), 0.51); - assertEquals("version:1 distributor:6 storage:4", - stateAfterStorageTransition(fixture, 5, State.DOWN)); + final List<Event> events = EventDiffCalculator.computeEventDiff(EventDiffCalculator.params() + 
.cluster(fixture.cluster) + .fromState(fixture.annotatedGeneratedClusterState()) + .toState(annotatedStateAfterStorageTransition(fixture, 5, State.DOWN))); - verify(fixture.eventLog).addNodeOnlyEvent(argThat(allOf( - nodeEventWithDescription("Setting node down as the total availability of its group is " + - "below the configured threshold"), - eventForNode(contentNode(4)))), any()); + assertThat(events, hasItem(allOf( + nodeEventWithDescription("Group node availability is below configured threshold"), + eventForNode(contentNode(4))))); } @Test @@ -256,30 +215,31 @@ public class GroupAutoTakedownTest { ClusterFixture fixture = createFixtureForAllUpHierarchicCluster( DistributionBuilder.withGroups(3).eachWithNodeCount(2), 0.51); - assertEquals("version:1 distributor:6 storage:4", + assertEquals("distributor:6 storage:4", stateAfterStorageTransition(fixture, 5, State.DOWN)); - assertEquals("version:2 distributor:6 storage:6", - stateAfterStorageTransition(fixture, 5, State.UP)); - verify(fixture.eventLog).addNodeOnlyEvent(argThat(allOf( - nodeEventWithDescription("Group availability restored; taking node back up"), - eventForNode(contentNode(4)))), any()); + final List<Event> events = EventDiffCalculator.computeEventDiff(EventDiffCalculator.params() + .cluster(fixture.cluster) + .fromState(fixture.annotatedGeneratedClusterState()) + .toState(annotatedStateAfterStorageTransition(fixture, 5, State.UP))); + + assertThat(events, hasItem(allOf( + nodeEventWithDescription("Group node availability has been restored"), + eventForNode(contentNode(4))))); } @Test - public void wanted_state_retired_implicitly_down_node_transitioned_it_to_retired_mode_immediately() { + public void wanted_state_retired_implicitly_down_node_is_transitioned_to_retired_mode_immediately() { ClusterFixture fixture = createFixtureForAllUpHierarchicCluster( DistributionBuilder.withGroups(3).eachWithNodeCount(3), 0.99); - assertEquals("version:1 distributor:9 storage:6", + assertEquals("distributor:9 
storage:6", stateAfterStorageTransition(fixture, 6, State.DOWN)); // Node 7 is implicitly down. Mark wanted state as retired. It should now be Retired // but not Down. fixture.proposeStorageNodeWantedState(7, State.RETIRED); - SystemStateListener stateListener = mock(SystemStateListener.class); - assertTrue(fixture.generator.notifyIfNewSystemState(fixture.cluster, stateListener)); - assertEquals("version:2 distributor:9 storage:8 .6.s:d .7.s:r", fixture.generatedClusterState()); + assertEquals("distributor:9 storage:8 .6.s:d .7.s:r", fixture.generatedClusterState()); } @Test @@ -287,9 +247,9 @@ public class GroupAutoTakedownTest { ClusterFixture fixture = createFixtureForAllUpHierarchicCluster( DistributionBuilder.withGroups(3).eachWithNodeCount(2), 0.49); - assertEquals("version:1 distributor:6 storage:6 .4.s:d", + assertEquals("distributor:6 storage:6 .4.s:d", stateAfterStorageTransition(fixture, 4, State.DOWN)); - assertEquals("version:2 distributor:6 storage:4", + assertEquals("distributor:6 storage:4", stateAfterStorageTransition(fixture, 5, State.DOWN)); // Node 5 gets config-retired under our feet. @@ -299,9 +259,8 @@ public class GroupAutoTakedownTest { // TODO this should ideally also set the retired flag in the distribution // config, but only the ConfiguredNodes are actually looked at currently. 
fixture.cluster.setNodes(nodes); - fixture.generator.setNodes(fixture.cluster.clusterInfo()); - assertEquals("version:3 distributor:6 storage:6 .4.s:d .5.s:r", + assertEquals("distributor:6 storage:6 .4.s:d .5.s:r", stateAfterStorageTransition(fixture, 5, State.UP)); } @@ -314,14 +273,12 @@ public class GroupAutoTakedownTest { newState.setInitProgress(0.5); fixture.reportStorageNodeState(4, newState); - SystemStateListener stateListener = mock(SystemStateListener.class); - assertTrue(fixture.generator.notifyIfNewSystemState(fixture.cluster, stateListener)); - assertEquals("version:1 distributor:6 storage:6 .4.s:i .4.i:0.5", fixture.generatedClusterState()); + assertEquals("distributor:6 storage:6 .4.s:i .4.i:0.5", fixture.generatedClusterState()); - assertEquals("version:2 distributor:6 storage:4", + assertEquals("distributor:6 storage:4", stateAfterStorageTransition(fixture, 5, State.DOWN)); - assertEquals("version:3 distributor:6 storage:6 .4.s:i .4.i:0.5", + assertEquals("distributor:6 storage:6 .4.s:i .4.i:0.5", stateAfterStorageTransition(fixture, 5, State.UP)); } @@ -330,20 +287,17 @@ public class GroupAutoTakedownTest { ClusterFixture fixture = createFixtureForAllUpHierarchicCluster( DistributionBuilder.withGroups(3).eachWithNodeCount(2), 0.51); - final Node node = new Node(NodeType.STORAGE, 4); final NodeState newState = new NodeState(NodeType.STORAGE, State.UP); newState.setDiskCount(7); newState.setDiskState(5, new DiskState(State.DOWN)); fixture.reportStorageNodeState(4, newState); - SystemStateListener stateListener = mock(SystemStateListener.class); - assertTrue(fixture.generator.notifyIfNewSystemState(fixture.cluster, stateListener)); - assertEquals("version:1 distributor:6 storage:6 .4.d:7 .4.d.5.s:d", fixture.generatedClusterState()); + assertEquals("distributor:6 storage:6 .4.d:7 .4.d.5.s:d", fixture.generatedClusterState()); - assertEquals("version:2 distributor:6 storage:4", + assertEquals("distributor:6 storage:4", 
stateAfterStorageTransition(fixture, 5, State.DOWN)); - assertEquals("version:3 distributor:6 storage:6 .4.d:7 .4.d.5.s:d", + assertEquals("distributor:6 storage:6 .4.d:7 .4.d.5.s:d", stateAfterStorageTransition(fixture, 5, State.UP)); } @@ -352,19 +306,15 @@ public class GroupAutoTakedownTest { ClusterFixture fixture = createFixtureForAllUpHierarchicCluster( DistributionBuilder.withGroups(3).eachWithNodeCount(3), 0.60); - NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 5)); - nodeInfo.setWantedState(new NodeState(NodeType.STORAGE, State.DOWN).setDescription("borkbork")); - fixture.generator.proposeNewNodeState(nodeInfo, nodeInfo.getWantedState()); - SystemStateListener listener = mock(SystemStateListener.class); - assertTrue(fixture.generator.notifyIfNewSystemState(fixture.cluster, listener)); + fixture.proposeStorageNodeWantedState(5, State.DOWN, "borkbork"); - assertEquals("version:1 distributor:9 storage:9 .5.s:d .5.m:borkbork", fixture.verboseGeneratedClusterState()); + assertEquals("distributor:9 storage:9 .5.s:d .5.m:borkbork", fixture.verboseGeneratedClusterState()); - assertEquals("version:2 distributor:9 storage:9 " + + assertEquals("distributor:9 storage:9 " + ".3.s:d .3.m:group\\x20node\\x20availability\\x20below\\x20configured\\x20threshold " + ".4.s:d .4.m:mockdesc .5.s:d .5.m:borkbork", verboseStateAfterStorageTransition(fixture, 4, State.DOWN)); - assertEquals("version:3 distributor:9 storage:9 .5.s:d .5.m:borkbork", + assertEquals("distributor:9 storage:9 .5.s:d .5.m:borkbork", verboseStateAfterStorageTransition(fixture, 4, State.UP)); } @@ -378,25 +328,23 @@ public class GroupAutoTakedownTest { fixture.reportStorageNodeState(4, newState); - SystemStateListener listener = mock(SystemStateListener.class); - assertTrue(fixture.generator.notifyIfNewSystemState(fixture.cluster, listener)); - - assertEquals("version:1 distributor:6 storage:6 .4.t:123456", fixture.generatedClusterState()); + assertEquals("distributor:6 
storage:6 .4.t:123456", fixture.generatedClusterState()); DatabaseHandler handler = mock(DatabaseHandler.class); DatabaseHandler.Context context = mock(DatabaseHandler.Context.class); when(context.getCluster()).thenReturn(fixture.cluster); - fixture.generator.handleAllDistributorsInSync(handler, context); - assertTrue(fixture.generator.notifyIfNewSystemState(fixture.cluster, listener)); + Set<ConfiguredNode> nodes = new HashSet<>(fixture.cluster.clusterInfo().getConfiguredNodes().values()); + fixture.nodeStateChangeHandler.handleAllDistributorsInSync( + fixture.annotatedGeneratedClusterState().getClusterState(), nodes, handler, context); // Timestamp should now be cleared from state - assertEquals("version:2 distributor:6 storage:6", fixture.generatedClusterState()); + assertEquals("distributor:6 storage:6", fixture.generatedClusterState()); // Trigger a group down+up edge. Timestamp should _not_ be reintroduced since it was previously cleared. - assertEquals("version:3 distributor:6 storage:4", + assertEquals("distributor:6 storage:4", stateAfterStorageTransition(fixture, 5, State.DOWN)); - assertEquals("version:4 distributor:6 storage:6", + assertEquals("distributor:6 storage:6", stateAfterStorageTransition(fixture, 5, State.UP)); } diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/MasterElectionTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/MasterElectionTest.java index ba2cd287a9a..80435ee7c7d 100644 --- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/MasterElectionTest.java +++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/MasterElectionTest.java @@ -191,15 +191,15 @@ public class MasterElectionTest extends FleetControllerTest { log.log(LogLevel.INFO, "Leaving waitForMaster"); } - private static class VersionMonotonicityChecker { + private static class StrictlyIncreasingVersionChecker { private ClusterState lastState; - 
private VersionMonotonicityChecker(ClusterState initialState) { + private StrictlyIncreasingVersionChecker(ClusterState initialState) { this.lastState = initialState; } - public static VersionMonotonicityChecker bootstrappedWith(ClusterState initialState) { - return new VersionMonotonicityChecker(initialState); + public static StrictlyIncreasingVersionChecker bootstrappedWith(ClusterState initialState) { + return new StrictlyIncreasingVersionChecker(initialState); } public void updateAndVerify(ClusterState currentState) { @@ -207,7 +207,7 @@ public class MasterElectionTest extends FleetControllerTest { lastState = currentState; if (currentState.getVersion() <= last.getVersion()) { throw new IllegalStateException( - String.format("Cluster state version monotonicity invariant broken! " + + String.format("Cluster state version strict increase invariant broken! " + "Old state was '%s', new state is '%s'", last, currentState)); } } @@ -226,7 +226,8 @@ public class MasterElectionTest extends FleetControllerTest { waitForStableSystem(); waitForMaster(0); Arrays.asList(0, 1, 2, 3, 4).stream().forEach(this::waitForCompleteCycle); - VersionMonotonicityChecker checker = VersionMonotonicityChecker.bootstrappedWith(fleetControllers.get(0).getClusterState()); + StrictlyIncreasingVersionChecker checker = StrictlyIncreasingVersionChecker.bootstrappedWith( + fleetControllers.get(0).getClusterState()); fleetControllers.get(0).shutdown(); waitForMaster(1); Arrays.asList(1, 2, 3, 4).stream().forEach(this::waitForCompleteCycle); diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NodeInfoTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NodeInfoTest.java new file mode 100644 index 00000000000..bf0adf7736c --- /dev/null +++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NodeInfoTest.java @@ -0,0 +1,80 @@ +// Copyright 2016 Yahoo Inc. 
Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.clustercontroller.core; + +import com.yahoo.vdslib.state.Node; +import com.yahoo.vdslib.state.NodeType; +import com.yahoo.vdslib.state.State; +import org.junit.Test; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +public class NodeInfoTest { + + @Test + public void unstable_init_flag_is_initially_clear() { + ClusterFixture fixture = ClusterFixture.forFlatCluster(3); + final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 1)); + assertFalse(nodeInfo.recentlyObservedUnstableDuringInit()); + } + + private static ClusterFixture fixtureWithNodeMarkedAsUnstableInit(int nodeIndex) { + return ClusterFixture.forFlatCluster(3) + .reportStorageNodeState(nodeIndex, State.INITIALIZING) + .reportStorageNodeState(nodeIndex, State.DOWN); + } + + @Test + public void down_edge_during_init_state_marks_as_unstable_init() { + ClusterFixture fixture = fixtureWithNodeMarkedAsUnstableInit(1); + + final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 1)); + assertTrue(nodeInfo.recentlyObservedUnstableDuringInit()); + } + + @Test + public void stopping_edge_during_init_does_not_mark_as_unstable_init() { + ClusterFixture fixture = ClusterFixture.forFlatCluster(3).reportStorageNodeState(0, State.INITIALIZING); + fixture.reportStorageNodeState(0, State.STOPPING); + final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 0)); + + assertFalse(nodeInfo.recentlyObservedUnstableDuringInit()); + } + + /** + * The cluster controller will, after a time of observed stable state, reset the crash + * counter for a given node. This should also reset the unstable init flag to keep it + * from haunting a now stable node. 
+ */ + @Test + public void zeroing_crash_count_resets_unstable_init_flag() { + ClusterFixture fixture = fixtureWithNodeMarkedAsUnstableInit(1); + + final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 1)); + nodeInfo.setPrematureCrashCount(0); + assertFalse(nodeInfo.recentlyObservedUnstableDuringInit()); + } + + /** + * A non-zero crash count update, on the other hand, implies the node is suffering + * further instabilities and should not clear the unstable init flag. + */ + @Test + public void non_zero_crash_count_update_does_not_reset_unstable_init_flag() { + ClusterFixture fixture = fixtureWithNodeMarkedAsUnstableInit(1); + + final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 1)); + nodeInfo.setPrematureCrashCount(3); + assertTrue(nodeInfo.recentlyObservedUnstableDuringInit()); + } + + @Test + public void non_zero_crash_count_does_not_implicitly_set_unstable_init_flag() { + ClusterFixture fixture = ClusterFixture.forFlatCluster(3); + + final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 1)); + nodeInfo.setPrematureCrashCount(1); + assertFalse(nodeInfo.recentlyObservedUnstableDuringInit()); + } + +} diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/RpcServerTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/RpcServerTest.java index 2816b75622e..f7f86907205 100644 --- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/RpcServerTest.java +++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/RpcServerTest.java @@ -437,13 +437,13 @@ public class RpcServerTest extends FleetControllerTest { { // Configuration change: Remove the previously retired nodes /* TODO: Verify current result: version:23 distributor:7 .0.s:d .1.s:d .2.s:d .3.s:d .4.s:d storage:7 .0.s:m .1.s:m .2.s:m .3.s:m .4.s:m - TODO: Make this work without stopping/disconnecting (see 
SystemStateGenerator.setNodes + TODO: Make this work without stopping/disconnecting (see StateChangeHandler.setNodes Set<ConfiguredNode> configuredNodes = new TreeSet<>(); configuredNodes.add(new ConfiguredNode(5, false)); configuredNodes.add(new ConfiguredNode(6, false)); FleetControllerOptions options = new FleetControllerOptions("mycluster", configuredNodes); options.slobrokConnectionSpecs = this.options.slobrokConnectionSpecs; - this.options.maxInitProgressTime = 30000; + this.options.maxInitProgressTimeMs = 30000; this.options.stableStateTimePeriod = 60000; fleetController.updateOptions(options, 0); for (int i = 0; i < 5*2; i++) { diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/SystemStateGeneratorTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeHandlerTest.java index 35118933b42..f591e8efc06 100644 --- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/SystemStateGeneratorTest.java +++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeHandlerTest.java @@ -6,7 +6,6 @@ import com.yahoo.vdslib.distribution.Distribution; import com.yahoo.vdslib.state.*; import com.yahoo.vespa.clustercontroller.core.hostinfo.HostInfo; import com.yahoo.vespa.clustercontroller.core.listeners.NodeStateOrHostInfoChangeHandler; -import com.yahoo.vespa.clustercontroller.core.listeners.SystemStateListener; import com.yahoo.vespa.clustercontroller.core.mocks.TestEventLog; import com.yahoo.vespa.clustercontroller.core.testutils.LogFormatter; import junit.framework.TestCase; @@ -16,33 +15,16 @@ import java.util.Set; import java.util.TreeSet; import java.util.logging.Logger; -public class SystemStateGeneratorTest extends TestCase { - private static final Logger log = Logger.getLogger(SystemStateGeneratorTest.class.getName()); - class Config { +public class StateChangeHandlerTest extends TestCase { + private static final Logger log = 
Logger.getLogger(StateChangeHandlerTest.class.getName()); + private class Config { int nodeCount = 3; int stableStateTime = 1000 * 60000; int maxSlobrokDisconnectPeriod = 60000; int maxPrematureCrashes = 3; } - class TestSystemStateListener implements SystemStateListener { - LinkedList<ClusterState> states = new LinkedList<>(); - @Override - public void handleNewSystemState(ClusterState state) { - states.add(state); - } - - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("States("); - for (ClusterState state : states) sb.append('\n').append(state.toString()); - sb.append(")"); - return sb.toString(); - } - - } - - class TestNodeStateOrHostInfoChangeHandler implements NodeStateOrHostInfoChangeHandler { + private class TestNodeStateOrHostInfoChangeHandler implements NodeStateOrHostInfoChangeHandler { LinkedList<String> events = new LinkedList<>(); @@ -75,9 +57,9 @@ public class SystemStateGeneratorTest extends TestCase { private Set<ConfiguredNode> configuredNodes = new TreeSet<>(); private Config config; private ContentCluster cluster; - private SystemStateGenerator generator; - private TestSystemStateListener systemStateListener; + private StateChangeHandler nodeStateChangeHandler; private TestNodeStateOrHostInfoChangeHandler nodeStateUpdateListener; + private final ClusterStateGenerator.Params params = new ClusterStateGenerator.Params(); public void setUp() { LogFormatter.initializeLogging(); @@ -88,20 +70,18 @@ public class SystemStateGeneratorTest extends TestCase { this.config = config; for (int i=0; i<config.nodeCount; ++i) configuredNodes.add(new ConfiguredNode(i, false)); cluster = new ContentCluster("testcluster", configuredNodes, distribution, 0, 0.0); - generator = new SystemStateGenerator(clock, eventLog, null); - generator.setNodes(cluster.clusterInfo()); - generator.setStableStateTimePeriod(config.stableStateTime); - generator.setMaxPrematureCrashes(config.maxPrematureCrashes); - 
generator.setMaxSlobrokDisconnectGracePeriod(config.maxSlobrokDisconnectPeriod); - generator.setMinNodesUp(1, 1, 0, 0); - systemStateListener = new TestSystemStateListener(); + nodeStateChangeHandler = new StateChangeHandler(clock, eventLog, null); + params.minStorageNodesUp(1).minDistributorNodesUp(1) + .minRatioOfStorageNodesUp(0.0).minRatioOfDistributorNodesUp(0.0) + .maxPrematureCrashes(config.maxPrematureCrashes) + .transitionTimes(5000) + .cluster(cluster); nodeStateUpdateListener = new TestNodeStateOrHostInfoChangeHandler(); } - private void assertNewClusterStateReceived() { - assertTrue(generator.notifyIfNewSystemState(cluster, systemStateListener)); - assertTrue(systemStateListener.toString(), systemStateListener.states.size() == 1); - systemStateListener.states.clear(); + private ClusterState currentClusterState() { + params.currentTimeInMilllis(clock.getCurrentTimeInMillis()); + return ClusterStateGenerator.generatedStateFrom(params).getClusterState(); } private void startWithStableStateClusterWithNodesUp() { @@ -109,61 +89,55 @@ public class SystemStateGeneratorTest extends TestCase { for (ConfiguredNode i : configuredNodes) { NodeInfo nodeInfo = cluster.clusterInfo().setRpcAddress(new Node(type, i.index()), null); nodeInfo.markRpcAddressLive(); - generator.handleNewReportedNodeState(nodeInfo, new NodeState(type, State.UP), null); + nodeStateChangeHandler.handleNewReportedNodeState( + currentClusterState(), nodeInfo, new NodeState(type, State.UP), null); nodeInfo.setReportedState(new NodeState(type, State.UP), clock.getCurrentTimeInMillis()); } } - assertNewClusterStateReceived(); for (NodeType type : NodeType.getTypes()) { for (ConfiguredNode i : configuredNodes) { Node n = new Node(type, i.index()); - assertEquals(State.UP, generator.getClusterState().getNodeState(n).getState()); + assertEquals(State.UP, currentClusterState().getNodeState(n).getState()); } } clock.advanceTime(config.stableStateTime); } private void markNodeOutOfSlobrok(Node node) { + 
final ClusterState stateBefore = currentClusterState(); log.info("Marking " + node + " out of slobrok"); cluster.getNodeInfo(node).markRpcAddressOutdated(clock); - generator.handleMissingNode(cluster.getNodeInfo(node), nodeStateUpdateListener); - assertTrue(nodeStateUpdateListener.toString(), nodeStateUpdateListener.events.isEmpty()); - nodeStateUpdateListener.events.clear(); + nodeStateChangeHandler.handleMissingNode(stateBefore, cluster.getNodeInfo(node), nodeStateUpdateListener); assertTrue(eventLog.toString(), eventLog.toString().contains("Node is no longer in slobrok")); eventLog.clear(); } private void markNodeBackIntoSlobrok(Node node, State state) { + final ClusterState stateBefore = currentClusterState(); log.info("Marking " + node + " back in slobrok"); cluster.getNodeInfo(node).markRpcAddressLive(); - generator.handleReturnedRpcAddress(cluster.getNodeInfo(node)); - assertEquals(0, nodeStateUpdateListener.events.size()); - assertEquals(0, systemStateListener.states.size()); - generator.handleNewReportedNodeState(cluster.getNodeInfo(node), new NodeState(node.getType(), state), nodeStateUpdateListener); + nodeStateChangeHandler.handleReturnedRpcAddress(cluster.getNodeInfo(node)); + nodeStateChangeHandler.handleNewReportedNodeState( + stateBefore, cluster.getNodeInfo(node), + new NodeState(node.getType(), state), nodeStateUpdateListener); cluster.getNodeInfo(node).setReportedState(new NodeState(node.getType(), state), clock.getCurrentTimeInMillis()); - assertEquals(0, nodeStateUpdateListener.events.size()); - assertEquals(0, systemStateListener.states.size()); } private void verifyClusterStateChanged(Node node, State state) { log.info("Verifying cluster state has been updated for " + node + " to " + state); - assertTrue(generator.notifyIfNewSystemState(cluster, systemStateListener)); - assertEquals(1, systemStateListener.states.size()); - assertEquals(state, systemStateListener.states.get(0).getNodeState(node).getState()); - 
systemStateListener.states.clear(); - assertEquals(state, generator.getClusterState().getNodeState(node).getState()); + assertTrue(nodeStateChangeHandler.stateMayHaveChanged()); + assertEquals(state, currentClusterState().getNodeState(node).getState()); } private void verifyNodeStateAfterTimerWatch(Node node, State state) { log.info("Verifying state of node after timer watch."); - generator.watchTimers(cluster, nodeStateUpdateListener); + nodeStateChangeHandler.watchTimers(cluster, currentClusterState(), nodeStateUpdateListener); assertEquals(0, nodeStateUpdateListener.events.size()); verifyClusterStateChanged(node, state); } private void verifyPrematureCrashCountCleared(Node node) { - assertTrue(generator.watchTimers(cluster, nodeStateUpdateListener)); - assertEquals(0, nodeStateUpdateListener.events.size()); + assertTrue(nodeStateChangeHandler.watchTimers(cluster, currentClusterState(), nodeStateUpdateListener)); assertEquals(0, cluster.getNodeInfo(node).getPrematureCrashCount()); } @@ -175,15 +149,15 @@ public class SystemStateGeneratorTest extends TestCase { log.info("Iteration " + j); assertEquals(0, cluster.getNodeInfo(node).getPrematureCrashCount()); assertEquals(State.UP, cluster.getNodeInfo(node).getWantedState().getState()); - assertEquals(State.UP, generator.getClusterState().getNodeState(node).getState()); + assertEquals(State.UP, currentClusterState().getNodeState(node).getState()); for (int k=0; k<config.maxPrematureCrashes; ++k) { log.info("Premature iteration " + k); markNodeOutOfSlobrok(node); log.info("Passing max disconnect time period. 
Watching timers"); clock.advanceTime(config.maxSlobrokDisconnectPeriod); - verifyNodeStateAfterTimerWatch(node, State.MAINTENANCE); + cluster.getNodeInfo(node).setReportedState(new NodeState(node.getType(), State.DOWN), clock.getCurrentTimeInMillis()); assertEquals(k, cluster.getNodeInfo(node).getPrematureCrashCount()); diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeTest.java index b94691bb880..c31f80d9b53 100644 --- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeTest.java +++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeTest.java @@ -8,8 +8,10 @@ import com.yahoo.vespa.clustercontroller.core.database.DatabaseHandler; import com.yahoo.vespa.clustercontroller.core.testutils.StateWaiter; import com.yahoo.vespa.clustercontroller.utils.util.NoMetricReporter; import org.junit.Before; -import org.junit.Ignore; import org.junit.Test; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -43,7 +45,7 @@ public class StateChangeTest extends FleetControllerTest { options.minStorageNodesUp, options.minRatioOfStorageNodesUp); NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, eventLog); DatabaseHandler database = new DatabaseHandler(timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer); - SystemStateGenerator stateGenerator = new SystemStateGenerator(timer, eventLog, metricUpdater); + StateChangeHandler stateGenerator = new StateChangeHandler(timer, eventLog, metricUpdater); SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer); MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, 
options.fleetControllerCount, timer, timer); ctrl = new FleetController(timer, eventLog, cluster, stateGatherer, communicator, null, null, communicator, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options); @@ -109,8 +111,13 @@ public class StateChangeTest extends FleetControllerTest { // Now, fleet controller should have generated a new cluster state. ctrl.tick(); - assertEquals("version:6 distributor:10 .0.s:i .0.i:0.0 .1.s:i .1.i:0.0 .2.s:i .2.i:0.0 .3.s:i .3.i:0.0 .4.s:i .4.i:0.0 .5.s:i .5.i:0.0 .6.s:i .6.i:0.0 .7.s:i .7.i:0.0 .8.s:i .8.i:0.0 .9.s:i .9.i:0.0 storage:10 .0.s:i .0.i:0.9 .1.s:i .1.i:0.9 .2.s:i .2.i:0.9 .3.s:i .3.i:0.9 .4.s:i .4.i:0.9 .5.s:i .5.i:0.9 .6.s:i .6.i:0.9 .7.s:i .7.i:0.9 .8.s:i .8.i:0.9 .9.s:i .9.i:0.9", - ctrl.getSystemState().toString()); + // Regular init progress does not update the cluster state until the node is done initializing (or goes down, + // whichever comes first). + assertEquals("version:6 distributor:10 .0.s:i .0.i:0.0 .1.s:i .1.i:0.0 .2.s:i .2.i:0.0 .3.s:i .3.i:0.0 " + + ".4.s:i .4.i:0.0 .5.s:i .5.i:0.0 .6.s:i .6.i:0.0 .7.s:i .7.i:0.0 .8.s:i .8.i:0.0 " + + ".9.s:i .9.i:0.0 storage:10 .0.s:i .0.i:0.1 .1.s:i .1.i:0.1 .2.s:i .2.i:0.1 .3.s:i .3.i:0.1 " + + ".4.s:i .4.i:0.1 .5.s:i .5.i:0.1 .6.s:i .6.i:0.1 .7.s:i .7.i:0.1 .8.s:i .8.i:0.1 .9.s:i .9.i:0.1", + ctrl.consolidatedClusterState().toString()); timer.advanceTime(options.maxInitProgressTime / 20); ctrl.tick(); @@ -131,24 +138,23 @@ public class StateChangeTest extends FleetControllerTest { assertEquals("version:8 distributor:10 storage:10", ctrl.getSystemState().toString()); - verifyNodeEvents(new Node(NodeType.DISTRIBUTOR, 0), "Event: distributor.0: Now reporting state U\n" + - "Event: distributor.0: Altered node state in cluster state from 'D' to 'U'.\n" + + "Event: distributor.0: Altered node state in cluster state from 'D: Node not seen in slobrok.' 
to 'U'\n" + "Event: distributor.0: Now reporting state I, i 0.00\n" + - "Event: distributor.0: Altered node state in cluster state from 'U' to 'I, i 0.00'.\n" + + "Event: distributor.0: Altered node state in cluster state from 'U' to 'I, i 0.00'\n" + "Event: distributor.0: Now reporting state U\n" + - "Event: distributor.0: Altered node state in cluster state from 'I, i 0.00' to 'U'.\n"); + "Event: distributor.0: Altered node state in cluster state from 'I, i 0.00' to 'U'\n"); verifyNodeEvents(new Node(NodeType.STORAGE, 0), "Event: storage.0: Now reporting state U\n" + - "Event: storage.0: Altered node state in cluster state from 'D' to 'U'.\n" + + "Event: storage.0: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'\n" + "Event: storage.0: Now reporting state I, i 0.00 (ls)\n" + - "Event: storage.0: Altered node state in cluster state from 'U' to 'D: Listing buckets. Progress 0.0 %.'.\n" + + "Event: storage.0: Altered node state in cluster state from 'U' to 'D'\n" + "Event: storage.0: Now reporting state I, i 0.100 (read)\n" + - "Event: storage.0: Altered node state in cluster state from 'D: Listing buckets. Progress 0.0 %.' 
to 'I, i 0.100 (read)'.\n" + + "Event: storage.0: Altered node state in cluster state from 'D' to 'I, i 0.100 (read)'\n" + "Event: storage.0: Now reporting state U\n" + - "Event: storage.0: Altered node state in cluster state from 'I, i 0.900 (read)' to 'U'.\n"); + "Event: storage.0: Altered node state in cluster state from 'I, i 0.100 (read)' to 'U'\n"); } @Test @@ -172,7 +178,6 @@ public class StateChangeTest extends FleetControllerTest { assertEquals("version:4 distributor:10 .0.s:d storage:10", ctrl.getSystemState().toString()); timer.advanceTime(1000); - long distStartTime = timer.getCurrentTimeInMillis() / 1000; ctrl.tick(); @@ -210,23 +215,24 @@ public class StateChangeTest extends FleetControllerTest { verifyNodeEvents(new Node(NodeType.DISTRIBUTOR, 0), "Event: distributor.0: Now reporting state U\n" + - "Event: distributor.0: Altered node state in cluster state from 'D' to 'U'.\n" + + "Event: distributor.0: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'\n" + "Event: distributor.0: Failed to get node state: D: Closed at other end\n" + "Event: distributor.0: Stopped or possibly crashed after 0 ms, which is before stable state time period. 
Premature crash count is now 1.\n" + - "Event: distributor.0: Altered node state in cluster state from 'U' to 'D: Closed at other end'.\n" + + "Event: distributor.0: Altered node state in cluster state from 'U' to 'D: Closed at other end'\n" + "Event: distributor.0: Now reporting state U, t 12345678\n" + - "Event: distributor.0: Altered node state in cluster state from 'D: Closed at other end' to 'U, t 12345678'.\n"); + "Event: distributor.0: Altered node state in cluster state from 'D: Closed at other end' to 'U, t 12345678'\n" + + "Event: distributor.0: Altered node state in cluster state from 'U, t 12345678' to 'U'\n"); verifyNodeEvents(new Node(NodeType.STORAGE, 0), "Event: storage.0: Now reporting state U\n" + - "Event: storage.0: Altered node state in cluster state from 'D' to 'U'.\n" + + "Event: storage.0: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'\n" + "Event: storage.0: Failed to get node state: D: Closed at other end\n" + "Event: storage.0: Stopped or possibly crashed after 1000 ms, which is before stable state time period. Premature crash count is now 1.\n" + - "Event: storage.0: Altered node state in cluster state from 'U' to 'M: Closed at other end'.\n" + + "Event: storage.0: Altered node state in cluster state from 'U' to 'M: Closed at other end'\n" + "Event: storage.0: 5001 milliseconds without contact. 
Marking node down.\n" + - "Event: storage.0: Altered node state in cluster state from 'M: Closed at other end' to 'D: Closed at other end'.\n" + + "Event: storage.0: Altered node state in cluster state from 'M: Closed at other end' to 'D: Closed at other end'\n" + "Event: storage.0: Now reporting state U, t 12345679\n" + - "Event: storage.0: Altered node state in cluster state from 'D: Closed at other end' to 'U, t 12345679'.\n"); + "Event: storage.0: Altered node state in cluster state from 'D: Closed at other end' to 'U, t 12345679'\n"); assertEquals(1, ctrl.getCluster().getNodeInfo(new Node(NodeType.DISTRIBUTOR, 0)).getPrematureCrashCount()); assertEquals(1, ctrl.getCluster().getNodeInfo(new Node(NodeType.STORAGE, 0)).getPrematureCrashCount()); @@ -239,7 +245,7 @@ public class StateChangeTest extends FleetControllerTest { @Test public void testNodeGoingDownAndUpNotifying() throws Exception { - // Same test as above, but node manage to notify why it is going down first. + // Same test as above, but node manages to notify why it is going down first. FleetControllerOptions options = new FleetControllerOptions("mycluster", createNodes(10)); options.nodeStateRequestTimeoutMS = 60 * 60 * 1000; options.maxSlobrokDisconnectGracePeriod = 100000; @@ -291,21 +297,21 @@ public class StateChangeTest extends FleetControllerTest { verifyNodeEvents(new Node(NodeType.DISTRIBUTOR, 0), "Event: distributor.0: Now reporting state U\n" + - "Event: distributor.0: Altered node state in cluster state from 'D' to 'U'.\n" + + "Event: distributor.0: Altered node state in cluster state from 'D: Node not seen in slobrok.' 
to 'U'\n" + "Event: distributor.0: Failed to get node state: D: controlled shutdown\n" + - "Event: distributor.0: Altered node state in cluster state from 'U' to 'D: controlled shutdown'.\n" + + "Event: distributor.0: Altered node state in cluster state from 'U' to 'D: controlled shutdown'\n" + "Event: distributor.0: Now reporting state U\n" + - "Event: distributor.0: Altered node state in cluster state from 'D: controlled shutdown' to 'U'.\n"); + "Event: distributor.0: Altered node state in cluster state from 'D: controlled shutdown' to 'U'\n"); verifyNodeEvents(new Node(NodeType.STORAGE, 0), "Event: storage.0: Now reporting state U\n" + - "Event: storage.0: Altered node state in cluster state from 'D' to 'U'.\n" + + "Event: storage.0: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'\n" + "Event: storage.0: Failed to get node state: D: controlled shutdown\n" + - "Event: storage.0: Altered node state in cluster state from 'U' to 'M: controlled shutdown'.\n" + + "Event: storage.0: Altered node state in cluster state from 'U' to 'M: controlled shutdown'\n" + "Event: storage.0: 5001 milliseconds without contact. 
Marking node down.\n" + - "Event: storage.0: Altered node state in cluster state from 'M: controlled shutdown' to 'D: controlled shutdown'.\n" + + "Event: storage.0: Altered node state in cluster state from 'M: controlled shutdown' to 'D: controlled shutdown'\n" + "Event: storage.0: Now reporting state U\n" + - "Event: storage.0: Altered node state in cluster state from 'D: controlled shutdown' to 'U'.\n"); + "Event: storage.0: Altered node state in cluster state from 'D: controlled shutdown' to 'U'\n"); } @@ -346,7 +352,7 @@ public class StateChangeTest extends FleetControllerTest { verifyNodeEvents(new Node(NodeType.STORAGE, 0), "Event: storage.0: Now reporting state U\n" + - "Event: storage.0: Altered node state in cluster state from 'D' to 'U'.\n" + + "Event: storage.0: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'\n" + "Event: storage.0: Node is no longer in slobrok, but we still have a pending state request.\n"); } @@ -393,15 +399,15 @@ public class StateChangeTest extends FleetControllerTest { verifyNodeEvents(new Node(NodeType.STORAGE, 6), "Event: storage.6: Now reporting state U\n" + - "Event: storage.6: Altered node state in cluster state from 'D' to 'U'.\n" + + "Event: storage.6: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'\n" + "Event: storage.6: Failed to get node state: D: Connection error: Closed at other end\n" + "Event: storage.6: Stopped or possibly crashed after 0 ms, which is before stable state time period. 
Premature crash count is now 1.\n" + - "Event: storage.6: Altered node state in cluster state from 'U' to 'M: Connection error: Closed at other end'.\n" + + "Event: storage.6: Altered node state in cluster state from 'U' to 'M: Connection error: Closed at other end'\n" + "Event: storage.6: Now reporting state I, i 0.00 (ls)\n" + "Event: storage.6: Now reporting state I, i 0.600 (read)\n" + - "Event: storage.6: Altered node state in cluster state from 'M: Connection error: Closed at other end' to 'I, i 0.600 (read)'.\n" + + "Event: storage.6: Altered node state in cluster state from 'M: Connection error: Closed at other end' to 'I, i 0.600 (read)'\n" + "Event: storage.6: Now reporting state U\n" + - "Event: storage.6: Altered node state in cluster state from 'I, i 0.600 (read)' to 'U'.\n"); + "Event: storage.6: Altered node state in cluster state from 'I, i 0.600 (read)' to 'U'\n"); } @Test @@ -453,14 +459,14 @@ public class StateChangeTest extends FleetControllerTest { verifyNodeEvents(new Node(NodeType.STORAGE, 6), "Event: storage.6: Now reporting state U\n" + - "Event: storage.6: Altered node state in cluster state from 'D' to 'R'.\n" + + "Event: storage.6: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'R'\n" + "Event: storage.6: Failed to get node state: D: Connection error: Closed at other end\n" + "Event: storage.6: Stopped or possibly crashed after 0 ms, which is before stable state time period. 
Premature crash count is now 1.\n" + - "Event: storage.6: Altered node state in cluster state from 'R' to 'M: Connection error: Closed at other end'.\n" + + "Event: storage.6: Altered node state in cluster state from 'R' to 'M: Connection error: Closed at other end'\n" + "Event: storage.6: Now reporting state I, i 0.00 (ls)\n" + "Event: storage.6: Now reporting state I, i 0.600 (read)\n" + "Event: storage.6: Now reporting state U\n" + - "Event: storage.6: Altered node state in cluster state from 'M: Connection error: Closed at other end' to 'R: Connection error: Closed at other end'.\n"); + "Event: storage.6: Altered node state in cluster state from 'M: Connection error: Closed at other end' to 'R'\n"); } @Test @@ -522,7 +528,7 @@ public class StateChangeTest extends FleetControllerTest { ctrl.tick(); - assertEquals("Listing buckets. Progress 0.1 %.", ctrl.getSystemState().getNodeState(new Node(NodeType.STORAGE, 6)).getDescription()); + assertEquals("version:5 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString()); communicator.setNodeState(new Node(NodeType.STORAGE, 6), new NodeState(NodeType.STORAGE, State.INITIALIZING).setInitProgress(0.1), ""); @@ -542,16 +548,16 @@ public class StateChangeTest extends FleetControllerTest { verifyNodeEvents(new Node(NodeType.STORAGE, 6), "Event: storage.6: Now reporting state U\n" + - "Event: storage.6: Altered node state in cluster state from 'D' to 'U'.\n" + + "Event: storage.6: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'\n" + "Event: storage.6: Failed to get node state: D: Connection error: Closed at other end\n" + - "Event: storage.6: Altered node state in cluster state from 'U' to 'M: Connection error: Closed at other end'.\n" + + "Event: storage.6: Altered node state in cluster state from 'U' to 'M: Connection error: Closed at other end'\n" + "Event: storage.6: 100000 milliseconds without contact. 
Marking node down.\n" + - "Event: storage.6: Altered node state in cluster state from 'M: Connection error: Closed at other end' to 'D: Connection error: Closed at other end'.\n" + + "Event: storage.6: Altered node state in cluster state from 'M: Connection error: Closed at other end' to 'D: Connection error: Closed at other end'\n" + "Event: storage.6: Now reporting state I, i 0.00100 (ls)\n" + "Event: storage.6: Now reporting state I, i 0.100 (read)\n" + - "Event: storage.6: Altered node state in cluster state from 'D: Listing buckets. Progress 0.1 %.' to 'I, i 0.100 (read)'.\n" + + "Event: storage.6: Altered node state in cluster state from 'D: Connection error: Closed at other end' to 'I, i 0.100 (read)'\n" + "Event: storage.6: Now reporting state U\n" + - "Event: storage.6: Altered node state in cluster state from 'I, i 0.100 (read)' to 'U'.\n"); + "Event: storage.6: Altered node state in cluster state from 'I, i 0.100 (read)' to 'U'\n"); } @Test @@ -613,9 +619,6 @@ public class StateChangeTest extends FleetControllerTest { // Still down since it seemingly crashed during last init. assertEquals("version:7 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString()); - assertEquals("Down: 5001 ms without initialize progress. Assuming node has deadlocked.", - ctrl.getSystemState().getNodeState(new Node(NodeType.STORAGE, 6)).toString()); - ctrl.tick(); communicator.setNodeState(new Node(NodeType.STORAGE, 6), State.UP, ""); @@ -626,20 +629,20 @@ public class StateChangeTest extends FleetControllerTest { verifyNodeEvents(new Node(NodeType.STORAGE, 6), "Event: storage.6: Now reporting state U\n" + - "Event: storage.6: Altered node state in cluster state from 'D' to 'U'.\n" + + "Event: storage.6: Altered node state in cluster state from 'D: Node not seen in slobrok.' 
to 'U'\n" + "Event: storage.6: Failed to get node state: D: Connection error: Closed at other end\n" + - "Event: storage.6: Altered node state in cluster state from 'U' to 'M: Connection error: Closed at other end'.\n" + + "Event: storage.6: Altered node state in cluster state from 'U' to 'M: Connection error: Closed at other end'\n" + "Event: storage.6: 1000000 milliseconds without contact. Marking node down.\n" + - "Event: storage.6: Altered node state in cluster state from 'M: Connection error: Closed at other end' to 'D: Connection error: Closed at other end'.\n" + + "Event: storage.6: Altered node state in cluster state from 'M: Connection error: Closed at other end' to 'D: Connection error: Closed at other end'\n" + "Event: storage.6: Now reporting state I, i 0.100 (read)\n" + - "Event: storage.6: Altered node state in cluster state from 'D: Connection error: Closed at other end' to 'I, i 0.100 (read)'.\n" + + "Event: storage.6: Altered node state in cluster state from 'D: Connection error: Closed at other end' to 'I, i 0.100 (read)'\n" + "Event: storage.6: 5001 milliseconds without initialize progress. Marking node down. Premature crash count is now 1.\n" + - "Event: storage.6: Altered node state in cluster state from 'I, i 0.100 (read)' to 'D: 5001 ms without initialize progress. Assuming node has deadlocked.'.\n" + + "Event: storage.6: Altered node state in cluster state from 'I, i 0.100 (read)' to 'D'\n" + "Event: storage.6: Failed to get node state: D: Connection error: Closed at other end\n" + "Event: storage.6: Now reporting state I, i 0.00 (ls)\n" + "Event: storage.6: Now reporting state I, i 0.100 (read)\n" + "Event: storage.6: Now reporting state U\n" + - "Event: storage.6: Altered node state in cluster state from 'D: 5001 ms without initialize progress. Assuming node has deadlocked.' 
to 'U'.\n"); + "Event: storage.6: Altered node state in cluster state from 'D' to 'U'\n"); } @@ -684,9 +687,6 @@ public class StateChangeTest extends FleetControllerTest { ctrl.tick(); assertEquals("version:7 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString()); - - String desc = ctrl.getSystemState().getNodeState(new Node(NodeType.STORAGE, 6)).getDescription(); - assertEquals("Got reverse intialize progress. Assuming node have prematurely crashed", desc); } @Test @@ -1132,4 +1132,70 @@ public class StateChangeTest extends FleetControllerTest { } } + @Test + public void consolidated_cluster_state_reflects_node_changes_when_cluster_is_down() throws Exception { + FleetControllerOptions options = new FleetControllerOptions("mycluster", createNodes(10)); + options.maxTransitionTime.put(NodeType.STORAGE, 0); + options.minStorageNodesUp = 10; + options.minDistributorNodesUp = 10; + initialize(options); + + ctrl.tick(); + assertThat(ctrl.consolidatedClusterState().toString(), equalTo("version:3 distributor:10 storage:10")); + + communicator.setNodeState(new Node(NodeType.STORAGE, 2), State.DOWN, "foo"); + ctrl.tick(); + + assertThat(ctrl.consolidatedClusterState().toString(), + equalTo("version:4 cluster:d distributor:10 storage:10 .2.s:d")); + + // After this point, any further node changes while the cluster is still down won't be published. + // This is because cluster state similarity checks are short-circuited if both are Down, as no other parts + // of the state matter. Despite this, REST API access and similar features need up-to-date information, + // and therefore need to get a state which represents the _current_ state rather than the published state. + // The consolidated state offers this by selectively generating the current state on-demand if the + // cluster is down. + communicator.setNodeState(new Node(NodeType.STORAGE, 5), State.DOWN, "bar"); + ctrl.tick(); + + // NOTE: _same_ version, different node state content. 
Overall cluster down-state is still the same. + assertThat(ctrl.consolidatedClusterState().toString(), + equalTo("version:4 cluster:d distributor:10 storage:10 .2.s:d .5.s:d")); + } + + // Related to the above test, watchTimer invocations must receive the _current_ state and not the + // published state. Failure to ensure this would cause events to be fired non-stop, as the effect + // of previous timer invocations (with subsequent state generation) would not be visible. + @Test + public void timer_events_during_cluster_down_observe_most_recent_node_changes() throws Exception { + FleetControllerOptions options = new FleetControllerOptions("mycluster", createNodes(10)); + options.maxTransitionTime.put(NodeType.STORAGE, 1000); + options.minStorageNodesUp = 10; + options.minDistributorNodesUp = 10; + initialize(options); + + ctrl.tick(); + communicator.setNodeState(new Node(NodeType.STORAGE, 2), State.DOWN, "foo"); + timer.advanceTime(500); + ctrl.tick(); + communicator.setNodeState(new Node(NodeType.STORAGE, 3), State.DOWN, "foo"); + ctrl.tick(); + assertThat(ctrl.consolidatedClusterState().toString(), equalTo("version:4 cluster:d distributor:10 storage:10 .2.s:m .3.s:m")); + + // Subsequent timer tick should _not_ trigger additional events. Providing published state + // only would result in "Marking node down" events for node 2 emitted per tick. + for (int i = 0; i < 3; ++i) { + timer.advanceTime(5000); + ctrl.tick(); + } + + verifyNodeEvents(new Node(NodeType.STORAGE, 2), + "Event: storage.2: Now reporting state U\n" + + "Event: storage.2: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'\n" + + "Event: storage.2: Failed to get node state: D: foo\n" + + "Event: storage.2: Stopped or possibly crashed after 500 ms, which is before stable state time period. Premature crash count is now 1.\n" + + "Event: storage.2: Altered node state in cluster state from 'U' to 'M: foo'\n" + + "Event: storage.2: 5000 milliseconds without contact. 
Marking node down.\n"); + } + } diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateVersionTrackerTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateVersionTrackerTest.java new file mode 100644 index 00000000000..72f8c9fb8b7 --- /dev/null +++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateVersionTrackerTest.java @@ -0,0 +1,229 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.clustercontroller.core; + +import com.yahoo.vdslib.state.ClusterState; +import com.yahoo.vdslib.state.Node; +import com.yahoo.vdslib.state.NodeState; +import com.yahoo.vdslib.state.NodeType; +import com.yahoo.vdslib.state.State; +import org.junit.Test; + +import java.util.Arrays; +import java.util.Optional; + +import static org.hamcrest.CoreMatchers.hasItems; +import static org.hamcrest.core.IsEqual.equalTo; +import static org.hamcrest.core.Is.is; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.Mockito.mock; + +public class StateVersionTrackerTest { + + private static AnnotatedClusterState stateWithoutAnnotations(String stateStr) { + final ClusterState state = ClusterState.stateFromString(stateStr); + return new AnnotatedClusterState(state, Optional.empty(), AnnotatedClusterState.emptyNodeStateReasons()); + } + + private static StateVersionTracker createWithMockedMetrics() { + return new StateVersionTracker(mock(MetricUpdater.class)); + } + + private static void updateAndPromote(final StateVersionTracker versionTracker, + final AnnotatedClusterState state, + final long timeMs) + { + versionTracker.updateLatestCandidateState(state); + versionTracker.promoteCandidateToVersionedState(timeMs); + } + + @Test + public void 
version_is_incremented_when_new_state_is_applied() { + final StateVersionTracker versionTracker = createWithMockedMetrics(); + versionTracker.setVersionRetrievedFromZooKeeper(100); + updateAndPromote(versionTracker, stateWithoutAnnotations("distributor:2 storage:2"), 123); + assertThat(versionTracker.getCurrentVersion(), equalTo(101)); + assertThat(versionTracker.getVersionedClusterState().toString(), equalTo("version:101 distributor:2 storage:2")); + } + + @Test + public void version_is_1_upon_construction() { + final StateVersionTracker versionTracker = createWithMockedMetrics(); + assertThat(versionTracker.getCurrentVersion(), equalTo(1)); + } + + @Test + public void set_current_version_caps_lowest_version_to_1() { + final StateVersionTracker versionTracker = createWithMockedMetrics(); + versionTracker.setVersionRetrievedFromZooKeeper(0); + assertThat(versionTracker.getCurrentVersion(), equalTo(1)); + } + + @Test + public void new_version_from_zk_predicate_initially_false() { + final StateVersionTracker versionTracker = createWithMockedMetrics(); + assertThat(versionTracker.hasReceivedNewVersionFromZooKeeper(), is(false)); + } + + @Test + public void new_version_from_zk_predicate_true_after_setting_zk_version() { + final StateVersionTracker versionTracker = createWithMockedMetrics(); + versionTracker.setVersionRetrievedFromZooKeeper(5); + assertThat(versionTracker.hasReceivedNewVersionFromZooKeeper(), is(true)); + } + + @Test + public void new_version_from_zk_predicate_false_after_applying_higher_version() { + final StateVersionTracker versionTracker = createWithMockedMetrics(); + versionTracker.setVersionRetrievedFromZooKeeper(5); + updateAndPromote(versionTracker, stateWithoutAnnotations("distributor:2 storage:2"), 123); + assertThat(versionTracker.hasReceivedNewVersionFromZooKeeper(), is(false)); + } + + @Test + public void exposed_states_are_empty_upon_construction() { + final StateVersionTracker versionTracker = createWithMockedMetrics(); + 
assertThat(versionTracker.getVersionedClusterState().toString(), equalTo("")); + assertThat(versionTracker.getAnnotatedVersionedClusterState().getClusterState().toString(), equalTo("")); + } + + @Test + public void diff_from_initial_state_implies_changed_state() { + final StateVersionTracker versionTracker = createWithMockedMetrics(); + versionTracker.updateLatestCandidateState(stateWithoutAnnotations("cluster:d")); + assertTrue(versionTracker.candidateChangedEnoughFromCurrentToWarrantPublish()); + } + + private static boolean stateChangedBetween(String fromState, String toState) { + final StateVersionTracker versionTracker = createWithMockedMetrics(); + updateAndPromote(versionTracker, stateWithoutAnnotations(fromState), 123); + versionTracker.updateLatestCandidateState(stateWithoutAnnotations(toState)); + return versionTracker.candidateChangedEnoughFromCurrentToWarrantPublish(); + } + + @Test + public void version_mismatch_not_counted_as_changed_state() { + assertFalse(stateChangedBetween("distributor:2 storage:2", "distributor:2 storage:2")); + } + + @Test + public void different_distributor_node_count_implies_changed_state() { + assertTrue(stateChangedBetween("distributor:2 storage:2", "distributor:3 storage:2")); + assertTrue(stateChangedBetween("distributor:3 storage:2", "distributor:2 storage:2")); + } + + @Test + public void different_storage_node_count_implies_changed_state() { + assertTrue(stateChangedBetween("distributor:2 storage:2", "distributor:2 storage:3")); + assertTrue(stateChangedBetween("distributor:2 storage:3", "distributor:2 storage:2")); + } + + @Test + public void different_distributor_node_state_implies_changed_state() { + assertTrue(stateChangedBetween("distributor:2 storage:2", "distributor:2 .0.s:d storage:2")); + assertTrue(stateChangedBetween("distributor:2 .0.s:d storage:2", "distributor:2 storage:2")); + } + + @Test + public void different_storage_node_state_implies_changed_state() { + assertTrue(stateChangedBetween("distributor:2 
storage:2", "distributor:2 storage:2 .0.s:d")); + assertTrue(stateChangedBetween("distributor:2 storage:2 .0.s:d", "distributor:2 storage:2")); + } + + @Test + public void lowest_observed_distribution_bit_is_initially_16() { + final StateVersionTracker versionTracker = createWithMockedMetrics(); + assertThat(versionTracker.getLowestObservedDistributionBits(), equalTo(16)); + } + + @Test + public void lowest_observed_distribution_bit_is_tracked_across_states() { + final StateVersionTracker versionTracker = createWithMockedMetrics(); + updateAndPromote(versionTracker, stateWithoutAnnotations("bits:15 distributor:2 storage:2"), 100); + assertThat(versionTracker.getLowestObservedDistributionBits(), equalTo(15)); + + updateAndPromote(versionTracker, stateWithoutAnnotations("bits:17 distributor:2 storage:2"), 200); + assertThat(versionTracker.getLowestObservedDistributionBits(), equalTo(15)); + + updateAndPromote(versionTracker, stateWithoutAnnotations("bits:14 distributor:2 storage:2"), 300); + assertThat(versionTracker.getLowestObservedDistributionBits(), equalTo(14)); + } + + // For similarity purposes, only the cluster-wide bits matter, not the individual node state + // min used bits. The former is derived from the latter, but the latter is not visible in the + // published state (but _is_ visible in the internal ClusterState structures). 
+ @Test + public void per_node_min_bits_changes_are_not_considered_different() { + final StateVersionTracker versionTracker = createWithMockedMetrics(); + final AnnotatedClusterState stateWithMinBits = stateWithoutAnnotations("distributor:2 storage:2"); + stateWithMinBits.getClusterState().setNodeState( + new Node(NodeType.STORAGE, 0), + new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(15)); + updateAndPromote(versionTracker, stateWithMinBits, 123); + versionTracker.updateLatestCandidateState(stateWithoutAnnotations("distributor:2 storage:2")); + assertFalse(versionTracker.candidateChangedEnoughFromCurrentToWarrantPublish()); + } + + @Test + public void state_history_is_initially_empty() { + final StateVersionTracker versionTracker = createWithMockedMetrics(); + assertTrue(versionTracker.getClusterStateHistory().isEmpty()); + } + + private static ClusterStateHistoryEntry historyEntry(final String state, final long time) { + return new ClusterStateHistoryEntry(ClusterState.stateFromString(state), time); + } + + @Test + public void applying_state_adds_to_cluster_state_history() { + final StateVersionTracker versionTracker = createWithMockedMetrics(); + updateAndPromote(versionTracker, stateWithoutAnnotations("distributor:2 storage:2") ,100); + updateAndPromote(versionTracker, stateWithoutAnnotations("distributor:3 storage:3"), 200); + updateAndPromote(versionTracker, stateWithoutAnnotations("distributor:4 storage:4"), 300); + + // Note: newest entry first + assertThat(versionTracker.getClusterStateHistory(), + equalTo(Arrays.asList( + historyEntry("version:4 distributor:4 storage:4", 300), + historyEntry("version:3 distributor:3 storage:3", 200), + historyEntry("version:2 distributor:2 storage:2", 100)))); + } + + @Test + public void old_states_pruned_when_state_history_limit_reached() { + final StateVersionTracker versionTracker = createWithMockedMetrics(); + versionTracker.setMaxHistoryEntryCount(2); + + updateAndPromote(versionTracker, 
stateWithoutAnnotations("distributor:2 storage:2") ,100); + updateAndPromote(versionTracker, stateWithoutAnnotations("distributor:3 storage:3"), 200); + updateAndPromote(versionTracker, stateWithoutAnnotations("distributor:4 storage:4"), 300); + + assertThat(versionTracker.getClusterStateHistory(), + equalTo(Arrays.asList( + historyEntry("version:4 distributor:4 storage:4", 300), + historyEntry("version:3 distributor:3 storage:3", 200)))); + + updateAndPromote(versionTracker, stateWithoutAnnotations("distributor:5 storage:5"), 400); + + assertThat(versionTracker.getClusterStateHistory(), + equalTo(Arrays.asList( + historyEntry("version:5 distributor:5 storage:5", 400), + historyEntry("version:4 distributor:4 storage:4", 300)))); + } + + @Test + public void can_get_latest_non_published_candidate_state() { + final StateVersionTracker versionTracker = createWithMockedMetrics(); + + AnnotatedClusterState candidate = stateWithoutAnnotations("distributor:2 storage:2"); + versionTracker.updateLatestCandidateState(candidate); + assertThat(versionTracker.getLatestCandidateState(), equalTo(candidate)); + + candidate = stateWithoutAnnotations("distributor:3 storage:3"); + versionTracker.updateLatestCandidateState(candidate); + assertThat(versionTracker.getLatestCandidateState(), equalTo(candidate)); + } + +} diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/ClusterEventWithDescription.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/ClusterEventWithDescription.java new file mode 100644 index 00000000000..111a2c63144 --- /dev/null +++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/ClusterEventWithDescription.java @@ -0,0 +1,40 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.vespa.clustercontroller.core.matchers; + +import com.yahoo.vespa.clustercontroller.core.ClusterEvent; +import com.yahoo.vespa.clustercontroller.core.NodeEvent; +import org.hamcrest.Description; +import org.hamcrest.Factory; +import org.mockito.ArgumentMatcher; + +public class ClusterEventWithDescription extends ArgumentMatcher<ClusterEvent> { + private final String expected; + + public ClusterEventWithDescription(String expected) { + this.expected = expected; + } + + @Override + public boolean matches(Object o) { + if (!(o instanceof ClusterEvent)) { + return false; + } + return expected.equals(((ClusterEvent) o).getDescription()); + } + + @Override + public void describeTo(Description description) { + description.appendText(String.format("ClusterEvent with description '%s'", expected)); + } + + @Override + public void describeMismatch(Object item, Description description) { + ClusterEvent other = (ClusterEvent)item; + description.appendText(String.format("got description '%s'", other.getDescription())); + } + + @Factory + public static ClusterEventWithDescription clusterEventWithDescription(String description) { + return new ClusterEventWithDescription(description); + } +} diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventForNode.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventForNode.java new file mode 100644 index 00000000000..1f2372dea29 --- /dev/null +++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventForNode.java @@ -0,0 +1,37 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.vespa.clustercontroller.core.matchers; + +import com.yahoo.vdslib.state.Node; +import com.yahoo.vespa.clustercontroller.core.NodeEvent; +import org.hamcrest.Description; +import org.hamcrest.Factory; +import org.mockito.ArgumentMatcher; + +public class EventForNode extends ArgumentMatcher<NodeEvent> { + private final Node expected; + + EventForNode(Node expected) { + this.expected = expected; + } + + @Override + public boolean matches(Object o) { + return ((NodeEvent)o).getNode().getNode().equals(expected); + } + + @Override + public void describeTo(Description description) { + description.appendText(String.format("NodeEvent for node %s", expected)); + } + + @Override + public void describeMismatch(Object item, Description description) { + NodeEvent other = (NodeEvent)item; + description.appendText(String.format("got node %s", other.getNode().getNode())); + } + + @Factory + public static EventForNode eventForNode(Node expected) { + return new EventForNode(expected); + } +} diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventTimeIs.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventTimeIs.java new file mode 100644 index 00000000000..c99505d28ee --- /dev/null +++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventTimeIs.java @@ -0,0 +1,40 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.vespa.clustercontroller.core.matchers; + +import com.yahoo.vespa.clustercontroller.core.Event; +import org.hamcrest.Description; +import org.hamcrest.Factory; +import org.mockito.ArgumentMatcher; + +public class EventTimeIs extends ArgumentMatcher<Event> { + private final long expected; + + public EventTimeIs(long expected) { + this.expected = expected; + } + + @Override + public boolean matches(Object o) { + if (!(o instanceof Event)) { + return false; + } + return expected == ((Event)o).getTimeMs(); + } + + @Override + public void describeTo(Description description) { + description.appendText(String.format("Event with time %d", expected)); + } + + @Override + public void describeMismatch(Object item, Description description) { + Event other = (Event)item; + description.appendText(String.format("event time is %d", other.getTimeMs())); + } + + @Factory + public static EventTimeIs eventTimeIs(long time) { + return new EventTimeIs(time); + } +} + diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventTypeIs.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventTypeIs.java new file mode 100644 index 00000000000..5430bc5d8a3 --- /dev/null +++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventTypeIs.java @@ -0,0 +1,27 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.vespa.clustercontroller.core.matchers; + +import com.yahoo.vespa.clustercontroller.core.NodeEvent; +import org.hamcrest.Factory; +import org.mockito.ArgumentMatcher; + +public class EventTypeIs extends ArgumentMatcher<NodeEvent> { + private final NodeEvent.Type expected; + + public EventTypeIs(NodeEvent.Type expected) { + this.expected = expected; + } + + @Override + public boolean matches(Object o) { + if (!(o instanceof NodeEvent)) { + return false; + } + return expected.equals(((NodeEvent)o).getType()); + } + + @Factory + public static EventTypeIs eventTypeIs(NodeEvent.Type type) { + return new EventTypeIs(type); + } +} diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/HasStateReasonForNode.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/HasStateReasonForNode.java new file mode 100644 index 00000000000..a147b9af466 --- /dev/null +++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/HasStateReasonForNode.java @@ -0,0 +1,49 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.vespa.clustercontroller.core.matchers; + +import com.yahoo.vdslib.state.Node; +import com.yahoo.vespa.clustercontroller.core.NodeStateReason; +import org.hamcrest.Description; +import org.hamcrest.Factory; +import org.mockito.ArgumentMatcher; + +import java.util.Map; + +public class HasStateReasonForNode extends ArgumentMatcher<Map<Node, NodeStateReason>> { + private final Node node; + private final NodeStateReason expected; + + public HasStateReasonForNode(Node node, NodeStateReason expected) { + this.node = node; + this.expected = expected; + } + + @Override + public boolean matches(Object o) { + if (o == null || !(o instanceof Map)) { + return false; + } + return expected == ((Map)o).get(node); + } + + @Override + public void describeTo(Description description) { + description.appendText(String.format("has node state reason %s", expected.toString())); + } + + @Override + public void describeMismatch(Object item, Description description) { + @SuppressWarnings("unchecked") + Map<Node, NodeStateReason> other = (Map<Node, NodeStateReason>)item; + if (other.containsKey(node)) { + description.appendText(String.format("has reason %s", other.get(node).toString())); + } else { + description.appendText("has no entry for node"); + } + } + + @Factory + public static HasStateReasonForNode hasStateReasonForNode(Node node, NodeStateReason reason) { + return new HasStateReasonForNode(node, reason); + } +} diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/NodeEventWithDescription.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/NodeEventWithDescription.java new file mode 100644 index 00000000000..5ac89030c23 --- /dev/null +++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/NodeEventWithDescription.java @@ -0,0 +1,39 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.vespa.clustercontroller.core.matchers; + +import com.yahoo.vespa.clustercontroller.core.NodeEvent; +import org.hamcrest.Description; +import org.hamcrest.Factory; +import org.mockito.ArgumentMatcher; + +public class NodeEventWithDescription extends ArgumentMatcher<NodeEvent> { + private final String expected; + + public NodeEventWithDescription(String expected) { + this.expected = expected; + } + + @Override + public boolean matches(Object o) { + if (!(o instanceof NodeEvent)) { + return false; + } + return expected.equals(((NodeEvent) o).getDescription()); + } + + @Override + public void describeTo(Description description) { + description.appendText(String.format("NodeEvent with description '%s'", expected)); + } + + @Override + public void describeMismatch(Object item, Description description) { + NodeEvent other = (NodeEvent)item; + description.appendText(String.format("got description '%s'", other.getDescription())); + } + + @Factory + public static NodeEventWithDescription nodeEventWithDescription(String description) { + return new NodeEventWithDescription(description); + } +} diff --git a/config-application-package/src/main/java/com/yahoo/config/application/IncludeProcessor.java b/config-application-package/src/main/java/com/yahoo/config/application/IncludeProcessor.java index cd68d214d3d..4268e4f835e 100644 --- a/config-application-package/src/main/java/com/yahoo/config/application/IncludeProcessor.java +++ b/config-application-package/src/main/java/com/yahoo/config/application/IncludeProcessor.java @@ -41,7 +41,7 @@ class IncludeProcessor implements PreProcessor { Element elem = (Element) list.item(0); Element parent = (Element) elem.getParentNode(); String filename = elem.getAttribute("file"); - boolean required = elem.hasAttribute("required") ? Boolean.parseBoolean(elem.getAttribute("required")) : true; + boolean required = ! 
elem.hasAttribute("required") || Boolean.parseBoolean(elem.getAttribute("required")); File file = new File(currentFolder, filename); Document subFile = IncludeProcessor.parseIncludeFile(file, parent.getTagName(), required); @@ -76,4 +76,5 @@ class IncludeProcessor implements PreProcessor { w.append(endTag); return XML.getDocument(new StringReader(w.toString())); } + } diff --git a/config-application-package/src/main/java/com/yahoo/config/application/OverrideProcessor.java b/config-application-package/src/main/java/com/yahoo/config/application/OverrideProcessor.java index f3da285f524..32e9aec56cb 100644 --- a/config-application-package/src/main/java/com/yahoo/config/application/OverrideProcessor.java +++ b/config-application-package/src/main/java/com/yahoo/config/application/OverrideProcessor.java @@ -20,6 +20,7 @@ import java.util.logging.Logger; * @since 5.22 */ class OverrideProcessor implements PreProcessor { + private static final Logger log = Logger.getLogger(OverrideProcessor.class.getName()); private final Environment environment; @@ -140,6 +141,9 @@ class OverrideProcessor implements PreProcessor { } } + if (bestMatch > 1) // there was a region/environment specific overriode + doElementSpecificProcessingOnOverride(bestMatchElement); + // Remove elements not specific for (Element child : children) { if (child != bestMatchElement) { @@ -148,6 +152,14 @@ class OverrideProcessor implements PreProcessor { } } + /** Called on each element which is selected by matching some override condition */ + private void doElementSpecificProcessingOnOverride(Element element) { + // if node capacity is specified explicitly for some evn/region we should require that capacity + if ( element.getTagName().equals("nodes")) + if (element.getChildNodes().getLength() == 0) // specifies capacity, not a list of nodes + element.setAttribute("required", "true"); + } + /** * Retains all elements where at least one element is overridden. Removes non-overridden elements from map. 
*/ diff --git a/config-application-package/src/main/java/com/yahoo/config/application/XmlPreProcessor.java b/config-application-package/src/main/java/com/yahoo/config/application/XmlPreProcessor.java index 4e08e514504..b70a5054563 100644 --- a/config-application-package/src/main/java/com/yahoo/config/application/XmlPreProcessor.java +++ b/config-application-package/src/main/java/com/yahoo/config/application/XmlPreProcessor.java @@ -25,6 +25,7 @@ import java.util.List; * @since 5.22 */ public class XmlPreProcessor { + final static String deployNamespace = "xmlns:deploy"; final static String deployNamespaceUri = "vespa"; final static String preprocessNamespace = "xmlns:preprocess"; @@ -68,4 +69,5 @@ public class XmlPreProcessor { chain.add(new PropertiesProcessor()); return chain; } + } diff --git a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/ApplicationPackageXmlFilesValidator.java b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/ApplicationPackageXmlFilesValidator.java index 393bd1c2de7..06ecede09a5 100644 --- a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/ApplicationPackageXmlFilesValidator.java +++ b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/ApplicationPackageXmlFilesValidator.java @@ -7,12 +7,10 @@ import com.yahoo.config.application.api.DeployLogger; import com.yahoo.config.provision.Version; import com.yahoo.path.Path; import com.yahoo.io.reader.NamedReader; -import com.yahoo.log.LogLevel; import java.io.File; import java.io.FilenameFilter; import java.io.IOException; -import java.util.LinkedList; import java.util.List; import java.util.Optional; @@ -24,7 +22,6 @@ import java.util.Optional; public class ApplicationPackageXmlFilesValidator { private final AppSubDirs appDirs; - private final DeployLogger logger; private final Optional<Version> vespaVersion; private static final FilenameFilter xmlFilter 
= new FilenameFilter() { @@ -34,31 +31,32 @@ public class ApplicationPackageXmlFilesValidator { } }; - public ApplicationPackageXmlFilesValidator(AppSubDirs appDirs, DeployLogger logger, Optional<Version> vespaVersion) { + + public ApplicationPackageXmlFilesValidator(AppSubDirs appDirs, Optional<Version> vespaVersion) { this.appDirs = appDirs; - this.logger = logger; this.vespaVersion = vespaVersion; } - public static ApplicationPackageXmlFilesValidator createDefaultXMLValidator(File appDir, DeployLogger logger, Optional<Version> vespaVersion) { - return new ApplicationPackageXmlFilesValidator(new AppSubDirs(appDir), logger, vespaVersion); + // TODO: Remove when no version older than 6.33 is used + public ApplicationPackageXmlFilesValidator(AppSubDirs appDirs, DeployLogger logger, Optional<Version> vespaVersion) { + this.appDirs = appDirs; + this.vespaVersion = vespaVersion; } - public static ApplicationPackageXmlFilesValidator createTestXmlValidator(File appDir) { - return new ApplicationPackageXmlFilesValidator(new AppSubDirs(appDir), new BaseDeployLogger(), Optional.<Version>empty()); + public static ApplicationPackageXmlFilesValidator createDefaultXMLValidator(File appDir, Optional<Version> vespaVersion) { + return new ApplicationPackageXmlFilesValidator(new AppSubDirs(appDir), vespaVersion); } - // Verify that files a and b does not coexist. 
- private void checkConflicts(String a, String b) throws IllegalArgumentException { - if (appDirs.file(a).exists() && appDirs.file(b).exists()) - throw new IllegalArgumentException("Application package in " + appDirs.root() + " contains both " + a + " and " + b + - ", please use just one of them"); + public static ApplicationPackageXmlFilesValidator createTestXmlValidator(File appDir) { + return new ApplicationPackageXmlFilesValidator(new AppSubDirs(appDir), Optional.<Version>empty()); } @SuppressWarnings("deprecation") public void checkApplication() throws IOException { validateHostsFile(SchemaValidator.hostsXmlSchemaName); validateServicesFile(SchemaValidator.servicesXmlSchemaName); + // TODO: Disable temporarily, need to get out feature to support ignoring validation errors + //validateDeploymentFile(SchemaValidator.deploymentXmlSchemaName); if (appDirs.searchdefinitions().exists()) { if (FilesApplicationPackage.getSearchDefinitionFiles(appDirs.root()).isEmpty()) { @@ -85,7 +83,6 @@ public class ApplicationPackageXmlFilesValidator { if (appDirs.file(FilesApplicationPackage.HOSTS).exists()) { validate(hostsXmlSchemaName, FilesApplicationPackage.HOSTS); } - } private void validateServicesFile(String servicesXmlSchemaName) throws IOException { @@ -93,6 +90,12 @@ public class ApplicationPackageXmlFilesValidator { validate(servicesXmlSchemaName, servicesFileName()); } + private void validateDeploymentFile(String deploymentXmlSchemaName) throws IOException { + if (appDirs.file(FilesApplicationPackage.DEPLOYMENT_FILE.getName()).exists()) { + validate(deploymentXmlSchemaName, FilesApplicationPackage.DEPLOYMENT_FILE.getName()); + } + } + private void validate(String schemaName, String xmlFileName) throws IOException { createSchemaValidator(schemaName, vespaVersion).validate(appDirs.file(xmlFileName)); } diff --git a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/FilesApplicationPackage.java 
b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/FilesApplicationPackage.java index 3b85e617f87..002c31d5910 100644 --- a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/FilesApplicationPackage.java +++ b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/FilesApplicationPackage.java @@ -44,10 +44,8 @@ import java.net.URL; import java.security.MessageDigest; import java.util.*; import java.util.jar.JarFile; -import java.util.logging.Level; import java.util.logging.Logger; -import static com.yahoo.io.IOUtils.readAll; import static com.yahoo.text.Lowercase.toLowerCase; @@ -626,13 +624,27 @@ public class FilesApplicationPackage implements ApplicationPackage { } @Override + public void validateXML() throws IOException { + validateXML(Optional.empty()); + } + + // TODO: Remove when no version older than 6.33 is used + @Override public void validateXML(DeployLogger logger) throws IOException { - validateXML(logger, Optional.empty()); + validateXML(Optional.empty()); + } + + @Override + public void validateXML(Optional<Version> vespaVersion) throws IOException { + ApplicationPackageXmlFilesValidator xmlFilesValidator = ApplicationPackageXmlFilesValidator.createDefaultXMLValidator(appDir, vespaVersion); + xmlFilesValidator.checkApplication(); + ApplicationPackageXmlFilesValidator.checkIncludedDirs(this); } + // TODO: Remove when no version older than 6.33 is used @Override public void validateXML(DeployLogger logger, Optional<Version> vespaVersion) throws IOException { - ApplicationPackageXmlFilesValidator xmlFilesValidator = ApplicationPackageXmlFilesValidator.createDefaultXMLValidator(appDir, logger, vespaVersion); + ApplicationPackageXmlFilesValidator xmlFilesValidator = ApplicationPackageXmlFilesValidator.createDefaultXMLValidator(appDir, vespaVersion); xmlFilesValidator.checkApplication(); ApplicationPackageXmlFilesValidator.checkIncludedDirs(this); } @@ 
-659,10 +671,10 @@ public class FilesApplicationPackage implements ApplicationPackage { @Override public ApplicationPackage preprocess(Zone zone, RuleConfigDeriver ignored, DeployLogger logger) throws IOException, TransformerException, ParserConfigurationException, SAXException { IOUtils.recursiveDeleteDir(preprocessedDir); - IOUtils.copyDirectory(appDir, preprocessedDir, -1, (dir, name) -> !name.equals(".preprocessed") && - !name.equals(SERVICES) && - !name.equals(HOSTS) && - !name.equals(CONFIG_DEFINITIONS_DIR)); + IOUtils.copyDirectory(appDir, preprocessedDir, -1, (dir, name) -> ! name.equals(".preprocessed") && + ! name.equals(SERVICES) && + ! name.equals(HOSTS) && + ! name.equals(CONFIG_DEFINITIONS_DIR)); preprocessXML(new File(preprocessedDir, SERVICES), getServicesFile(), zone); if (getHostsFile().exists()) { preprocessXML(new File(preprocessedDir, HOSTS), getHostsFile(), zone); diff --git a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/MockFileRegistry.java b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/MockFileRegistry.java index 334fda6e6eb..ce63ad23852 100644 --- a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/MockFileRegistry.java +++ b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/MockFileRegistry.java @@ -35,8 +35,4 @@ public class MockFileRegistry implements FileRegistry { return result; } - @Override - public Set<String> allRelativePaths() { - return Collections.emptySet(); - } } diff --git a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/PreGeneratedFileRegistry.java b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/PreGeneratedFileRegistry.java index 67a24e0159b..ed4ccf51ff7 100644 --- a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/PreGeneratedFileRegistry.java +++ 
b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/PreGeneratedFileRegistry.java @@ -83,11 +83,6 @@ public class PreGeneratedFileRegistry implements FileRegistry { } @Override - public Set<String> allRelativePaths() { - return path2Hash.keySet(); - } - - @Override public List<Entry> export() { List<Entry> entries = new ArrayList<>(); for (Map.Entry<String, String> entry : path2Hash.entrySet()) { diff --git a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/SchemaValidator.java b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/SchemaValidator.java index a28a17dc831..698fa8fdce7 100644 --- a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/SchemaValidator.java +++ b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/SchemaValidator.java @@ -43,6 +43,7 @@ public class SchemaValidator { public static final String schemaDirBase = System.getProperty("java.io.tmpdir", File.separator + "tmp" + File.separator + "vespa"); static final String servicesXmlSchemaName = "services.rnc"; static final String hostsXmlSchemaName = "hosts.rnc"; + static final String deploymentXmlSchemaName = "deployment.rnc"; private final CustomErrorHandler errorHandler = new CustomErrorHandler(); private final ValidationDriver driver; private DeployLogger deployLogger; @@ -91,6 +92,15 @@ public class SchemaValidator { return new SchemaValidator(hostsXmlSchemaName); } + /** + * Create a validator for deployment.xml for tests + * + * @throws IOException if it is not possible to read schema files + */ + public static SchemaValidator createTestValidatorDeployment() throws IOException { + return new SchemaValidator(deploymentXmlSchemaName); + } + private class CustomErrorHandler implements ErrorHandler { volatile String fileName; diff --git 
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.application;

import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.RegionName;
import org.custommonkey.xmlunit.XMLUnit;
import org.junit.Test;
import org.w3c.dom.Document;
import org.xml.sax.SAXException;

import javax.xml.parsers.ParserConfigurationException;
import javax.xml.stream.XMLStreamException;
import javax.xml.transform.TransformerException;
import java.io.IOException;
import java.io.StringReader;

/**
 * Tests that {@link OverrideProcessor} selects the correct deploy:environment/deploy:region
 * override of the nodes element, and that an env/region-specific override gets
 * required='true' added (explicit capacity requests must be honored in hosted Vespa).
 *
 * @author bratseth
 */
public class HostedOverrideProcessorTest {

    static {
        // Whitespace differences are irrelevant to these XML comparisons.
        XMLUnit.setIgnoreWhitespace(true);
    }

    // Services XML with one default nodes element plus staging/prod/prod+us-west overrides.
    private static final String input =
            "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>" +
            "<services xmlns:deploy=\"vespa\" xmlns:preprocess=\"?\" version=\"1.0\">" +
            " <container id=\"foo\" version=\"1.0\">" +
            " <nodes count='1'/>" +
            " <nodes deploy:environment=\"staging\" count='2'/>" +
            " <nodes deploy:environment=\"prod\" count='3'/>" +
            " <nodes deploy:environment=\"prod\" deploy:region=\"us-west\" count='4'/>" +
            " </container>" +
            "</services>";


    /** No override targets 'test', so the plain default element survives (and gains no required attribute). */
    @Test
    public void testParsingDefault() throws IOException, SAXException, XMLStreamException, ParserConfigurationException, TransformerException {
        String expected =
                "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>" +
                "<services xmlns:deploy=\"vespa\" xmlns:preprocess=\"?\" version=\"1.0\">" +
                " <container id=\"foo\" version=\"1.0\">" +
                " <nodes count='1'/>" +
                " </container>" +
                "</services>";
        assertOverride(Environment.test, RegionName.defaultName(), expected);
    }

    /** Exact environment+region match wins over the plain environment match. */
    @Test
    public void testParsingEnvironmentAndRegion() throws ParserConfigurationException, IOException, SAXException, TransformerException {
        String expected =
                "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>" +
                "<services xmlns:deploy=\"vespa\" xmlns:preprocess=\"?\" version=\"1.0\">" +
                " <container id=\"foo\" version=\"1.0\">" +
                " <nodes count='4' required='true'/>" +
                " </container>" +
                "</services>";
        assertOverride(Environment.from("prod"), RegionName.from("us-west"), expected);
    }

    /** A region with no specific override falls back to the environment-only override. */
    @Test
    public void testParsingEnvironmentUnknownRegion() throws ParserConfigurationException, IOException, SAXException, TransformerException {
        String expected =
                "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>" +
                "<services xmlns:deploy=\"vespa\" xmlns:preprocess=\"?\" version=\"1.0\">" +
                " <container id=\"foo\" version=\"1.0\">" +
                " <nodes count='3' required='true'/>" +
                " </container>" +
                "</services>";
        assertOverride(Environment.valueOf("prod"), RegionName.from("us-east"), expected);
    }

    /** The default region likewise resolves to the environment-only override. */
    @Test
    public void testParsingEnvironmentNoRegion() throws ParserConfigurationException, IOException, SAXException, TransformerException {
        String expected =
                "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>" +
                "<services xmlns:deploy=\"vespa\" xmlns:preprocess=\"?\" version=\"1.0\">" +
                " <container id=\"foo\" version=\"1.0\">" +
                " <nodes count='3' required='true'/>" +
                " </container>" +
                "</services>";
        assertOverride(Environment.from("prod"), RegionName.defaultName(), expected);
    }

    /** An environment with no matching override keeps the default element unchanged. */
    @Test
    public void testParsingUnknownEnvironment() throws ParserConfigurationException, IOException, SAXException, TransformerException {
        String expected =
                "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>" +
                "<services xmlns:deploy=\"vespa\" xmlns:preprocess=\"?\" version=\"1.0\">" +
                " <container id=\"foo\" version=\"1.0\">" +
                " <nodes count='1'/>" +
                " </container>" +
                "</services>";
        assertOverride(Environment.from("dev"), RegionName.defaultName(), expected);
    }

    /** A region qualifier cannot promote an override for a non-matching environment. */
    @Test
    public void testParsingUnknownEnvironmentUnknownRegion() throws ParserConfigurationException, IOException, SAXException, TransformerException {
        String expected =
                "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>" +
                "<services xmlns:deploy=\"vespa\" xmlns:preprocess=\"?\" version=\"1.0\">" +
                " <container id=\"foo\" version=\"1.0\">" +
                " <nodes count='1'/>" +
                " </container>" +
                "</services>";
        assertOverride(Environment.from("test"), RegionName.from("us-west"), expected);
    }

    /** staging/us-west matches the staging override; the region attribute does not disqualify it. */
    @Test
    public void testParsingInheritEnvironment() throws ParserConfigurationException, IOException, SAXException, TransformerException {
        String expected =
                "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>" +
                "<services xmlns:deploy=\"vespa\" xmlns:preprocess=\"?\" version=\"1.0\">" +
                " <container id=\"foo\" version=\"1.0\">" +
                " <nodes count='2' required='true'/>" +
                " </container>" +
                "</services>";
        assertOverride(Environment.from("staging"), RegionName.from("us-west"), expected);
    }

    // Runs the override processor on the shared input and compares the resulting document.
    private void assertOverride(Environment environment, RegionName region, String expected) throws TransformerException {
        Document inputDoc = Xml.getDocument(new StringReader(input));
        Document newDoc = new OverrideProcessor(environment, region).process(inputDoc);
        TestBase.assertDocument(expected, newDoc);
    }

}
b/config-application-package/src/test/java/com/yahoo/config/application/IncludeProcessorTest.java @@ -17,6 +17,7 @@ import java.nio.file.NoSuchFileException; * @since 5.22 */ public class IncludeProcessorTest { + @Test public void testInclude() throws IOException, SAXException, XMLStreamException, ParserConfigurationException, TransformerException { File app = new File("src/test/resources/multienvapp"); @@ -68,7 +69,7 @@ public class IncludeProcessorTest { "</jdisc></services>"; Document doc = (new IncludeProcessor(app)).process(docBuilder.parse(Xml.getServices(app))); - System.out.println(Xml.documentAsString(doc)); + // System.out.println(Xml.documentAsString(doc)); TestBase.assertDocument(expected, doc); } @@ -78,4 +79,5 @@ public class IncludeProcessorTest { DocumentBuilder docBuilder = Xml.getPreprocessDocumentBuilder(); (new IncludeProcessor(app)).process(docBuilder.parse(Xml.getServices(app))); } + } diff --git a/config-application-package/src/test/java/com/yahoo/config/application/XmlPreprocessorTest.java b/config-application-package/src/test/java/com/yahoo/config/application/XmlPreprocessorTest.java index eecbb1e7313..f6528e84368 100644 --- a/config-application-package/src/test/java/com/yahoo/config/application/XmlPreprocessorTest.java +++ b/config-application-package/src/test/java/com/yahoo/config/application/XmlPreprocessorTest.java @@ -83,7 +83,7 @@ public class XmlPreprocessorTest { "</services>"; Document docUsWest = (new XmlPreProcessor(appDir, services, Environment.prod, RegionName.from("us-west"))).run(); - System.out.println(Xml.documentAsString(docUsWest)); + // System.out.println(Xml.documentAsString(docUsWest)); TestBase.assertDocument(expectedUsWest, docUsWest); String expectedUsEast = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?><services xmlns:deploy=\"vespa\" xmlns:preprocess=\"properties\" version=\"1.0\">\n" + @@ -162,4 +162,5 @@ public class XmlPreprocessorTest { Document docDev = (new XmlPreProcessor(appDir, new 
StringReader(input), Environment.prod, RegionName.from("default")).run()); TestBase.assertDocument(expectedProd, docDev); } + } diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/ApplicationPackage.java b/config-model-api/src/main/java/com/yahoo/config/application/api/ApplicationPackage.java index 2a4751af083..a5fb7a152d8 100644 --- a/config-model-api/src/main/java/com/yahoo/config/application/api/ApplicationPackage.java +++ b/config-model-api/src/main/java/com/yahoo/config/application/api/ApplicationPackage.java @@ -217,14 +217,24 @@ public interface ApplicationPackage { throw new UnsupportedOperationException("This application package cannot return file references"); } + // TODO: Remove when no version older than 6.33 is in use default void validateXML(DeployLogger logger) throws IOException { throw new UnsupportedOperationException("This application package cannot validate XML"); } + default void validateXML() throws IOException { + throw new UnsupportedOperationException("This application package cannot validate XML"); + } + + // TODO: Remove when no version older than 6.33 is in use default void validateXML(DeployLogger logger, Optional<Version> vespaVersion) throws IOException { throw new UnsupportedOperationException("This application package cannot validate XML"); } + default void validateXML(Optional<Version> vespaVersion) throws IOException { + throw new UnsupportedOperationException("This application package cannot validate XML"); + } + default void writeMetaData() throws IOException { throw new UnsupportedOperationException("This application package cannot write its metadata"); } diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/ComponentInfo.java b/config-model-api/src/main/java/com/yahoo/config/application/api/ComponentInfo.java index 42732ddfc47..52aaa148c1b 100644 --- a/config-model-api/src/main/java/com/yahoo/config/application/api/ComponentInfo.java +++ 
b/config-model-api/src/main/java/com/yahoo/config/application/api/ComponentInfo.java @@ -8,6 +8,7 @@ package com.yahoo.config.application.api; * @author tonytv */ public class ComponentInfo { + final String pathRelativeToAppDir; public ComponentInfo(String pathRelativeToAppDir) { @@ -18,4 +19,8 @@ public class ComponentInfo { public String getPathRelativeToAppDir() { return pathRelativeToAppDir; } + + @Override + public String toString() { return "component at '" + pathRelativeToAppDir + "'"; } + } diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/FileRegistry.java b/config-model-api/src/main/java/com/yahoo/config/application/api/FileRegistry.java index fe4aab72cb0..8b211d1d400 100644 --- a/config-model-api/src/main/java/com/yahoo/config/application/api/FileRegistry.java +++ b/config-model-api/src/main/java/com/yahoo/config/application/api/FileRegistry.java @@ -18,8 +18,6 @@ public interface FileRegistry { */ String fileSourceHost(); - Set<String> allRelativePaths(); - List<Entry> export(); class Entry { diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/ConfigServerSpec.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ConfigServerSpec.java index 0a43f190675..499c43906e2 100644 --- a/config-model-api/src/main/java/com/yahoo/config/model/api/ConfigServerSpec.java +++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ConfigServerSpec.java @@ -7,8 +7,10 @@ package com.yahoo.config.model.api; * @author tonytv */ public interface ConfigServerSpec { - public String getHostName(); - public int getConfigServerPort(); - public int getHttpPort(); - public int getZooKeeperPort(); + + String getHostName(); + int getConfigServerPort(); + int getHttpPort(); + int getZooKeeperPort(); + } diff --git a/config-model/pom.xml b/config-model/pom.xml index 42497224b0b..57c5725ec57 100644 --- a/config-model/pom.xml +++ b/config-model/pom.xml @@ -483,6 +483,16 @@ <classpathScope>compile</classpathScope> 
</configuration> </execution> + <execution> + <id>test-schema-files</id> + <goals> + <goal>exec</goal> + </goals> + <phase>test</phase> + <configuration> + <executable>src/test/sh/test-schema.sh</executable> + </configuration> + </execution> </executions> </plugin> <plugin> diff --git a/config-model/src/main/Makefile b/config-model/src/main/Makefile index 5e7024ccff9..c3dfd0c2e3e 100644 --- a/config-model/src/main/Makefile +++ b/config-model/src/main/Makefile @@ -3,7 +3,7 @@ trangjar=../../target/trang.jar -all: resources/schema/services.rng resources/schema/hosts.rng resources/schema/container-include.rng resources/schema/services.xsd resources/schema/hosts.xsd resources/schema/container-include.xsd +all: resources/schema/services.rng resources/schema/hosts.rng resources/schema/container-include.rng resources/schema/services.xsd resources/schema/hosts.xsd resources/schema/container-include.xsd resources/schema/deployment.xsd resources/schema/services.rng: resources/schema/services.rnc resources/schema/common.rnc resources/schema/admin.rnc resources/schema/clients.rnc resources/schema/docproc.rnc resources/schema/routing.rnc resources/schema/clients-v2.rnc resources/schema/content.rnc resources/schema/genericmodule.rnc resources/schema/legacygenericcluster.rnc resources/schema/genericcluster.rnc resources/schema/legacygenericmodule.rnc resources/schema/containercluster.rnc java -jar $(trangjar) -I rnc -O rng resources/schema/services.rnc resources/schema/services.rng @@ -25,6 +25,12 @@ resources/schema/hosts.rng: resources/schema/hosts.rnc resources/schema/hosts.xsd: resources/schema/hosts.rng java -jar $(trangjar) -I rng -O xsd resources/schema/hosts.rng resources/schema/hosts.xsd +resources/schema/deployment.rng: resources/schema/deployment.rnc + java -jar $(trangjar) -I rnc -O rng resources/schema/deployment.rnc resources/schema/deployment.rng + +resources/schema/deployment.xsd: resources/schema/deployment.rng + java -jar $(trangjar) -I rng -O xsd 
resources/schema/deployment.rng resources/schema/deployment.xsd + clean: rm -f resources/schema/*.rng rm -f resources/schema/*.xsd diff --git a/config-model/src/main/java/com/yahoo/config/model/ApplicationConfigProducerRoot.java b/config-model/src/main/java/com/yahoo/config/model/ApplicationConfigProducerRoot.java index e6df94c8855..0b0ac77443c 100644 --- a/config-model/src/main/java/com/yahoo/config/model/ApplicationConfigProducerRoot.java +++ b/config-model/src/main/java/com/yahoo/config/model/ApplicationConfigProducerRoot.java @@ -265,6 +265,7 @@ public class ApplicationConfigProducerRoot extends AbstractConfigProducer<Abstra } public FileDistributionConfigProducer getFileDistributionConfigProducer() { + if (admin == null) return null; // no admin if standalone return admin.getFileDistributionConfigProducer(); } diff --git a/config-model/src/main/java/com/yahoo/config/model/deploy/DeployProperties.java b/config-model/src/main/java/com/yahoo/config/model/deploy/DeployProperties.java index 3f6ef494a27..c0ac67e6da8 100644 --- a/config-model/src/main/java/com/yahoo/config/model/deploy/DeployProperties.java +++ b/config-model/src/main/java/com/yahoo/config/model/deploy/DeployProperties.java @@ -14,6 +14,7 @@ import java.util.List; * @since 5.17 */ public class DeployProperties { + private final boolean multitenant; private final ApplicationId applicationId; private final List<ConfigServerSpec> serverSpecs = new ArrayList<>(); @@ -46,10 +47,6 @@ public class DeployProperties { return serverSpecs; } - public Quota quota() { - return new Quota(Integer.MAX_VALUE); - } - public boolean hostedVespa() { return hostedVespa; } @@ -67,7 +64,6 @@ public class DeployProperties { private ApplicationId applicationId = ApplicationId.defaultId(); private boolean multitenant = false; private List<ConfigServerSpec> configServerSpecs = new ArrayList<>(); - private Quota quota = new Quota(Integer.MAX_VALUE); private boolean hostedVespa = false; private Version vespaVersion = 
Version.fromIntValues(1, 0, 0); private Zone zone = Zone.defaultZone(); @@ -87,11 +83,6 @@ public class DeployProperties { return this; } - public Builder quota(Quota quota) { - this.quota = quota; - return this; - } - public Builder vespaVersion(Version version) { this.vespaVersion = version; return this; diff --git a/config-model/src/main/java/com/yahoo/config/model/producer/AbstractConfigProducer.java b/config-model/src/main/java/com/yahoo/config/model/producer/AbstractConfigProducer.java index 8e1097907f1..8778107cd8a 100644 --- a/config-model/src/main/java/com/yahoo/config/model/producer/AbstractConfigProducer.java +++ b/config-model/src/main/java/com/yahoo/config/model/producer/AbstractConfigProducer.java @@ -105,10 +105,10 @@ public abstract class AbstractConfigProducer<CHILD extends AbstractConfigProduce child.setParent(this); if (childrenBySubId.get(child.getSubId()) != null) { throw new IllegalArgumentException("Multiple services/instances of the id '" + child.getSubId() + "' under the service/instance " + - errorMsgClassName() + " '" + subId + "'. (This is commonly caused by service/node index " + - "collisions in the config.)." + - "\nExisting instance: " + childrenBySubId.get(child.getSubId()) + - "\nAttempted to add: " + child); + errorMsgClassName() + " '" + subId + "'. (This is commonly caused by service/node index " + + "collisions in the config.)." 
+ + "\nExisting instance: " + childrenBySubId.get(child.getSubId()) + + "\nAttempted to add: " + child); } childrenBySubId.put(child.getSubId(), child); diff --git a/config-model/src/main/java/com/yahoo/config/model/provision/InMemoryProvisioner.java b/config-model/src/main/java/com/yahoo/config/model/provision/InMemoryProvisioner.java index 5c9d03b434f..c4ac4d91001 100644 --- a/config-model/src/main/java/com/yahoo/config/model/provision/InMemoryProvisioner.java +++ b/config-model/src/main/java/com/yahoo/config/model/provision/InMemoryProvisioner.java @@ -101,8 +101,9 @@ public class InMemoryProvisioner implements HostProvisioner { throw new IllegalArgumentException("Requested " + requestedCapacity.nodeCount() + " nodes in " + groups + " groups, but the node count is not divisible into this number of groups"); - int capacity = failOnOutOfCapacity ? requestedCapacity.nodeCount() : - Math.min(requestedCapacity.nodeCount(), freeNodes.get("default").size() + totalAllocatedTo(cluster)); + int capacity = failOnOutOfCapacity || requestedCapacity.isRequired() + ? 
requestedCapacity.nodeCount() + : Math.min(requestedCapacity.nodeCount(), freeNodes.get("default").size() + totalAllocatedTo(cluster)); if (groups > capacity) groups = capacity; @@ -138,7 +139,7 @@ public class InMemoryProvisioner implements HostProvisioner { int nextIndex = nextIndexInCluster.getOrDefault(new Pair<>(clusterGroup.type(), clusterGroup.id()), startIndex); while (allocation.size() < nodesInGroup) { - if (freeNodes.get(flavor).isEmpty()) throw new IllegalArgumentException("No nodes of flavor '" + flavor + "' available"); + if (freeNodes.get(flavor).isEmpty()) throw new IllegalArgumentException("Insufficient capacity of flavor '" + flavor + "'"); Host newHost = freeNodes.removeValue(flavor, 0); ClusterMembership membership = ClusterMembership.from(clusterGroup, nextIndex++); allocation.add(new HostSpec(newHost.hostname(), newHost.aliases(), membership)); diff --git a/config-model/src/main/java/com/yahoo/config/model/provision/SingleNodeProvisioner.java b/config-model/src/main/java/com/yahoo/config/model/provision/SingleNodeProvisioner.java index 1d5544873d9..fe8b3935fcf 100644 --- a/config-model/src/main/java/com/yahoo/config/model/provision/SingleNodeProvisioner.java +++ b/config-model/src/main/java/com/yahoo/config/model/provision/SingleNodeProvisioner.java @@ -27,10 +27,11 @@ public class SingleNodeProvisioner implements HostProvisioner { public SingleNodeProvisioner() { try { host = new Host(HostSystem.lookupCanonicalHostname(HostName.getLocalhost())); - } catch (UnknownHostException e) { + this.hostSpec = new HostSpec(host.hostname(), host.aliases()); + } + catch (UnknownHostException e) { throw new RuntimeException(e); } - this.hostSpec = new HostSpec(host.hostname(), host.aliases()); } @Override diff --git a/config-model/src/main/java/com/yahoo/config/model/test/MockApplicationPackage.java b/config-model/src/main/java/com/yahoo/config/model/test/MockApplicationPackage.java index 731410c9bf3..c30c62b44bc 100644 --- 
a/config-model/src/main/java/com/yahoo/config/model/test/MockApplicationPackage.java +++ b/config-model/src/main/java/com/yahoo/config/model/test/MockApplicationPackage.java @@ -232,6 +232,16 @@ public class MockApplicationPackage implements ApplicationPackage { " </host>" + "</hosts>"; + + @Override + public void validateXML() throws IOException { + if (failOnValidateXml) { + throw new IllegalArgumentException("Error in application package"); + } else { + throw new UnsupportedOperationException("This application package cannot validate XML"); + } + } + @Override public void validateXML(DeployLogger logger) throws IOException { if (failOnValidateXml) { diff --git a/config-model/src/main/java/com/yahoo/config/model/test/MockRoot.java b/config-model/src/main/java/com/yahoo/config/model/test/MockRoot.java index fa84cf1c7eb..314060e7543 100644 --- a/config-model/src/main/java/com/yahoo/config/model/test/MockRoot.java +++ b/config-model/src/main/java/com/yahoo/config/model/test/MockRoot.java @@ -35,7 +35,9 @@ import java.util.Set; * * @author gjoranv */ +// TODO: mockRoot instances can probably be replaced by VespaModel.createIncomplete public class MockRoot extends AbstractConfigProducerRoot { + private static final long serialVersionUID = 1L; public static final String MOCKHOST = "mockhost"; diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/RankingConstant.java b/config-model/src/main/java/com/yahoo/searchdefinition/RankingConstant.java index 2a4e231dee4..5b3ce7136d5 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/RankingConstant.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/RankingConstant.java @@ -26,6 +26,7 @@ public class RankingConstant { public String getName() { return name; } public String getFileName() { return fileName; } public String getFileReference() { return fileRef; } + public TensorType getTensorType() { return tensorType; } public String getType() { return tensorType.toString(); } public void 
validate() { diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/Search.java b/config-model/src/main/java/com/yahoo/searchdefinition/Search.java index 2ab634801c2..9032f913d0b 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/Search.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/Search.java @@ -51,6 +51,7 @@ public class Search implements Serializable { private boolean documentsOnly = false; // The stemming setting of this search definition. Default is SHORTEST. + // TODO: Change to Stemming.BEST on Vespa 7 private Stemming stemming = Stemming.SHORTEST; // Documents contained in this definition. diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/derived/IndexInfo.java b/config-model/src/main/java/com/yahoo/searchdefinition/derived/IndexInfo.java index e98ee662b3a..0d8d21400aa 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/derived/IndexInfo.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/derived/IndexInfo.java @@ -420,7 +420,7 @@ public class IndexInfo extends Derived implements IndexInfoConfig.Producer { if (active != null) { return active; } - // assume default + // assume default: TODO: Change to Stemming.BEST on Vespa 7 return Stemming.SHORTEST; } diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/document/Stemming.java b/config-model/src/main/java/com/yahoo/searchdefinition/document/Stemming.java index f471201f55e..5b145051de5 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/document/Stemming.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/document/Stemming.java @@ -17,13 +17,17 @@ public enum Stemming { /** No stemming */ NONE("none"), - /** Stem as much as possible */ + /** @deprecated incorrectly don't stem at all */ + @Deprecated ALL("all"), /** select shortest possible stem */ SHORTEST("shortest"), - /** index (and query?) 
multiple stems */ + /** select the "best" stem alternative */ + BEST("best"), + + /** index multiple stems */ MULTIPLE("multiple"); private static Logger log=Logger.getLogger(Stemming.class.getName()); @@ -36,6 +40,7 @@ public enum Stemming { * * @throws IllegalArgumentException if there is no stemming type with the given name */ + @SuppressWarnings("deprecation") public static Stemming get(String stemmingName) { try { Stemming stemming = Stemming.valueOf(stemmingName.toUpperCase()); @@ -49,7 +54,7 @@ public enum Stemming { } } - private Stemming(String name) { + Stemming(String name) { this.name = name; } @@ -59,14 +64,16 @@ public enum Stemming { return "stemming " + name; } + @SuppressWarnings("deprecation") public StemMode toStemMode() { - if (this == Stemming.SHORTEST) { - return StemMode.SHORTEST; - } - if (this == Stemming.MULTIPLE) { - return StemMode.ALL; + switch(this) { + case SHORTEST: return StemMode.SHORTEST; + case MULTIPLE: return StemMode.ALL; + case BEST : return StemMode.BEST; + case NONE: return StemMode.NONE; + case ALL: return StemMode.SHORTEST; // Intentional; preserve historic behavior + default: throw new IllegalStateException("Inconvertible stem mode " + this); } - return StemMode.NONE; } } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/Client.java b/config-model/src/main/java/com/yahoo/vespa/model/Client.java index 15685f5f669..2a2498cc310 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/Client.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/Client.java @@ -8,7 +8,7 @@ import com.yahoo.config.model.producer.AbstractConfigProducer; * This is a placeholder config producer that makes global configuration available through a single identifier. This * is added directly to the {@link ApplicationConfigProducerRoot} producer, and so can be accessed by the simple "client" identifier. 
* - * @author <a href="mailto:simon@yahoo-inc.com">Simon Thoresen</a> + * @author Simon Thoresen */ public class Client extends AbstractConfigProducer { diff --git a/config-model/src/main/java/com/yahoo/vespa/model/ConfigProducer.java b/config-model/src/main/java/com/yahoo/vespa/model/ConfigProducer.java index 852e4e73331..aaeedf10bc8 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/ConfigProducer.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/ConfigProducer.java @@ -19,44 +19,38 @@ import com.yahoo.config.model.producer.UserConfigRepo; */ public interface ConfigProducer extends com.yahoo.config.ConfigInstance.Producer { - /** - * @return the configId of this ConfigProducer. - */ - public String getConfigId(); + /** Returns the configId of this ConfigProducer. */ + String getConfigId(); - /** - * @return The one and only HostSystem of the root node - */ - public HostSystem getHostSystem(); + /** Returns the one and only HostSystem of the root node */ + HostSystem getHostSystem(); /** Returns the user configs of this */ - public UserConfigRepo getUserConfigs(); + UserConfigRepo getUserConfigs(); - /** - * @return this ConfigProducer's children (only 1st level) - */ - public Map<String,? extends ConfigProducer> getChildren(); + /** Returns this ConfigProducer's children (only 1st level) */ + Map<String,? extends ConfigProducer> getChildren(); - /** - * @return a List of all Services that are descendants to this ConfigProducer - */ - public List<Service> getDescendantServices(); + /** Returns a List of all Services that are descendants to this ConfigProducer */ + List<Service> getDescendantServices(); /** * Writes files that need to be written. The files will usually * only be written when the Vespa model is generated through the * deploy-application script. - * gv: This is primarily intended for debugging. + * This is primarily intended for debugging. 
+ * * @param directory directory to write files to * @throws java.io.IOException if writing fails */ - public void writeFiles(File directory) throws IOException; + void writeFiles(File directory) throws IOException; /** * Dump the three of config producers to the specified stream. + * * @param out The stream to print to, e.g. System.out */ - public void dump(PrintStream out); + void dump(PrintStream out); /** * Build config from this and all parent ConfigProducers, @@ -74,11 +68,12 @@ public interface ConfigProducer extends com.yahoo.config.ConfigInstance.Producer * @param builder The ConfigBuilder to add user config overrides. * @return true if overrides were added, false if not. */ - public boolean addUserConfig(ConfigInstance.Builder builder); + boolean addUserConfig(ConfigInstance.Builder builder); /** * check constraints depending on the state of the vespamodel graph. * When overriding, you must invoke super. */ - public void validate() throws Exception; + void validate() throws Exception; + } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java b/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java index d3e922c69dc..2d825e3332d 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java @@ -5,12 +5,21 @@ import com.yahoo.config.application.api.DeployLogger; import com.yahoo.config.model.api.HostProvisioner; import com.yahoo.config.model.producer.AbstractConfigProducer; import com.yahoo.config.model.test.MockRoot; -import com.yahoo.config.provision.*; +import com.yahoo.config.provision.Capacity; +import com.yahoo.config.provision.ClusterMembership; +import com.yahoo.config.provision.ClusterSpec; +import com.yahoo.config.provision.HostSpec; +import com.yahoo.config.provision.ProvisionLogger; import com.yahoo.net.HostName; import java.net.InetAddress; import java.net.UnknownHostException; -import java.util.*; +import 
java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; import java.util.logging.Level; import java.util.stream.Collectors; @@ -73,6 +82,7 @@ public class HostSystem extends AbstractConfigProducer<Host> { * @return The canonical hostname, or null if unable to resolve. * @throws UnknownHostException if the hostname cannot be resolved */ + // public - This is used by amenders outside this repo public static String lookupCanonicalHostname(String hostname) throws UnknownHostException { return java.net.InetAddress.getByName(hostname).getCanonicalHostName(); } @@ -87,7 +97,7 @@ public class HostSystem extends AbstractConfigProducer<Host> { if (ipAddresses.containsKey(hostname)) return ipAddresses.get(hostname); String ipAddress; - if (hostname.startsWith(MockRoot.MOCKHOST)) { + if (hostname.startsWith(MockRoot.MOCKHOST)) { // TODO: Remove ipAddress = "0.0.0.0"; } else { try { diff --git a/config-model/src/main/java/com/yahoo/vespa/model/PlainFormatter.java b/config-model/src/main/java/com/yahoo/vespa/model/PlainFormatter.java deleted file mode 100644 index d424f4fa31b..00000000000 --- a/config-model/src/main/java/com/yahoo/vespa/model/PlainFormatter.java +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.vespa.model; - -import java.util.logging.Formatter; -import java.util.logging.LogRecord; - -/** - * A log formatter that returns a plain log message only with level, not - * including timestamp and method (as java.util.logging.SimpleFormatter). - * See bug #1789867. 
- * - * @author gjoranv - */ -public class PlainFormatter extends Formatter { - - public PlainFormatter() { - super(); - } - - public String format(LogRecord record) { - StringBuffer sb = new StringBuffer(); - - sb.append(record.getLevel().getName()).append(": "); - sb.append(formatMessage(record)).append("\n"); - - return sb.toString(); - } -} diff --git a/config-model/src/main/java/com/yahoo/vespa/model/PortsMeta.java b/config-model/src/main/java/com/yahoo/vespa/model/PortsMeta.java index ea2151f9976..a0b3cc7294b 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/PortsMeta.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/PortsMeta.java @@ -7,11 +7,12 @@ import java.util.LinkedList; import java.util.List; /** - * Track metainformation about the ports of a service. + * Track meta information about the ports of a service. * * @author Vidar Larsen */ public class PortsMeta implements Serializable { + /** A list of all ports. The list elements are lists of strings. */ private List<LinkedList<String>> ports; diff --git a/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java b/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java index 9a23be1f5c5..bdba3549033 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java @@ -100,6 +100,8 @@ public final class VespaModel extends AbstractConfigProducerRoot implements Seri /** The validation overrides of this. This is never null. */ private final ValidationOverrides validationOverrides; + + private final FileDistributor fileDistributor; /** Creates a Vespa Model from internal model types only */ public VespaModel(ApplicationPackage app) throws IOException, SAXException { @@ -130,23 +132,38 @@ public final class VespaModel extends AbstractConfigProducerRoot implements Seri * @param deployState the global deploy state to use for this model. 
*/ public VespaModel(ConfigModelRegistry configModelRegistry, DeployState deployState) throws IOException, SAXException { + this(configModelRegistry, deployState, true, null); + } + + private VespaModel(ConfigModelRegistry configModelRegistry, DeployState deployState, boolean complete, FileDistributor fileDistributor) throws IOException, SAXException { super("vespamodel"); this.deployState = deployState; this.validationOverrides = deployState.validationOverrides(); configModelRegistry = new VespaConfigModelRegistry(configModelRegistry); VespaModelBuilder builder = new VespaDomBuilder(); root = builder.getRoot(VespaModel.ROOT_CONFIGID, deployState, this); - configModelRepo.readConfigModels(deployState, builder, root, configModelRegistry); - addServiceClusters(deployState.getApplicationPackage(), builder); - setupRouting(); - log.log(LogLevel.DEBUG, "hostsystem=" + getHostSystem()); - this.info = Optional.of(createProvisionInfo()); - getAdmin().addPerHostServices(getHostSystem().getHosts(), deployState.getProperties()); - freezeModelTopology(); - root.prepare(configModelRepo); - configModelRepo.prepareConfigModels(); - validateWrapExceptions(); - this.deployState = null; + if (complete) { // create a a completed, frozen model + configModelRepo.readConfigModels(deployState, builder, root, configModelRegistry); + addServiceClusters(deployState.getApplicationPackage(), builder); + this.info = Optional.of(createProvisionInfo()); // must happen after the two lines above + setupRouting(); + this.fileDistributor = root.getFileDistributionConfigProducer().getFileDistributor(); + getAdmin().addPerHostServices(getHostSystem().getHosts(), deployState.getProperties()); + freezeModelTopology(); + root.prepare(configModelRepo); + configModelRepo.prepareConfigModels(); + validateWrapExceptions(); + this.deployState = null; + } + else { // create a model with no services instantiated and the given file distributor + this.info = Optional.of(createProvisionInfo()); + 
this.fileDistributor = fileDistributor; + } + } + + /** Creates a mutable model with no services instantiated */ + public static VespaModel createIncomplete(DeployState deployState) throws IOException, SAXException { + return new VespaModel(new NullConfigModelRegistry(), deployState, false, new FileDistributor(deployState.getFileRegistry())); } private ProvisionInfo createProvisionInfo() { @@ -192,7 +209,8 @@ public final class VespaModel extends AbstractConfigProducerRoot implements Seri } public FileDistributor getFileDistributor() { - return root.getFileDistributionConfigProducer().getFileDistributor(); + // return root.getFileDistributionConfigProducer().getFileDistributor(); + return fileDistributor; } /** Returns this models Vespa instance */ @@ -437,9 +455,8 @@ public final class VespaModel extends AbstractConfigProducerRoot implements Seri @Override public DeployState getDeployState() { - if (deployState == null) { + if (deployState == null) throw new IllegalStateException("Cannot call getDeployState() once model has been built"); - } return deployState; } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java b/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java index 9b234435ce2..adbd4d7bae1 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java @@ -4,7 +4,6 @@ package com.yahoo.vespa.model; import com.google.inject.Inject; import com.yahoo.component.provider.ComponentRegistry; import com.yahoo.config.application.api.ApplicationPackage; -import com.yahoo.config.application.api.DeployLogger; import com.yahoo.config.model.ConfigModelRegistry; import com.yahoo.config.model.MapConfigModelRegistry; import com.yahoo.config.model.NullConfigModelRegistry; @@ -89,7 +88,6 @@ public class VespaModelFactory implements ModelFactory { if (modelContext.appDir().isPresent()) { ApplicationPackageXmlFilesValidator 
validator = ApplicationPackageXmlFilesValidator.createDefaultXMLValidator(modelContext.appDir().get(), - modelContext.deployLogger(), modelContext.vespaVersion()); try { validator.checkApplication(); @@ -101,7 +99,7 @@ public class VespaModelFactory implements ModelFactory { } } else { - validateXML(modelContext.applicationPackage(), modelContext.deployLogger(), ignoreValidationErrors); + validateXML(modelContext.applicationPackage(), ignoreValidationErrors); } DeployState deployState = createDeployState(modelContext); VespaModel model = buildModel(deployState); @@ -173,9 +171,9 @@ public class VespaModelFactory implements ModelFactory { return modelContext.properties().hostedVespa() && id.isHostedVespaRoutingApplication(); } - private void validateXML(ApplicationPackage applicationPackage, DeployLogger deployLogger, boolean ignoreValidationErrors) { + private void validateXML(ApplicationPackage applicationPackage, boolean ignoreValidationErrors) { try { - applicationPackage.validateXML(deployLogger); + applicationPackage.validateXML(); } catch (IllegalArgumentException e) { rethrowUnlessIgnoreErrors(e, ignoreValidationErrors); } catch (Exception e) { @@ -185,7 +183,7 @@ public class VespaModelFactory implements ModelFactory { private List<ConfigChangeAction> validateModel(VespaModel model, DeployState deployState, boolean ignoreValidationErrors) { try { - deployState.getApplicationPackage().validateXML(deployState.getDeployLogger()); + deployState.getApplicationPackage().validateXML(); return Validation.validate(model, ignoreValidationErrors, deployState); } catch (IllegalArgumentException e) { rethrowUnlessIgnoreErrors(e, ignoreValidationErrors); diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/Admin.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/Admin.java index 38a1e59433f..67281b7816d 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/admin/Admin.java +++ 
b/config-model/src/main/java/com/yahoo/vespa/model/admin/Admin.java @@ -34,7 +34,7 @@ public class Admin extends AbstractConfigProducer implements Serializable { private static final long serialVersionUID = 1L; private final Yamas yamas; - private final Map<String,MetricsConsumer> metricsConsumers; + private final Map<String, MetricsConsumer> metricsConsumers; private final List<Configserver> configservers = new ArrayList<>(); private final List<Slobrok> slobroks = new ArrayList<>(); @@ -200,7 +200,7 @@ public class Admin extends AbstractConfigProducer implements Serializable { HostResource deployHost = getHostSystem().getHostByHostname(fileDistributor.fileSourceHost()); if (deployHostIsMissing(deployHost)) { throw new RuntimeException("Could not find host in the application's host system: '" + - fileDistributor.fileSourceHost() + "'. Hostsystem=" + getHostSystem()); + fileDistributor.fileSourceHost() + "'. Hostsystem=" + getHostSystem()); } FileDistributorService fds = new FileDistributorService(fileDistribution, host.getHost().getHostName(), @@ -245,4 +245,5 @@ public class Admin extends AbstractConfigProducer implements Serializable { public boolean multitenant() { return multitenant; } + } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/Configserver.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/Configserver.java index 11508ba91ed..47332b064da 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/admin/Configserver.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/Configserver.java @@ -96,10 +96,12 @@ public class Configserver extends AbstractService { // TODO: Remove this implementation when we are on Hosted Vespa. 
public static class Spec implements ConfigServerSpec { + private final String hostName; private final int configServerPort; private final int httpPort; private final int zooKeeperPort; + public String getHostName() { return hostName; } @@ -142,4 +144,5 @@ public class Configserver extends AbstractService { this.zooKeeperPort = zooKeeperPort; } } + } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidator.java new file mode 100644 index 00000000000..dbfad0a1370 --- /dev/null +++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidator.java @@ -0,0 +1,210 @@ +package com.yahoo.vespa.model.application.validation; + +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.JsonToken; +import com.google.common.base.Joiner; +import com.yahoo.tensor.TensorType; + +import java.io.IOException; +import java.io.Reader; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * ConstantTensorJsonValidator strictly validates a constant tensor in JSON format read from a Reader object + * + * @author Vegard Sjonfjell + */ +public class ConstantTensorJsonValidator { + private static final String FIELD_CELLS = "cells"; + private static final String FIELD_ADDRESS = "address"; + private static final String FIELD_VALUE = "value"; + + private static final JsonFactory jsonFactory = new JsonFactory(); + private JsonParser parser; + + private Map<String, TensorType.Dimension> tensorDimensions; + + public static class InvalidConstantTensor extends RuntimeException { + public InvalidConstantTensor(JsonParser parser, String message) { + 
super(message + " " + parser.getCurrentLocation().toString()); + } + + public InvalidConstantTensor(JsonParser parser, Exception base) { + super("Failed to parse JSON stream " + parser.getCurrentLocation().toString(), base); + } + } + + @FunctionalInterface + private static interface SubroutineThrowingIOException { + void invoke() throws IOException; + } + + private void wrapIOException(SubroutineThrowingIOException lambda) { + try { + lambda.invoke(); + } catch (IOException e) { + throw new InvalidConstantTensor(parser, e); + } + } + + public ConstantTensorJsonValidator(Reader tensorFile, TensorType tensorType) { + wrapIOException(() -> { + this.parser = jsonFactory.createParser(tensorFile); + this.tensorDimensions = tensorType + .dimensions() + .stream() + .collect(Collectors.toMap(TensorType.Dimension::name, Function.identity())); + }); + } + + public void validate() { + wrapIOException(() -> { + assertNextTokenIs(JsonToken.START_OBJECT); + assertNextTokenIs(JsonToken.FIELD_NAME); + assertFieldNameIs(FIELD_CELLS); + + assertNextTokenIs(JsonToken.START_ARRAY); + + while (parser.nextToken() != JsonToken.END_ARRAY) { + validateTensorCell(); + } + + assertNextTokenIs(JsonToken.END_OBJECT); + }); + } + + private void validateTensorCell() { + wrapIOException(() -> { + assertCurrentTokenIs(JsonToken.START_OBJECT); + + final List<String> fieldNameCandidates = new ArrayList<>(Arrays.asList(FIELD_ADDRESS, FIELD_VALUE)); + for (int i = 0; i < 2; i++) { + assertNextTokenIs(JsonToken.FIELD_NAME); + final String fieldName = parser.getCurrentName(); + + if (fieldNameCandidates.contains(fieldName)) { + fieldNameCandidates.remove(fieldName); + + if (fieldName.equals(FIELD_ADDRESS)) { + validateTensorAddress(); + } else if (fieldName.equals(FIELD_VALUE)) { + validateTensorValue(); + } + } else { + throw new InvalidConstantTensor(parser, "Only \"address\" or \"value\" fields are permitted within a cell object"); + } + } + + assertNextTokenIs(JsonToken.END_OBJECT); + }); + } + + 
private void validateTensorAddress() throws IOException { + assertNextTokenIs(JsonToken.START_OBJECT); + + final Set<String> cellDimensions = new HashSet<>(tensorDimensions.keySet()); + + // Iterate within the address key, value pairs + while ((parser.nextToken() != JsonToken.END_OBJECT)) { + assertCurrentTokenIs(JsonToken.FIELD_NAME); + + final String dimensionName = parser.getCurrentName(); + TensorType.Dimension dimension = tensorDimensions.get(dimensionName); + if (dimension == null) { + throw new InvalidConstantTensor(parser, String.format("Tensor dimension \"%s\" does not exist", parser.getCurrentName())); + } + + if (!cellDimensions.contains(dimensionName)) { + throw new InvalidConstantTensor(parser, String.format("Duplicate tensor dimension \"%s\"", parser.getCurrentName())); + } + + cellDimensions.remove(dimensionName); + validateTensorCoordinate(dimension); + } + + if (!cellDimensions.isEmpty()) { + throw new InvalidConstantTensor(parser, String.format("Tensor address missing dimension(s): %s", Joiner.on(", ").join(cellDimensions))); + } + } + + /* + * Tensor coordinates are always strings. Coordinates for a mapped dimension can be any string, + * but those for indexed dimensions needs to be able to be interpreted as integers, and, + * additionally, those for indexed bounded dimensions needs to fall within the dimension size. 
+ */ + private void validateTensorCoordinate(TensorType.Dimension dimension) throws IOException { + assertNextTokenIs(JsonToken.VALUE_STRING); + + if (dimension instanceof TensorType.IndexedBoundDimension) { + validateBoundedCoordinate((TensorType.IndexedBoundDimension) dimension); + } else if (dimension instanceof TensorType.IndexedUnboundDimension) { + validateUnboundedCoordinate(dimension); + } + } + + private void validateBoundedCoordinate(TensorType.IndexedBoundDimension dimension) { + wrapIOException(() -> { + try { + final int value = Integer.parseInt(parser.getValueAsString()); + if (value >= dimension.size().get()) { + throw new InvalidConstantTensor(parser, String.format("Coordinate \"%s\" not within limits of bounded dimension %s", value, dimension.name())); + + } + } catch (NumberFormatException e) { + throwCoordinateIsNotInteger(parser.getValueAsString(), dimension.name()); + } + }); + } + + private void validateUnboundedCoordinate(TensorType.Dimension dimension) { + wrapIOException(() -> { + try { + Integer.parseInt(parser.getValueAsString()); + } catch (NumberFormatException e) { + throwCoordinateIsNotInteger(parser.getValueAsString(), dimension.name()); + } + }); + } + + private void throwCoordinateIsNotInteger(String value, String dimensionName) { + throw new InvalidConstantTensor(parser, String.format("Coordinate \"%s\" for dimension %s is not an integer", value, dimensionName)); + } + + private void validateTensorValue() throws IOException { + final JsonToken token = parser.nextToken(); + + if (token != JsonToken.VALUE_NUMBER_FLOAT && token != JsonToken.VALUE_NUMBER_INT) { + throw new InvalidConstantTensor(parser, String.format("Tensor value is not a number (%s)", token.toString())); + } + } + + private void assertCurrentTokenIs(JsonToken wantedToken) { + assertTokenIs(parser.getCurrentToken(), wantedToken); + } + + private void assertNextTokenIs(JsonToken wantedToken) throws IOException { + assertTokenIs(parser.nextToken(), wantedToken); + } + + 
private void assertTokenIs(JsonToken token, JsonToken wantedToken) { + if (token != wantedToken) { + throw new InvalidConstantTensor(parser, String.format("Expected JSON token %s, but got %s", wantedToken.toString(), token.toString())); + } + } + + private void assertFieldNameIs(String wantedFieldName) throws IOException { + final String actualFieldName = parser.getCurrentName(); + + if (!actualFieldName.equals(wantedFieldName)) { + throw new InvalidConstantTensor(parser, String.format("Expected field name \"%s\", got \"%s\"", wantedFieldName, actualFieldName)); + } + } +} diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/VespaModelBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/VespaModelBuilder.java index 75e9caefbd5..bcf523e1c99 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/builder/VespaModelBuilder.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/VespaModelBuilder.java @@ -27,4 +27,5 @@ public abstract class VespaModelBuilder { * @param configModelRepo a {@link com.yahoo.config.model.ConfigModelRepo instance} */ public abstract void postProc(AbstractConfigProducer producerRoot, ConfigModelRepo configModelRepo); + } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomClientsBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomClientsBuilder.java index 876017e16bc..f1829a1d718 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomClientsBuilder.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomClientsBuilder.java @@ -35,4 +35,5 @@ public class DomClientsBuilder extends LegacyConfigModelBuilder<Clients> { throw new IllegalArgumentException("Version '" + version + "' of 'clients' not supported."); } } + } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomV20ClientsBuilder.java 
b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomV20ClientsBuilder.java index cea325b785f..b4070c67ae1 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomV20ClientsBuilder.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomV20ClientsBuilder.java @@ -1,40 +1,17 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.model.builder.xml.dom; -import com.yahoo.component.ComponentId; -import com.yahoo.component.ComponentSpecification; -import com.yahoo.component.chain.Phase; -import com.yahoo.component.chain.dependencies.Dependencies; -import com.yahoo.component.chain.model.ChainSpecification; -import com.yahoo.component.chain.model.ChainedComponentModel; -import com.yahoo.config.model.ConfigModelUtils; import com.yahoo.vespa.config.content.spooler.SpoolerConfig; import com.yahoo.config.model.producer.AbstractConfigProducer; -import com.yahoo.container.bundle.BundleInstantiationSpecification; -import com.yahoo.osgi.provider.model.ComponentModel; import com.yahoo.text.XML; import com.yahoo.vespa.defaults.Defaults; import com.yahoo.vespa.model.SimpleConfigProducer; import com.yahoo.vespa.model.builder.xml.dom.VespaDomBuilder.DomConfigProducerBuilder; -import com.yahoo.vespa.model.builder.xml.dom.chains.docproc.DomDocprocChainsBuilder; import com.yahoo.vespa.model.clients.Clients; -import com.yahoo.vespa.model.clients.HttpGatewayOwner; import com.yahoo.vespa.model.clients.VespaSpoolMaster; import com.yahoo.vespa.model.clients.VespaSpooler; import com.yahoo.vespa.model.clients.VespaSpoolerProducer; import com.yahoo.vespa.model.clients.VespaSpoolerService; -import com.yahoo.vespa.model.container.Container; -import com.yahoo.vespa.model.container.ContainerCluster; -import com.yahoo.vespa.model.container.component.Handler; -import com.yahoo.vespa.model.container.component.chain.ProcessingHandler; -import 
com.yahoo.vespa.model.container.docproc.ContainerDocproc; -import com.yahoo.vespa.model.container.docproc.DocprocChains; -import com.yahoo.vespa.model.container.search.ContainerHttpGateway; -import com.yahoo.vespa.model.container.search.ContainerSearch; -import com.yahoo.vespa.model.container.search.searchchain.SearchChain; -import com.yahoo.vespa.model.container.search.searchchain.SearchChains; -import com.yahoo.vespa.model.container.search.searchchain.Searcher; -import com.yahoo.vespa.model.container.xml.ContainerModelBuilder; import com.yahoo.vespaclient.config.FeederConfig; import org.w3c.dom.Element; import org.w3c.dom.Node; @@ -43,9 +20,6 @@ import org.w3c.dom.NodeList; import java.io.Serializable; import java.util.ArrayList; import java.util.List; -import java.util.Set; -import java.util.TreeSet; -import java.util.logging.Level; /** * Builds the Clients plugin @@ -54,29 +28,17 @@ import java.util.logging.Level; */ public class DomV20ClientsBuilder { - public static final String vespaClientBundleSpecification = "vespaclient-container-plugin"; - // The parent docproc plugin to register data with. private final Clients clients; DomV20ClientsBuilder(Clients clients, String version) { - this.clients = clients; - if (!version.equals("2.0")) { + if ( ! version.equals("2.0")) throw new IllegalArgumentException("Version '" + version + "' of 'clients' not supported."); - } + this.clients = clients; } public void build(Element spec) { - NodeList children = spec.getElementsByTagName("gateways"); - if (children.getLength() > 0 && clients.getConfigProducer()!=null) - clients.getConfigProducer().deployLogger().log(Level.WARNING, "The 'gateways' element is deprecated, and will be disallowed in a " + - "later version of Vespa. 
Use 'document-api' under 'jdisc' instead, see: " + - ConfigModelUtils.createDocLink("reference/services-jdisc.html")); - for (int i = 0; i < children.getLength(); i++) { - createGateways(clients.getConfigProducer(), (Element) children.item(i), clients); - } - - children = spec.getElementsByTagName("spoolers"); + NodeList children = spec.getElementsByTagName("spoolers"); for (int i = 0; i < children.getLength(); i++) { createSpoolers(clients.getConfigProducer(), (Element) children.item(i), clients); } @@ -87,29 +49,6 @@ public class DomV20ClientsBuilder { } } - static Boolean getBooleanNodeValue(Node node) { - return Boolean.valueOf(node.getFirstChild().getNodeValue()); - } - - static boolean getHttpFileServerEnabled(Element parentHttpFileServer, Element httpFileServer) { - boolean ret=false; - if (parentHttpFileServer != null) { - for (Element child : XML.getChildren(parentHttpFileServer)) { - if ("enabled".equals(child.getNodeName())) { - ret = getBooleanNodeValue(child); - } - } - } - if (httpFileServer != null) { - for (Element child : XML.getChildren(httpFileServer)) { - if ("enabled".equals(child.getNodeName())) { - ret = getBooleanNodeValue(child); - } - } - } - return ret; - } - private void createLoadTypes(Element element, Clients clients) { for (Element e : XML.getChildren(element, "type")) { String priority = e.getAttribute("default-priority"); @@ -118,31 +57,6 @@ public class DomV20ClientsBuilder { } /** - * Creates HttpGateway objects using the given xml Element. 
- * - * @param pcp AbstractConfigProducer - * @param element The xml Element - */ - private void createGateways(AbstractConfigProducer pcp, Element element, Clients clients) { - String jvmArgs = null; - if (element.hasAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME)) jvmArgs=element.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME); - - Element gatewaysFeederOptions = findFeederOptions(element); - - HttpGatewayOwner owner = new HttpGatewayOwner(pcp, getFeederConfig(null, gatewaysFeederOptions)); - ContainerCluster cluster = new ContainerHttpGatewayClusterBuilder().build(owner, element); - - int index = 0; - for (Element e : XML.getChildren(element, "gateway")) { - ContainerHttpGateway qrs = new ContainerHttpGatewayBuilder(cluster, index).build(cluster, e); - - if ("".equals(qrs.getJvmArgs()) && jvmArgs!=null) qrs.setJvmArgs(jvmArgs); - index++; - } - clients.setContainerHttpGateways(cluster); - } - - /** * Creates VespaSpooler objects using the given xml Element. */ private void createSpoolers(AbstractConfigProducer pcp, Element element, Clients clients) { @@ -170,13 +84,10 @@ public class DomV20ClientsBuilder { } } - private void createSpoolMasters(SimpleConfigProducer producer, - Element element) { + private void createSpoolMasters(SimpleConfigProducer producer, Element element) { int i=0; - for (Element e : XML.getChildren(element, "spoolmaster")) { - VespaSpoolMaster master = new VespaSpoolMasterBuilder(i).build(producer, e); - i++; - } + for (Element e : XML.getChildren(element, "spoolmaster")) + new VespaSpoolMasterBuilder(i++).build(producer, e); } private SpoolerConfig.Builder getSpoolConfig(Element conf) { @@ -313,133 +224,6 @@ public class DomV20ClientsBuilder { } } - public static class ContainerHttpGatewayClusterBuilder extends DomConfigProducerBuilder<ContainerCluster> { - @Override - protected ContainerCluster doBuild(AbstractConfigProducer parent, - Element spec) { - - ContainerCluster cluster = new ContainerCluster(parent, "gateway", "gateway"); - - 
SearchChains searchChains = new SearchChains(cluster, "searchchain"); - Set<ComponentSpecification> inherited = new TreeSet<>(); - //inherited.add(new ComponentSpecification("vespa", null, null)); - { - SearchChain mySearchChain = new SearchChain(new ChainSpecification(new ComponentId("vespaget"), - new ChainSpecification.Inheritance(inherited, null), new ArrayList<>(), new TreeSet<>())); - Searcher getComponent = newVespaClientSearcher("com.yahoo.storage.searcher.GetSearcher"); - mySearchChain.addInnerComponent(getComponent); - searchChains.add(mySearchChain); - } - { - SearchChain mySearchChain = new SearchChain(new ChainSpecification(new ComponentId("vespavisit"), - new ChainSpecification.Inheritance(inherited, null), new ArrayList<>(), new TreeSet<>())); - Searcher getComponent = newVespaClientSearcher("com.yahoo.storage.searcher.VisitSearcher"); - mySearchChain.addInnerComponent(getComponent); - searchChains.add(mySearchChain); - } - - ContainerSearch containerSearch = new ContainerSearch(cluster, searchChains, new ContainerSearch.Options()); - cluster.setSearch(containerSearch); - - cluster.addComponent(newVespaClientHandler("com.yahoo.feedhandler.VespaFeedHandler", "http://*/feed")); - cluster.addComponent(newVespaClientHandler("com.yahoo.feedhandler.VespaFeedHandlerRemove", "http://*/remove")); - cluster.addComponent(newVespaClientHandler("com.yahoo.feedhandler.VespaFeedHandlerRemoveLocation", "http://*/removelocation")); - cluster.addComponent(newVespaClientHandler("com.yahoo.feedhandler.VespaFeedHandlerGet", "http://*/get")); - cluster.addComponent(newVespaClientHandler("com.yahoo.feedhandler.VespaFeedHandlerVisit", "http://*/visit")); - cluster.addComponent(newVespaClientHandler("com.yahoo.feedhandler.VespaFeedHandlerCompatibility", "http://*/document")); - cluster.addComponent(newVespaClientHandler("com.yahoo.feedhandler.VespaFeedHandlerStatus", "http://*/feedstatus")); - final ProcessingHandler<SearchChains> searchHandler = new ProcessingHandler<>( - 
cluster.getSearch().getChains(), "com.yahoo.search.handler.SearchHandler"); - searchHandler.addServerBindings("http://*/search/*"); - cluster.addComponent(searchHandler); - - ContainerModelBuilder.addDefaultHandler_legacyBuilder(cluster); - - //BEGIN HACK for docproc chains: - DocprocChains docprocChains = getDocprocChains(cluster, spec); - if (docprocChains != null) { - ContainerDocproc containerDocproc = new ContainerDocproc(cluster, docprocChains); - cluster.setDocproc(containerDocproc); - } - //END HACK - - return cluster; - } - - private Handler newVespaClientHandler(String componentId, String binding) { - Handler<AbstractConfigProducer<?>> handler = new Handler<>(new ComponentModel( - BundleInstantiationSpecification.getFromStrings(componentId, null, vespaClientBundleSpecification), "")); - handler.addServerBindings(binding); - handler.addServerBindings(binding + '/'); - return handler; - } - - private Searcher newVespaClientSearcher(String componentSpec) { - return new Searcher<>(new ChainedComponentModel( - BundleInstantiationSpecification.getFromStrings(componentSpec, null, vespaClientBundleSpecification), - new Dependencies(null, null, null))); - } - - //BEGIN HACK for docproc chains: - private DocprocChains getDocprocChains(AbstractConfigProducer qrs, Element gateways) { - Element clients = (Element) gateways.getParentNode(); - Element services = (Element) clients.getParentNode(); - if (services == null) { - return null; - } - - Element docproc = XML.getChild(services, "docproc"); - if (docproc == null) { - return null; - } - - String version = docproc.getAttribute("version"); - if (version.startsWith("1.")) { - return null; - } else if (version.startsWith("2.")) { - return null; - } else if (version.startsWith("3.")) { - return getDocprocChainsV3(qrs, docproc); - } else { - throw new IllegalArgumentException("Docproc version " + version + " unknown."); - } - } - - private DocprocChains getDocprocChainsV3(AbstractConfigProducer qrs, Element docproc) { - 
Element docprocChainsElem = XML.getChild(docproc, "docprocchains"); - if (docprocChainsElem == null) { - return null; - } - return new DomDocprocChainsBuilder(null, true).build(qrs, docprocChainsElem); - } - //END HACK - } - - public static class ContainerHttpGatewayBuilder extends DomConfigProducerBuilder<ContainerHttpGateway> { - int index; - ContainerCluster cluster; - - public ContainerHttpGatewayBuilder(ContainerCluster cluster, int index) { - this.index = index; - this.cluster = cluster; - } - - @Override - protected ContainerHttpGateway doBuild(AbstractConfigProducer parent, Element spec) { - // TODO: remove port handling - int port = 19020; - if (spec != null && spec.hasAttribute("baseport")) { - port = Integer.parseInt(spec.getAttribute("baseport")); - } - ContainerHttpGateway httpGateway = new ContainerHttpGateway(cluster, "" + index, port, index); - List<Container> containers = new ArrayList<>(); - containers.add(httpGateway); - - cluster.addContainers(containers); - return httpGateway; - } - } - /** * This class parses the feederoptions xml tag and produces Vespa config output. * @@ -553,4 +337,5 @@ public class DomV20ClientsBuilder { return builder; } } + } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/NodesSpecification.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/NodesSpecification.java index f22be501ff2..c83f6098a0f 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/NodesSpecification.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/NodesSpecification.java @@ -12,8 +12,8 @@ import java.util.Map; import java.util.Optional; /** - * A common utility class to represent a requirement for some nodes during model building. - * Such a requirement is commonly specified as a <code>nodes</code> element. + * A common utility class to represent a requirement for nodes during model building. 
+ * Such a requirement is commonly specified in services.xml as a <code>nodes</code> element. * * @author bratseth */ @@ -25,14 +25,22 @@ public class NodesSpecification { private final int groups; + /** + * Whether the capacity amount specified is required or can it be relaxed + * at the discretion of the component fulfilling it + */ + private final boolean required; + private final Optional<String> flavor; private final Optional<String> dockerImage; - private NodesSpecification(boolean dedicated, int count, int groups, Optional<String> flavor, Optional<String> dockerImage) { + private NodesSpecification(boolean dedicated, int count, int groups, boolean required, + Optional<String> flavor, Optional<String> dockerImage) { this.dedicated = dedicated; this.count = count; this.groups = groups; + this.required = required; this.flavor = flavor; this.dockerImage = dockerImage; } @@ -41,6 +49,7 @@ public class NodesSpecification { this(dedicated, nodesElement.requiredIntegerAttribute("count"), nodesElement.getIntegerAttribute("groups", 1), + nodesElement.getBooleanAttribute("required", false), Optional.ofNullable(nodesElement.getStringAttribute("flavor")), Optional.ofNullable(nodesElement.getStringAttribute("docker-image"))); } @@ -78,7 +87,7 @@ public class NodesSpecification { /** Returns a requirement from <code>count</code> nondedicated nodes in one group */ public static NodesSpecification nonDedicated(int count) { - return new NodesSpecification(false, count, 1, Optional.empty(), Optional.empty()); + return new NodesSpecification(false, count, 1, false, Optional.empty(), Optional.empty()); } /** @@ -95,7 +104,7 @@ public class NodesSpecification { public Map<HostResource, ClusterMembership> provision(HostSystem hostSystem, ClusterSpec.Type clusterType, ClusterSpec.Id clusterId, DeployLogger logger) { ClusterSpec cluster = ClusterSpec.request(clusterType, clusterId, dockerImage); - return hostSystem.allocateHosts(cluster, Capacity.fromNodeCount(count, flavor), 
groups, logger); + return hostSystem.allocateHosts(cluster, Capacity.fromNodeCount(count, flavor, required), groups, logger); } @Override diff --git a/config-model/src/main/java/com/yahoo/vespa/model/clients/ContainerDocumentApi.java b/config-model/src/main/java/com/yahoo/vespa/model/clients/ContainerDocumentApi.java index 55cfc8b2fba..c1ad6eead47 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/clients/ContainerDocumentApi.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/clients/ContainerDocumentApi.java @@ -26,7 +26,7 @@ import java.util.Set; import java.util.TreeSet; /** - * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a> + * @author Einar M R Rosenvinge * @since 5.1.11 */ public class ContainerDocumentApi implements FeederConfig.Producer { diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerCluster.java index 268f7a87f2c..485e206f9a3 100755 --- a/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerCluster.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerCluster.java @@ -243,8 +243,7 @@ public final class ContainerCluster Handler<AbstractConfigProducer<?>> statusHandler = new Handler<>( new ComponentModel(BundleInstantiationSpecification.getInternalHandlerSpecificationFromStrings( "com.yahoo.container.handler.observability.ApplicationStatusHandler", null), null)); - statusHandler.addServerBindings("http://*/ApplicationStatus", - "https://*/ApplicationStatus"); + statusHandler.addServerBindings("http://*/ApplicationStatus", "https://*/ApplicationStatus"); addComponent(statusHandler); } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java index 6d83c614044..1351933fbc8 100644 --- 
a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java @@ -14,7 +14,9 @@ import com.yahoo.config.model.producer.AbstractConfigProducer; import com.yahoo.config.provision.Capacity; import com.yahoo.config.provision.ClusterSpec; import com.yahoo.config.provision.ClusterMembership; +import com.yahoo.config.provision.NodeType; import com.yahoo.container.jdisc.config.MetricDefaultsConfig; +import com.yahoo.path.Path; import com.yahoo.search.rendering.RendererRegistry; import com.yahoo.text.XML; import com.yahoo.vespa.defaults.Defaults; @@ -103,7 +105,6 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> { checkVersion(spec); this.log = modelContext.getDeployLogger(); - ContainerCluster cluster = createContainerCluster(spec, modelContext); addClusterContent(cluster, spec, modelContext); addBundlesForPlatformComponents(cluster); @@ -396,9 +397,11 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> { } private List<Container> createNodes(ContainerCluster cluster, Element nodesElement, ConfigModelContext context) { - if (nodesElement.hasAttribute("count")) + if (nodesElement.hasAttribute("count")) // regular, hosted node spec return createNodesFromNodeCount(cluster, nodesElement); - else if (nodesElement.hasAttribute("of")) + else if (nodesElement.hasAttribute("type")) // internal use for hosted system infrastructure nodes + return createNodesFromNodeType(cluster, nodesElement); + else if (nodesElement.hasAttribute("of")) // hosted node spec referencing a content cluster return createNodesFromContentServiceReference(cluster, nodesElement, context); else // the non-hosted option return createNodesFromNodeList(cluster, nodesElement); @@ -452,6 +455,17 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> { log); return createNodesFromHosts(hosts, cluster); } + + 
private List<Container> createNodesFromNodeType(ContainerCluster cluster, Element nodesElement) { + NodeType type = NodeType.valueOf(nodesElement.getAttribute("type")); + ClusterSpec clusterSpec = ClusterSpec.request(ClusterSpec.Type.container, + ClusterSpec.Id.from(cluster.getName()), + Optional.empty()); + Map<HostResource, ClusterMembership> hosts = + cluster.getRoot().getHostSystem().allocateHosts(clusterSpec, + Capacity.fromRequiredNodeType(type), 1, log); + return createNodesFromHosts(hosts, cluster); + } private List<Container> createNodesFromContentServiceReference(ContainerCluster cluster, Element nodesElement, ConfigModelContext context) { // Resolve references to content clusters at the XML level because content clusters must be built after container clusters @@ -475,7 +489,7 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> { context.getDeployLogger()); return createNodesFromHosts(hosts, cluster); } - + /** Returns the services element above the given Element, or empty if there is no services element */ private Optional<Element> servicesRootOf(Element element) { Node parent = element.getParentNode(); @@ -502,8 +516,7 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> { List<Container> nodes = new ArrayList<>(); int nodeCount = 0; for (Element nodeElem: XML.getChildren(nodesElement, "node")) { - Container container = new ContainerServiceBuilder("container." + nodeCount, nodeCount).build(cluster, nodeElem); - nodes.add(container); + nodes.add(new ContainerServiceBuilder("container." 
+ nodeCount, nodeCount).build(cluster, nodeElem)); nodeCount++; } return nodes; @@ -574,9 +587,7 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> { private ContainerDocumentApi buildDocumentApi(ContainerCluster cluster, Element spec) { Element documentApiElement = XML.getChild(spec, "document-api"); - if (documentApiElement == null) { - return null; - } + if (documentApiElement == null) return null; ContainerDocumentApi.Options documentApiOptions = DocumentApiOptionsBuilder.build(documentApiElement); return new ContainerDocumentApi(cluster, documentApiOptions); diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/DocumentApiOptionsBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/DocumentApiOptionsBuilder.java index df2090db166..b1ffd55b0f0 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/DocumentApiOptionsBuilder.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/DocumentApiOptionsBuilder.java @@ -12,10 +12,11 @@ import java.util.List; import java.util.logging.Logger; /** - * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a> + * @author Einar M R Rosenvinge * @since 5.1.11 */ public class DocumentApiOptionsBuilder { + private static final Logger log = Logger.getLogger(DocumentApiOptionsBuilder.class.getName()); private static final String[] DEFAULT_BINDINGS = {"http://*/", "https://*/"}; @@ -116,4 +117,5 @@ public class DocumentApiOptionsBuilder { String value = getCleanValue(spec, "abortondocumenterror"); return value == null ? 
null : Boolean.parseBoolean(value); } + } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java index 56e275e74ac..7e24285c6fb 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java @@ -31,6 +31,7 @@ public class ContentSearchCluster extends AbstractConfigProducer implements Prot /** The single, indexed search cluster this sets up (supporting multiple document types), or null if none */ private IndexedSearchCluster indexedCluster; + private Redundancy redundancy; private final String clusterName; Map<String, NewDocumentType> documentDefinitions; @@ -254,6 +255,7 @@ public class ContentSearchCluster extends AbstractConfigProducer implements Prot if (usesHierarchicDistribution()) { indexedCluster.setMaxNodesDownPerFixedRow((redundancy.effectiveFinalRedundancy() / groupToSpecMap.size()) - 1); } + this.redundancy = redundancy; } @Override @@ -287,6 +289,9 @@ public class ContentSearchCluster extends AbstractConfigProducer implements Prot if (tuning != null) { tuning.getConfig(builder); } + if (redundancy != null) { + redundancy.getConfig(builder); + } } @Override diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/Redundancy.java b/config-model/src/main/java/com/yahoo/vespa/model/content/Redundancy.java index 262c985e733..918bdcb8cb7 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/content/Redundancy.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/content/Redundancy.java @@ -2,19 +2,21 @@ package com.yahoo.vespa.model.content; import com.yahoo.vespa.config.content.StorDistributionConfig; +import com.yahoo.vespa.config.search.core.ProtonConfig; /** * Configuration of the redundancy of a content cluster. 
* * @author bratseth */ -public class Redundancy implements StorDistributionConfig.Producer { +public class Redundancy implements StorDistributionConfig.Producer, ProtonConfig.Producer { private final int initialRedundancy ; private final int finalRedundancy; private final int readyCopies; private int implicitGroups = 1; + private int explicitGroups = 1; /** The total number of nodes available in this cluster (assigned when this becomes known) */ private int totalNodes = 0; @@ -39,6 +41,7 @@ public class Redundancy implements StorDistributionConfig.Producer { * values returned in the config. */ public void setImplicitGroups(int implicitGroups) { this.implicitGroups = implicitGroups; } + public void setExplicitGroups(int explicitGroups) { this.explicitGroups = explicitGroups; } public int initialRedundancy() { return initialRedundancy; } public int finalRedundancy() { return finalRedundancy; } @@ -54,4 +57,11 @@ public class Redundancy implements StorDistributionConfig.Producer { builder.redundancy(effectiveFinalRedundancy()); builder.ready_copies(effectiveReadyCopies()); } + @Override + public void getConfig(ProtonConfig.Builder builder) { + ProtonConfig.Distribution.Builder distBuilder = new ProtonConfig.Distribution.Builder(); + distBuilder.redundancy(finalRedundancy/explicitGroups); + distBuilder.searchablecopies(readyCopies/(explicitGroups*implicitGroups)); + builder.distribution(distBuilder); + } } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java index fa417b34844..ef05a3d6ff5 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java @@ -104,8 +104,7 @@ public class ContentCluster extends AbstractConfigProducer implements StorDistri String routingSelection = new 
DocumentSelectionBuilder().build(contentElement.getChild("documents")); Redundancy redundancy = new RedundancyBuilder().build(contentElement); - ContentCluster c = new ContentCluster(ancestor, getClusterName(contentElement), documentDefinitions, - routingSelection, redundancy); + ContentCluster c = new ContentCluster(ancestor, getClusterName(contentElement), documentDefinitions, routingSelection, redundancy); c.clusterControllerConfig = new ClusterControllerConfig.Builder(getClusterName(contentElement), contentElement).build(c, contentElement.getXml()); c.search = new ContentSearchCluster.Builder(documentDefinitions).build(c, contentElement.getXml()); c.persistenceFactory = new EngineFactoryBuilder().build(contentElement, c); @@ -113,6 +112,7 @@ public class ContentCluster extends AbstractConfigProducer implements StorDistri c.distributorNodes = new DistributorCluster.Builder(c).build(c, w3cContentElement); c.rootGroup = new StorageGroup.Builder(contentElement, c, deployLogger).buildRootGroup(); validateThatGroupSiblingsAreUnique(c.clusterName, c.rootGroup); + redundancy.setExplicitGroups(c.getRootGroup().getNumberOfLeafGroups()); c.search.handleRedundancy(redundancy); IndexedSearchCluster index = c.search.getIndexed(); diff --git a/config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributionConfigProducer.java b/config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributionConfigProducer.java index 630118cc60c..095a5e29450 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributionConfigProducer.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributionConfigProducer.java @@ -13,6 +13,7 @@ import java.util.Map; * @author tonytv */ public class FileDistributionConfigProducer extends AbstractConfigProducer { + private final Map<Host, FileDistributorService> fileDistributorServices = new IdentityHashMap<>(); private final FileDistributor fileDistributor; 
private final FileDistributionOptions options; @@ -56,4 +57,5 @@ public class FileDistributionConfigProducer extends AbstractConfigProducer { return new FileDistributionConfigProducer(ancestor, fileDistributor, options); } } + } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributor.java b/config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributor.java index 4dc24618a61..df7b4f58ab5 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributor.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributor.java @@ -117,4 +117,5 @@ public class FileDistributor { result.addAll(asList(additionalHosts)); return result; } + } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java b/config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java index 4ab9ed3af85..84685ecef3d 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java @@ -32,8 +32,8 @@ public class FileSender implements Serializable { public static FileReference sendFileToServices(String relativePath, Collection<? extends AbstractService> services) { if (services.isEmpty()) { - throw new IllegalStateException("'sendFileToServices called for empty services!" + - " - This should never happen!"); + throw new IllegalStateException("No service instances. Probably a standalone cluster setting up <nodes> " + + "using 'count' instead of <node> tags."); } FileReference fileref = null; for (AbstractService service : services) { @@ -146,4 +146,5 @@ public class FileSender implements Serializable { } builder.setValue(reference.value()); } + }
\ No newline at end of file diff --git a/config-model/src/main/java/com/yahoo/vespa/model/utils/FreezableMap.java b/config-model/src/main/java/com/yahoo/vespa/model/utils/FreezableMap.java index a05008cc9a0..211413f9bff 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/utils/FreezableMap.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/utils/FreezableMap.java @@ -4,8 +4,9 @@ package com.yahoo.vespa.model.utils; import java.util.*; /** - * Delegates to a map that can be froozen. + * Delegates to a map that can be frozen. * Not thread safe. + * * @author tonytv */ public class FreezableMap<K, V> implements Map<K, V> { @@ -88,4 +89,5 @@ public class FreezableMap<K, V> implements Map<K, V> { public boolean isFrozen() { return frozen; } + } diff --git a/config-model/src/main/resources/schema/common.rnc b/config-model/src/main/resources/schema/common.rnc index 06e7b945c18..b89fe0d7fcb 100644 --- a/config-model/src/main/resources/schema/common.rnc +++ b/config-model/src/main/resources/schema/common.rnc @@ -23,6 +23,7 @@ Nodes = element nodes { OptionalDedicatedNodes = element nodes { attribute count { xsd:positiveInteger } & attribute flavor { xsd:string }? & + attribute required { xsd:boolean }? & attribute docker-image { xsd:string }? & attribute dedicated { xsd:boolean }? } diff --git a/config-model/src/main/resources/schema/containercluster.rnc b/config-model/src/main/resources/schema/containercluster.rnc index e878b0faf22..bf5f106a388 100644 --- a/config-model/src/main/resources/schema/containercluster.rnc +++ b/config-model/src/main/resources/schema/containercluster.rnc @@ -161,7 +161,7 @@ ProcessingInContainer = element processing { -# DOCUMENT API/GATEWAY: +# DOCUMENT API: DocumentApi = element document-api { ServerBindings & @@ -187,11 +187,17 @@ NodesOfContainerCluster = element nodes { attribute allocated-memory { text }? & attribute cpu-socket-affinity { xsd:boolean }? 
& ( - attribute of { xsd:string } + ( + attribute of { xsd:string } & + attribute required { xsd:boolean }? + ) + | + attribute type { xsd:string } | ( attribute count { xsd:positiveInteger } & attribute flavor { xsd:string }? & + attribute required { xsd:boolean }? & attribute docker-image { xsd:string }? ) | diff --git a/config-model/src/main/resources/schema/content.rnc b/config-model/src/main/resources/schema/content.rnc index 30b931053d5..c3a8386ac5e 100644 --- a/config-model/src/main/resources/schema/content.rnc +++ b/config-model/src/main/resources/schema/content.rnc @@ -216,6 +216,7 @@ ContentNodes = element nodes { ( attribute count { xsd:positiveInteger } & attribute flavor { xsd:string }? & + attribute required { xsd:boolean }? & attribute docker-image { xsd:string }? & attribute groups { xsd:positiveInteger }? ) @@ -260,6 +261,7 @@ Group = element group { element nodes { attribute count { xsd:positiveInteger } & attribute flavor { xsd:string }? & + attribute required { xsd:boolean }? & attribute docker-image { xsd:string }? & attribute groups { xsd:positiveInteger }? } diff --git a/config-model/src/main/resources/schema/deployment.rnc b/config-model/src/main/resources/schema/deployment.rnc new file mode 100644 index 00000000000..22ceab4efa5 --- /dev/null +++ b/config-model/src/main/resources/schema/deployment.rnc @@ -0,0 +1,26 @@ +# RELAX NG Compact Syntax +# Vespa Deployment file + +start = element deployment { + attribute version { "1.0" } & + Test? & + Staging? 
& + Prod* +} + +Test = element test { + text +} + +Staging = element staging { + text +} + +Prod = + element prod { + attribute global-service-id { text }?, + element region { + attribute active { xsd:boolean }, + text + }* + } diff --git a/config-model/src/main/resources/schema/schemas.xml b/config-model/src/main/resources/schema/schemas.xml index 728754e3a5f..ed39af3d490 100644 --- a/config-model/src/main/resources/schema/schemas.xml +++ b/config-model/src/main/resources/schema/schemas.xml @@ -3,4 +3,5 @@ <locatingRules xmlns="http://thaiopensource.com/ns/locating-rules/1.0"> <documentElement localName="hosts" uri="hosts.rnc"/> <documentElement localName="services" uri="services.rnc"/> + <documentElement localName="deployment" uri="deployment.rnc"/> </locatingRules> diff --git a/config-model/src/test/cfg/application/app1/deployment.xml b/config-model/src/test/cfg/application/app1/deployment.xml new file mode 100644 index 00000000000..34d2036c1a5 --- /dev/null +++ b/config-model/src/test/cfg/application/app1/deployment.xml @@ -0,0 +1,8 @@ +<deployment version="1.0"> + <test/> + <staging/> + <prod global-service-id="query"> + <region active="true">us-east-3</region> + <region active="false">us-west-1</region> + </prod> +</deployment> diff --git a/config-model/src/test/cfg/application/app_invalid_deployment_xml/deployment.xml b/config-model/src/test/cfg/application/app_invalid_deployment_xml/deployment.xml new file mode 100644 index 00000000000..7a1089a6c1e --- /dev/null +++ b/config-model/src/test/cfg/application/app_invalid_deployment_xml/deployment.xml @@ -0,0 +1,8 @@ +<deployment version="1.0"> + <test/> + <staging/> + <prod global-service-id="query"> + <region>us-east-3</region> + <region active="false">us-west-1</region> + </prod> +</deployment> diff --git a/config-model/src/test/cfg/application/app_invalid_deployment_xml/hosts.xml b/config-model/src/test/cfg/application/app_invalid_deployment_xml/hosts.xml new file mode 100644 index 00000000000..132169097cf 
--- /dev/null +++ b/config-model/src/test/cfg/application/app_invalid_deployment_xml/hosts.xml @@ -0,0 +1,9 @@ +<?xml version="1.0" encoding="utf-8" ?> +<hosts> + <host name="localhost"> + <alias>node1</alias> + </host> + <host name="schmocalhost"> + <alias>node2</alias> + </host> +</hosts> diff --git a/config-model/src/test/cfg/application/app_invalid_deployment_xml/services.xml b/config-model/src/test/cfg/application/app_invalid_deployment_xml/services.xml new file mode 100644 index 00000000000..a1702af234f --- /dev/null +++ b/config-model/src/test/cfg/application/app_invalid_deployment_xml/services.xml @@ -0,0 +1,15 @@ +<?xml version="1.0" encoding="utf-8" ?> +<services version="1.0"> + + <admin version="2.0"> + <adminserver hostalias="node1"/> + </admin> + + <container version="1.0"> + <nodes> + <node hostalias="node1" /> + </nodes> + <search/> + </container> + +</services> diff --git a/config-model/src/test/cfg/application/empty_prod_region_in_deployment_xml/deployment.xml b/config-model/src/test/cfg/application/empty_prod_region_in_deployment_xml/deployment.xml new file mode 100644 index 00000000000..d04cc5dfd65 --- /dev/null +++ b/config-model/src/test/cfg/application/empty_prod_region_in_deployment_xml/deployment.xml @@ -0,0 +1,5 @@ +<deployment version="1.0"> + <test/> + <staging/> + <prod /> +</deployment> diff --git a/config-model/src/test/cfg/application/empty_prod_region_in_deployment_xml/hosts.xml b/config-model/src/test/cfg/application/empty_prod_region_in_deployment_xml/hosts.xml new file mode 100644 index 00000000000..132169097cf --- /dev/null +++ b/config-model/src/test/cfg/application/empty_prod_region_in_deployment_xml/hosts.xml @@ -0,0 +1,9 @@ +<?xml version="1.0" encoding="utf-8" ?> +<hosts> + <host name="localhost"> + <alias>node1</alias> + </host> + <host name="schmocalhost"> + <alias>node2</alias> + </host> +</hosts> diff --git a/config-model/src/test/cfg/application/empty_prod_region_in_deployment_xml/services.xml 
b/config-model/src/test/cfg/application/empty_prod_region_in_deployment_xml/services.xml new file mode 100644 index 00000000000..a1702af234f --- /dev/null +++ b/config-model/src/test/cfg/application/empty_prod_region_in_deployment_xml/services.xml @@ -0,0 +1,15 @@ +<?xml version="1.0" encoding="utf-8" ?> +<services version="1.0"> + + <admin version="2.0"> + <adminserver hostalias="node1"/> + </admin> + + <container version="1.0"> + <nodes> + <node hostalias="node1" /> + </nodes> + <search/> + </container> + +</services> diff --git a/config-model/src/test/java/com/yahoo/config/model/ApplicationDeployTest.java b/config-model/src/test/java/com/yahoo/config/model/ApplicationDeployTest.java index d4e521ebd13..0334b3c867b 100644 --- a/config-model/src/test/java/com/yahoo/config/model/ApplicationDeployTest.java +++ b/config-model/src/test/java/com/yahoo/config/model/ApplicationDeployTest.java @@ -21,6 +21,7 @@ import com.yahoo.vespa.model.VespaModel; import com.yahoo.vespa.model.search.SearchDefinition; import org.json.JSONException; import org.junit.After; +import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; @@ -203,6 +204,28 @@ public class ApplicationDeployTest { assertThat(getSearchDefinitions(app).size(), is(6)); } + @Test + public void testThatAppWithDeploymentXmlIsValid() throws IOException { + File tmpDir = Files.createTempDir(); + IOUtils.copyDirectory(new File(TESTDIR, "app1"), tmpDir); + createAppPkg(tmpDir.getAbsolutePath()); + } + + @Ignore // TODO: Enable when code in ApplicationPackageXmlFilesValidator does validation of deployment.xml + @Test(expected = IllegalArgumentException.class) + public void testThatAppWithIllegalDeploymentXmlIsNotValid() throws IOException { + File tmpDir = Files.createTempDir(); + IOUtils.copyDirectory(new File(TESTDIR, "app_invalid_deployment_xml"), tmpDir); + createAppPkg(tmpDir.getAbsolutePath()); + } + + @Test + public void testThatAppWithIllegalEmptyProdRegion() 
throws IOException { + File tmpDir = Files.createTempDir(); + IOUtils.copyDirectory(new File(TESTDIR, "empty_prod_region_in_deployment_xml"), tmpDir); + createAppPkg(tmpDir.getAbsolutePath()); + } + private List<SearchDefinition> getSearchDefinitions(FilesApplicationPackage app) { return new DeployState.Builder().applicationPackage(app).build().getSearchDefinitions(); } diff --git a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java index 86fabdf26bc..8ed4539456a 100644 --- a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java +++ b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java @@ -522,6 +522,7 @@ public class ModelProvisioningTest { assertEquals(1, clusterControllers.getContainers().size()); // TODO: Expected 5 with this feature reactivated } + @Test public void testClusterControllersAreNotPlacedOnRetiredNodes() { String services = "<?xml version='1.0' encoding='utf-8' ?>\n" + @@ -907,6 +908,26 @@ public class ModelProvisioningTest { assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0")); } + @Test(expected = IllegalArgumentException.class) + public void testRequiringMoreNodesThanAreAvailable() throws ParseException { + String services = + "<?xml version='1.0' encoding='utf-8' ?>\n" + + "<services>" + + " <content version='1.0' id='bar'>" + + " <redundancy>1</redundancy>" + + " <documents>" + + " <document type='type1' mode='index'/>" + + " </documents>" + + " <nodes count='3' required='true'/>" + + " </content>" + + "</services>"; + + int numberOfHosts = 2; + VespaModelTester tester = new VespaModelTester(); + tester.addHosts(numberOfHosts); + tester.createModel(services, false); + } + @Test public void testUsingNodesCountAttributesAndGettingJustOneNode() { String services = diff --git 
a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidatorTest.java new file mode 100644 index 00000000000..912af4f63c1 --- /dev/null +++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ConstantTensorJsonValidatorTest.java @@ -0,0 +1,262 @@ +package com.yahoo.vespa.model.application.validation; + +import com.yahoo.tensor.TensorType; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +import java.io.Reader; +import java.io.StringReader; + +import static com.yahoo.test.json.JsonTestHelper.inputJson; +import static com.yahoo.vespa.model.application.validation.ConstantTensorJsonValidator.InvalidConstantTensor; + +public class ConstantTensorJsonValidatorTest { + private static Reader inputJsonToReader(String... lines) { + return new StringReader(inputJson(lines)); + } + + private static void validateTensorJson(TensorType tensorType, Reader jsonTensorReader) { + ConstantTensorJsonValidator validator = new ConstantTensorJsonValidator(jsonTensorReader, tensorType); + validator.validate(); + } + + @Rule + public ExpectedException expectedException = ExpectedException.none(); + + @Test + public void ensure_that_unbounded_tensor_works() { + validateTensorJson( + TensorType.fromSpec("tensor(x[], y[])"), + inputJsonToReader( + "{", + " 'cells': [", + " {", + " 'address': { 'x': '99999', 'y': '47' },", + " 'value': 9932.0", + " }", + " ]", + "}")); + } + + @Test + public void ensure_that_bounded_tensor_within_limits_works() { + validateTensorJson( + TensorType.fromSpec("tensor(x[5], y[10])"), + inputJsonToReader( + "{", + " 'cells': [", + " {", + " 'address': { 'x': '3', 'y': '2' },", + " 'value': 2.0", + " }", + " ]", + "}")); + } + + @Test + public void ensure_that_multiple_cells_work() { + validateTensorJson( + TensorType.fromSpec("tensor(x[], y[])"), + 
inputJsonToReader( + "{", + " 'cells': [", + " {", + " 'address': { 'x': '3', 'y': '2' },", + " 'value': 2.0", + " },", + " {", + " 'address': { 'x': '2', 'y': '0' },", + " 'value': 45", + " }", + " ]", + "}")); + } + + + @Test + public void ensure_that_no_cells_work() { + validateTensorJson( + TensorType.fromSpec("tensor(x[], y[])"), + inputJsonToReader( + "{", + " 'cells': []", + "}")); + } + + @Test + public void ensure_that_bounded_tensor_outside_limits_is_disallowed() { + expectedException.expect(InvalidConstantTensor.class); + expectedException.expectMessage("Coordinate \"5\" not within limits of bounded dimension x"); + + validateTensorJson( + TensorType.fromSpec("tensor(x[5], y[10])"), + inputJsonToReader( + "{", + " 'cells': [", + " {", + " 'address': { 'x': '5', 'y': '2' },", + " 'value': 1e47", + " }", + " ]", + "}")); + } + + @Test + public void ensure_that_mapped_tensor_works() { + validateTensorJson( + TensorType.fromSpec("tensor(x{}, y{})"), + inputJsonToReader( + "{", + " 'cells': [", + " {", + " 'address': { 'x': 'andrei', 'y': 'bjarne' },", + " 'value': 2.0", + " }", + " ]", + "}")); + } + + @Test + public void ensure_that_non_integer_strings_in_address_points_are_disallowed_unbounded() { + expectedException.expect(InvalidConstantTensor.class); + expectedException.expectMessage("Coordinate \"a\" for dimension x is not an integer"); + + validateTensorJson( + TensorType.fromSpec("tensor(x[])"), + inputJsonToReader( + "{", + " 'cells': [", + " {", + " 'address': { 'x': 'a' },", + " 'value': 47.0", + " }", + " ]", + "}")); + + } + + @Test + public void ensure_that_non_integer_strings_in_address_points_are_disallowed_bounded() { + expectedException.expect(InvalidConstantTensor.class); + expectedException.expectMessage("Coordinate \"a\" for dimension x is not an integer"); + + validateTensorJson( + TensorType.fromSpec("tensor(x[5])"), + inputJsonToReader( + "{", + " 'cells': [", + " {", + " 'address': { 'x': 'a' },", + " 'value': 41.0", + " }", + " ]", 
+ "}")); + + } + + @Test + public void ensure_that_missing_coordinates_fail() { + expectedException.expect(InvalidConstantTensor.class); + expectedException.expectMessage("Tensor address missing dimension(s): y, z"); + + validateTensorJson( + TensorType.fromSpec("tensor(x[], y[], z[])"), + inputJsonToReader( + "{", + " 'cells': [", + " {", + " 'address': { 'x': '3' },", + " 'value': 99.3", + " }", + " ]", + "}")); + } + + @Test + public void ensure_that_non_number_values_are_disallowed() { + expectedException.expect(InvalidConstantTensor.class); + expectedException.expectMessage("Tensor value is not a number (VALUE_STRING)"); + + validateTensorJson( + TensorType.fromSpec("tensor(x[])"), + inputJsonToReader( + "{", + " 'cells': [", + " {", + " 'address': { 'x': '3' },", + " 'value': 'fruit'", + " }", + " ]", + "}")); + } + + @Test + public void ensure_that_extra_dimensions_are_disallowed() { + expectedException.expect(InvalidConstantTensor.class); + expectedException.expectMessage("Tensor dimension \"z\" does not exist"); + + validateTensorJson( + TensorType.fromSpec("tensor(x[], y[])"), + inputJsonToReader( + "{", + " 'cells': [", + " {", + " 'address': { 'x': '3', 'y': '2', 'z': '4' },", + " 'value': 99.3", + " }", + " ]", + "}")); + } + + @Test + public void ensure_that_duplicate_dimensions_are_disallowed() { + expectedException.expect(InvalidConstantTensor.class); + expectedException.expectMessage("Duplicate tensor dimension \"y\""); + + validateTensorJson( + TensorType.fromSpec("tensor(x[], y[])"), + inputJsonToReader( + "{", + " 'cells': [", + " {", + " 'address': { 'x': '1', 'y': '2', 'y': '4' },", + " 'value': 88.1", + " }", + " ]", + "}")); + } + + @Test + public void ensure_that_invalid_json_fails() { + expectedException.expect(InvalidConstantTensor.class); + expectedException.expectMessage("Failed to parse JSON stream"); + + validateTensorJson( + TensorType.fromSpec("tensor(x[], y[])"), + inputJsonToReader( + "{", + " cells': [", + " {", + " 'address': { 
'x': '3' 'y': '2' }", + " 'value': 2.0", + " }", + " ", + "}")); + } + + @Test + public void ensure_that_invalid_json_not_in_tensor_format_fails() { + expectedException.expect(InvalidConstantTensor.class); + expectedException.expectMessage("Expected field name \"cells\", got \"stats\""); + + validateTensorJson(TensorType.fromSpec("tensor(x[], y[])"), + inputJsonToReader( + "{", + " 'stats': {", + " 'パープルゴム製のアヒルは私を殺すために望んでいます': true,", + " 'points': 47", + " }", + "}")); + } +}
\ No newline at end of file diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ContainerRestartValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ContainerRestartValidatorTest.java index 88ba6d885b8..3bec45279d9 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ContainerRestartValidatorTest.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ContainerRestartValidatorTest.java @@ -35,6 +35,14 @@ public class ContainerRestartValidatorTest { assertTrue(result.isEmpty()); } + @Test + public void validator_returns_empty_list_for_containers_with_restart_on_deploy_disabled_where_previously_enabled() { + VespaModel current = createModel(true); + VespaModel next = createModel(false); + List<ConfigChangeAction> result = validateModel(current, next); + assertTrue(result.isEmpty()); + } + private static List<ConfigChangeAction> validateModel(VespaModel current, VespaModel next) { return new ContainerRestartValidator() .validate(current, next, new ValidationOverrides(Collections.emptyList())); diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/ClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/ClusterTest.java index bcb113687ec..126fcf7a583 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/content/ClusterTest.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/content/ClusterTest.java @@ -9,6 +9,7 @@ import com.yahoo.vespa.config.content.core.StorServerConfig; import com.yahoo.vespa.config.content.FleetcontrollerConfig; import com.yahoo.vespa.config.content.StorDistributionConfig; import com.yahoo.metrics.MetricsmanagerConfig; +import com.yahoo.vespa.config.search.core.ProtonConfig; import com.yahoo.vespa.model.VespaModel; import com.yahoo.vespa.model.container.ContainerCluster; import com.yahoo.vespa.model.content.cluster.ContentCluster; @@ -34,44 
+35,99 @@ public class ClusterTest extends ContentBaseTest { } @Test - public void testRedundancy() { - StorDistributionConfig.Builder builder = new StorDistributionConfig.Builder(); - parse("" + - "<content version=\"1.0\" id=\"storage\">\n" + - " <documents/>" + - " <engine>" + - " <proton>" + - " <searchable-copies>3</searchable-copies>" + - " </proton>" + - " </engine>" + - " <redundancy reply-after=\"4\">5</redundancy>\n" + - " <group>" + - " <node hostalias=\"mockhost\" distribution-key=\"0\"/>\"" + - " <node hostalias=\"mockhost\" distribution-key=\"1\"/>\"" + - " <node hostalias=\"mockhost\" distribution-key=\"2\"/>\"" + - " <node hostalias=\"mockhost\" distribution-key=\"3\"/>\"" + - " <node hostalias=\"mockhost\" distribution-key=\"4\"/>\"" + - " </group>" + - "</content>" - ).getConfig(builder); + public void testHierarchicRedundancy() { + ContentCluster cc = parse("" + + "<content version=\"1.0\" id=\"storage\">\n" + + " <documents/>" + + " <engine>" + + " <proton>" + + " <searchable-copies>3</searchable-copies>" + + " </proton>" + + " </engine>" + + " <redundancy>15</redundancy>\n" + + " <group name='root' distribution-key='0'>" + + " <distribution partitions='1|1|*'/>" + + " <group name='g-1' distribution-key='0'>" + + " <node hostalias='mockhost' distribution-key='0'/>" + + " <node hostalias='mockhost' distribution-key='1'/>" + + " <node hostalias='mockhost' distribution-key='2'/>" + + " <node hostalias='mockhost' distribution-key='3'/>" + + " <node hostalias='mockhost' distribution-key='4'/>" + + " </group>" + + " <group name='g-2' distribution-key='1'>" + + " <node hostalias='mockhost' distribution-key='5'/>" + + " <node hostalias='mockhost' distribution-key='6'/>" + + " <node hostalias='mockhost' distribution-key='7'/>" + + " <node hostalias='mockhost' distribution-key='8'/>" + + " <node hostalias='mockhost' distribution-key='9'/>" + + " </group>" + + " <group name='g-3' distribution-key='1'>" + + " <node hostalias='mockhost' 
distribution-key='10'/>" + + " <node hostalias='mockhost' distribution-key='11'/>" + + " <node hostalias='mockhost' distribution-key='12'/>" + + " <node hostalias='mockhost' distribution-key='13'/>" + + " <node hostalias='mockhost' distribution-key='14'/>" + + " </group>" + + " </group>" + + "</content>" + ); + StorDistributionConfig.Builder storBuilder = new StorDistributionConfig.Builder(); + cc.getConfig(storBuilder); + StorDistributionConfig storConfig = new StorDistributionConfig(storBuilder); + assertEquals(15, storConfig.initial_redundancy()); + assertEquals(15, storConfig.redundancy()); + assertEquals(3, storConfig.ready_copies()); + ProtonConfig.Builder protonBuilder = new ProtonConfig.Builder(); + cc.getSearch().getConfig(protonBuilder); + ProtonConfig protonConfig = new ProtonConfig(protonBuilder); + assertEquals(1, protonConfig.distribution().searchablecopies()); + assertEquals(5, protonConfig.distribution().redundancy()); + } - StorDistributionConfig config = new StorDistributionConfig(builder); - assertEquals(4, config.initial_redundancy()); - assertEquals(5, config.redundancy()); - assertEquals(3, config.ready_copies()); + @Test + public void testRedundancy() { + ContentCluster cc = parse("" + + "<content version=\"1.0\" id=\"storage\">\n" + + " <documents/>" + + " <engine>" + + " <proton>" + + " <searchable-copies>3</searchable-copies>" + + " </proton>" + + " </engine>" + + " <redundancy reply-after='4'>5</redundancy>\n" + + " <group>" + + " <node hostalias='mockhost' distribution-key='0'/>" + + " <node hostalias='mockhost' distribution-key='1'/>" + + " <node hostalias='mockhost' distribution-key='2'/>" + + " <node hostalias='mockhost' distribution-key='3'/>" + + " <node hostalias='mockhost' distribution-key='4'/>" + + " </group>" + + "</content>" + ); + StorDistributionConfig.Builder storBuilder = new StorDistributionConfig.Builder(); + cc.getConfig(storBuilder); + StorDistributionConfig storConfig = new StorDistributionConfig(storBuilder); + 
assertEquals(4, storConfig.initial_redundancy()); + assertEquals(5, storConfig.redundancy()); + assertEquals(3, storConfig.ready_copies()); + ProtonConfig.Builder protonBuilder = new ProtonConfig.Builder(); + cc.getSearch().getConfig(protonBuilder); + ProtonConfig protonConfig = new ProtonConfig(protonBuilder); + assertEquals(3, protonConfig.distribution().searchablecopies()); + assertEquals(5, protonConfig.distribution().redundancy()); } @Test public void testNoId() { ContentCluster c = parse( - "<content version=\"1.0\">\n" + - " <redundancy>1</redundancy>\n" + - " <documents/>" + - " <redundancy reply-after=\"4\">5</redundancy>\n" + - " <group>" + - " <node hostalias=\"mockhost\" distribution-key=\"0\"/>\"" + - " </group>" + - "</content>" + "<content version=\"1.0\">\n" + + " <redundancy>1</redundancy>\n" + + " <documents/>" + + " <redundancy reply-after=\"4\">5</redundancy>\n" + + " <group>" + + " <node hostalias=\"mockhost\" distribution-key=\"0\"/>\"" + + " </group>" + + "</content>" ); assertEquals("content", c.getName()); @@ -81,14 +137,14 @@ public class ClusterTest extends ContentBaseTest { public void testRedundancyDefaults() { StorDistributionConfig.Builder builder = new StorDistributionConfig.Builder(); parse( - "<content version=\"1.0\" id=\"storage\">\n" + - " <documents/>" + - " <group>" + - " <node hostalias=\"mockhost\" distribution-key=\"0\"/>\"" + - " <node hostalias=\"mockhost\" distribution-key=\"1\"/>\"" + - " <node hostalias=\"mockhost\" distribution-key=\"2\"/>\"" + - " </group>" + - "</content>" + "<content version=\"1.0\" id=\"storage\">\n" + + " <documents/>" + + " <group>" + + " <node hostalias=\"mockhost\" distribution-key=\"0\"/>\"" + + " <node hostalias=\"mockhost\" distribution-key=\"1\"/>\"" + + " <node hostalias=\"mockhost\" distribution-key=\"2\"/>\"" + + " </group>" + + "</content>" ).getConfig(builder); StorDistributionConfig config = new StorDistributionConfig(builder); @@ -99,39 +155,40 @@ public class ClusterTest extends 
ContentBaseTest { @Test public void testEndToEnd() throws Exception { - String xml = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + - "<services>\n" + - "\n" + - " <admin version=\"2.0\">\n" + - " <adminserver hostalias=\"configserver\" />\n" + - " <logserver hostalias=\"logserver\" />\n" + - " <slobroks>\n" + - " <slobrok hostalias=\"configserver\" />\n" + - " <slobrok hostalias=\"logserver\" />\n" + - " </slobroks>\n" + - " <cluster-controllers>\n" + - " <cluster-controller hostalias=\"configserver\"/>" + - " <cluster-controller hostalias=\"configserver2\"/>" + - " <cluster-controller hostalias=\"configserver3\"/>" + - " </cluster-controllers>\n" + - " </admin>\n" + - " <content version='1.0' id='bar'>" + - " <redundancy>1</redundancy>\n" + - " <documents>" + - " <document type=\"type1\" mode=\"index\"/>\n" + - " <document type=\"type2\" mode=\"index\"/>\n" + - " </documents>\n" + - " <group>" + - " <node hostalias='node0' distribution-key='0' />" + - " </group>" + - " <tuning>" + - " <cluster-controller>\n" + - " <init-progress-time>34567</init-progress-time>" + - " </cluster-controller>" + - " </tuning>" + - " </content>" + - "\n" + - "</services>"; + String xml = + "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + + "<services>\n" + + "\n" + + " <admin version=\"2.0\">\n" + + " <adminserver hostalias=\"configserver\" />\n" + + " <logserver hostalias=\"logserver\" />\n" + + " <slobroks>\n" + + " <slobrok hostalias=\"configserver\" />\n" + + " <slobrok hostalias=\"logserver\" />\n" + + " </slobroks>\n" + + " <cluster-controllers>\n" + + " <cluster-controller hostalias=\"configserver\"/>" + + " <cluster-controller hostalias=\"configserver2\"/>" + + " <cluster-controller hostalias=\"configserver3\"/>" + + " </cluster-controllers>\n" + + " </admin>\n" + + " <content version='1.0' id='bar'>" + + " <redundancy>1</redundancy>\n" + + " <documents>" + + " <document type=\"type1\" mode=\"index\"/>\n" + + " <document type=\"type2\" mode=\"index\"/>\n" + + " 
</documents>\n" + + " <group>" + + " <node hostalias='node0' distribution-key='0' />" + + " </group>" + + " <tuning>" + + " <cluster-controller>\n" + + " <init-progress-time>34567</init-progress-time>" + + " </cluster-controller>" + + " </tuning>" + + " </content>" + + "\n" + + "</services>"; List<String> sds = ApplicationPackageUtils.generateSearchDefinitions("type1", "type2"); VespaModel model = (new VespaModelCreatorWithMockPkg(null, xml, sds)).create(); @@ -183,32 +240,33 @@ public class ClusterTest extends ContentBaseTest { @Test public void testSearchTuning() throws Exception { - String xml = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + - "<services>\n" + - "\n" + - " <admin version=\"2.0\">\n" + - " <adminserver hostalias=\"node0\" />\n" + - " <cluster-controllers>\n" + - " <cluster-controller hostalias=\"node0\"/>" + - " </cluster-controllers>\n" + - " </admin>\n" + - " <content version='1.0' id='bar'>" + - " <redundancy>1</redundancy>\n" + - " <documents>" + - " <document type=\"type1\" mode='index'/>\n" + - " <document type=\"type2\" mode='index'/>\n" + - " </documents>\n" + - " <group>" + - " <node hostalias='node0' distribution-key='0'/>" + - " </group>" + - " <tuning>\n" + - " <cluster-controller>" + - " <init-progress-time>34567</init-progress-time>" + - " </cluster-controller>" + - " </tuning>" + - " </content>" + - "\n" + - "</services>"; + String xml = + "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + + "<services>\n" + + "\n" + + " <admin version=\"2.0\">\n" + + " <adminserver hostalias=\"node0\" />\n" + + " <cluster-controllers>\n" + + " <cluster-controller hostalias=\"node0\"/>" + + " </cluster-controllers>\n" + + " </admin>\n" + + " <content version='1.0' id='bar'>" + + " <redundancy>1</redundancy>\n" + + " <documents>" + + " <document type=\"type1\" mode='index'/>\n" + + " <document type=\"type2\" mode='index'/>\n" + + " </documents>\n" + + " <group>" + + " <node hostalias='node0' distribution-key='0'/>" + + " </group>" + + " 
<tuning>\n" + + " <cluster-controller>" + + " <init-progress-time>34567</init-progress-time>" + + " </cluster-controller>" + + " </tuning>" + + " </content>" + + "\n" + + "</services>"; List<String> sds = ApplicationPackageUtils.generateSearchDefinitions("type1", "type2"); VespaModel model = new VespaModelCreatorWithMockPkg(getHosts(), xml, sds).create(); @@ -232,21 +290,22 @@ public class ClusterTest extends ContentBaseTest { @Test public void testRedundancyRequired() throws Exception { - String xml = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + - "<services>\n" + - "\n" + - " <admin version=\"2.0\">\n" + - " <adminserver hostalias=\"node0\" />\n" + - " </admin>\n" + - " <content version='1.0' id='bar'>" + - " <documents>" + - " <document type=\"type1\" mode='index'/>\n" + - " </documents>\n" + - " <group>\n" + - " <node hostalias='node0' distribution-key='0'/>\n" + - " </group>\n" + - " </content>\n" + - "</services>\n"; + String xml = + "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + + "<services>\n" + + "\n" + + " <admin version=\"2.0\">\n" + + " <adminserver hostalias=\"node0\" />\n" + + " </admin>\n" + + " <content version='1.0' id='bar'>" + + " <documents>" + + " <document type=\"type1\" mode='index'/>\n" + + " </documents>\n" + + " <group>\n" + + " <node hostalias='node0' distribution-key='0'/>\n" + + " </group>\n" + + " </content>\n" + + "</services>\n"; List<String> sds = ApplicationPackageUtils.generateSearchDefinitions("type1", "type2"); try{ @@ -261,12 +320,12 @@ public class ClusterTest extends ContentBaseTest { public void testRedundancyFinalLessThanInitial() { try { parse( - "<content version=\"1.0\" id=\"storage\">\n" + - " <redundancy reply-after=\"4\">2</redundancy>\n" + - " <group>" + - " <node hostalias='node0' distribution-key='0' />" + - " </group>" + - "</content>" + "<content version=\"1.0\" id=\"storage\">\n" + + " <redundancy reply-after=\"4\">2</redundancy>\n" + + " <group>" + + " <node hostalias='node0' distribution-key='0' />" + 
+ " </group>" + + "</content>" ); fail("no exception thrown"); } catch (Exception e) { @@ -277,17 +336,17 @@ public class ClusterTest extends ContentBaseTest { public void testReadyTooHigh() { try { parse( - "<content version=\"1.0\" id=\"storage\">\n" + - " <engine>" + - " <proton>" + - " <searchable-copies>3</searchable-copies>" + - " </proton>" + - " </engine>" + - " <redundancy>2</redundancy>\n" + - " <group>" + - " <node hostalias='node0' distribution-key='0' />" + - " </group>" + - "</content>" + "<content version=\"1.0\" id=\"storage\">\n" + + " <engine>" + + " <proton>" + + " <searchable-copies>3</searchable-copies>" + + " </proton>" + + " </engine>" + + " <redundancy>2</redundancy>\n" + + " <group>" + + " <node hostalias='node0' distribution-key='0' />" + + " </group>" + + "</content>" ); fail("no exception thrown"); } catch (Exception e) { @@ -308,12 +367,12 @@ public class ClusterTest extends ContentBaseTest { { { FleetcontrollerConfig config = getFleetControllerConfig( - "<content version=\"1.0\" id=\"storage\">\n" + - " <documents/>" + - " <group>\n" + - " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + - " </group>\n" + - "</content>" + "<content version=\"1.0\" id=\"storage\">\n" + + " <documents/>" + + " <group>\n" + + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + + " </group>\n" + + "</content>" ); assertEquals(0, config.min_storage_up_ratio(), 0.01); @@ -324,17 +383,17 @@ public class ClusterTest extends ContentBaseTest { { FleetcontrollerConfig config = getFleetControllerConfig( - "<content version=\"1.0\" id=\"storage\">\n" + - " <documents/>" + - " <group>\n" + - " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + - " <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" + - " <node distribution-key=\"2\" hostalias=\"mockhost\"/>\n" + - " <node distribution-key=\"3\" hostalias=\"mockhost\"/>\n" + - " <node distribution-key=\"4\" hostalias=\"mockhost\"/>\n" + - " <node distribution-key=\"5\" 
hostalias=\"mockhost\"/>\n" + - " </group>\n" + - "</content>" + "<content version=\"1.0\" id=\"storage\">\n" + + " <documents/>" + + " <group>\n" + + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + + " <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" + + " <node distribution-key=\"2\" hostalias=\"mockhost\"/>\n" + + " <node distribution-key=\"3\" hostalias=\"mockhost\"/>\n" + + " <node distribution-key=\"4\" hostalias=\"mockhost\"/>\n" + + " <node distribution-key=\"5\" hostalias=\"mockhost\"/>\n" + + " </group>\n" + + "</content>" ); assertNotSame(0, config.min_storage_up_ratio()); @@ -345,12 +404,12 @@ public class ClusterTest extends ContentBaseTest { public void testImplicitDistributionBits() { ContentCluster cluster = parse( - "<content version=\"1.0\" id=\"storage\">\n" + - " <documents/>" + - " <group>\n" + - " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + - " </group>\n" + - "</content>" + "<content version=\"1.0\" id=\"storage\">\n" + + " <documents/>" + + " <group>\n" + + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + + " </group>\n" + + "</content>" ); { @@ -368,15 +427,15 @@ public class ClusterTest extends ContentBaseTest { assertEquals(8, config.minsplitcount()); } cluster = parse( - "<content version=\"1.0\" id=\"storage\">\n" + - " <documents/>" + - " <engine>" + - " <vds/>" + - " </engine>" + - " <group>\n" + - " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + - " </group>\n" + - "</content>" + "<content version=\"1.0\" id=\"storage\">\n" + + " <documents/>" + + " <engine>" + + " <vds/>" + + " </engine>" + + " <group>\n" + + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + + " </group>\n" + + "</content>" ); { @@ -399,15 +458,15 @@ public class ClusterTest extends ContentBaseTest { public void testExplicitDistributionBits() { ContentCluster cluster = parse( - "<content version=\"1.0\" id=\"storage\">\n" + - " <documents/>" + - " <group>\n" + - " <node distribution-key=\"0\" 
hostalias=\"mockhost\"/>\n" + - " </group>\n" + - " <tuning>\n" + - " <distribution type=\"strict\"/>\n" + - " </tuning>\n" + - "</content>" + "<content version=\"1.0\" id=\"storage\">\n" + + " <documents/>" + + " <group>\n" + + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + + " </group>\n" + + " <tuning>\n" + + " <distribution type=\"strict\"/>\n" + + " </tuning>\n" + + "</content>" ); { @@ -425,18 +484,18 @@ public class ClusterTest extends ContentBaseTest { assertEquals(8, config.minsplitcount()); } cluster = parse( - "<content version=\"1.0\" id=\"storage\">\n" + - " <documents/>" + - " <engine>" + - " <vds/>" + - " </engine>" + - " <group>\n" + - " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + - " </group>\n" + - " <tuning>\n" + - " <distribution type=\"loose\"/>\n" + - " </tuning>\n" + - "</content>" + "<content version=\"1.0\" id=\"storage\">\n" + + " <documents/>" + + " <engine>" + + " <vds/>" + + " </engine>" + + " <group>\n" + + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + + " </group>\n" + + " <tuning>\n" + + " <distribution type=\"loose\"/>\n" + + " </tuning>\n" + + "</content>" ); { @@ -459,16 +518,16 @@ public class ClusterTest extends ContentBaseTest { public void testGenerateSearchNodes() { ContentCluster cluster = parse( - "<content version=\"1.0\" id=\"storage\">\n" + - " <documents/>" + - " <engine>" + - " <proton/>" + - " </engine>" + - " <group>\n" + - " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + - " <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" + - " </group>\n" + - "</content>" + "<content version=\"1.0\" id=\"storage\">\n" + + " <documents/>" + + " <engine>" + + " <proton/>" + + " </engine>" + + " <group>\n" + + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + + " <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" + + " </group>\n" + + "</content>" ); { @@ -492,16 +551,16 @@ public class ClusterTest extends ContentBaseTest { public void 
testAlternativeNodeSyntax() { ContentCluster cluster = parse( - "<content version=\"1.0\" id=\"test\">\n" + - " <documents/>" + - " <engine>" + - " <proton/>" + - " </engine>" + - " <nodes>\n" + - " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + - " <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" + - " </nodes>\n" + - "</content>" + "<content version=\"1.0\" id=\"test\">\n" + + " <documents/>" + + " <engine>" + + " <proton/>" + + " </engine>" + + " <nodes>\n" + + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + + " <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" + + " </nodes>\n" + + "</content>" ); StorDistributionConfig.Builder builder = new StorDistributionConfig.Builder(); @@ -519,13 +578,13 @@ public class ClusterTest extends ContentBaseTest { public void testReadyWhenInitialOne() { StorDistributionConfig.Builder builder = new StorDistributionConfig.Builder(); parse( - "<content version=\"1.0\" id=\"storage\">\n" + - " <documents/>" + - " <redundancy>1</redundancy>\n" + - " <group>\n" + - " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" + - " </group>" + - "</content>" + "<content version=\"1.0\" id=\"storage\">\n" + + " <documents/>" + + " <redundancy>1</redundancy>\n" + + " <group>\n" + + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" + + " </group>" + + "</content>" ).getConfig(builder); StorDistributionConfig config = new StorDistributionConfig(builder); @@ -536,16 +595,16 @@ public class ClusterTest extends ContentBaseTest { public void testProvider(String tagName, StorServerConfig.Persistence_provider.Type.Enum type) { ContentCluster cluster = parse( - "<content version=\"1.0\" id=\"storage\">\n" + - " <documents/>" + - " <redundancy>3</redundancy>" + - " <engine>\n" + - " <" + tagName + "/>\n" + - " </engine>\n" + - " <group>\n" + - " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" + - " </group>" + - "</content>" + "<content version=\"1.0\" id=\"storage\">\n" + + " <documents/>" + + " 
<redundancy>3</redundancy>" + + " <engine>\n" + + " <" + tagName + "/>\n" + + " </engine>\n" + + " <group>\n" + + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" + + " </group>" + + "</content>" ); { @@ -582,11 +641,11 @@ public class ClusterTest extends ContentBaseTest { MetricsmanagerConfig.Builder builder = new MetricsmanagerConfig.Builder(); ContentCluster cluster = parse("<content version=\"1.0\" id=\"storage\">\n" + - " <documents/>" + - " <group>\n" + - " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + - " </group>\n" + - "</content>" + " <documents/>" + + " <group>\n" + + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + + " </group>\n" + + "</content>" ); cluster.getConfig(builder); @@ -642,34 +701,34 @@ public class ClusterTest extends ContentBaseTest { @Test public void testConfiguredMetrics() throws Exception { String xml = "" + - "<services>" + - "<content version=\"1.0\" id=\"storage\">\n" + - " <redundancy>1</redundancy>\n" + - " <documents>" + - " <document type=\"type1\" mode='index'/>\n" + - " <document type=\"type2\" mode='index'/>\n" + - " </documents>" + - " <group>\n" + - " <node distribution-key=\"0\" hostalias=\"node0\"/>\n" + - " </group>\n" + - "</content>" + - "<admin version=\"2.0\">" + - " <logserver hostalias=\"node0\"/>" + - " <adminserver hostalias=\"node0\"/>" + - " <metric-consumers>" + - " <consumer name=\"foobar\">" + - " <metric name=\"storage.foo.bar\"/>" + - " </consumer>" + - " <consumer name=\"log\">" + - " <metric name=\"extralogmetric\"/>" + - " <metric name=\"extralogmetric3\"/>" + - " </consumer>" + - " <consumer name=\"fleetcontroller\">" + - " <metric name=\"extraextra\"/>" + - " </consumer>" + - " </metric-consumers>" + - "</admin>" + - "</services>"; + "<services>" + + "<content version=\"1.0\" id=\"storage\">\n" + + " <redundancy>1</redundancy>\n" + + " <documents>" + + " <document type=\"type1\" mode='index'/>\n" + + " <document type=\"type2\" mode='index'/>\n" + + " </documents>" + + 
" <group>\n" + + " <node distribution-key=\"0\" hostalias=\"node0\"/>\n" + + " </group>\n" + + "</content>" + + "<admin version=\"2.0\">" + + " <logserver hostalias=\"node0\"/>" + + " <adminserver hostalias=\"node0\"/>" + + " <metric-consumers>" + + " <consumer name=\"foobar\">" + + " <metric name=\"storage.foo.bar\"/>" + + " </consumer>" + + " <consumer name=\"log\">" + + " <metric name=\"extralogmetric\"/>" + + " <metric name=\"extralogmetric3\"/>" + + " </consumer>" + + " <consumer name=\"fleetcontroller\">" + + " <metric name=\"extraextra\"/>" + + " </consumer>" + + " </metric-consumers>" + + "</admin>" + + "</services>"; List<String> sds = ApplicationPackageUtils.generateSearchDefinitions("type1", "type2"); @@ -729,33 +788,33 @@ public class ClusterTest extends ContentBaseTest { @Test public void requireThatPreShutdownCommandIsSet() { ContentCluster cluster = parse( - "<content version=\"1.0\" id=\"storage\">" + - " <documents/>" + - " <engine>" + - " <proton>" + - " <flush-on-shutdown>true</flush-on-shutdown>" + - " </proton>" + - " </engine>" + - " <group>" + - " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" + - " </group>" + - "</content>"); + "<content version=\"1.0\" id=\"storage\">" + + " <documents/>" + + " <engine>" + + " <proton>" + + " <flush-on-shutdown>true</flush-on-shutdown>" + + " </proton>" + + " </engine>" + + " <group>" + + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" + + " </group>" + + "</content>"); assertThat(cluster.getSearch().getSearchNodes().size(), is(1)); assertTrue(cluster.getSearch().getSearchNodes().get(0).getPreShutdownCommand().isPresent()); cluster = parse( - "<content version=\"1.0\" id=\"storage\">" + - " <documents/>" + - " <engine>" + - " <proton>" + - " <flush-on-shutdown> \n " + - " true </flush-on-shutdown>" + - " </proton>" + - " </engine>" + - " <group>" + - " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" + - " </group>" + - "</content>"); + "<content version=\"1.0\" id=\"storage\">" + + 
" <documents/>" + + " <engine>" + + " <proton>" + + " <flush-on-shutdown> \n " + + " true </flush-on-shutdown>" + + " </proton>" + + " </engine>" + + " <group>" + + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" + + " </group>" + + "</content>"); assertThat(cluster.getSearch().getSearchNodes().size(), is(1)); assertTrue(cluster.getSearch().getSearchNodes().get(0).getPreShutdownCommand().isPresent()); } diff --git a/config-model/src/test/java/com/yahoo/vespa/model/test/utils/VespaModelCreatorWithMockPkg.java b/config-model/src/test/java/com/yahoo/vespa/model/test/utils/VespaModelCreatorWithMockPkg.java index 6f4effe0319..2f83d3bc394 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/test/utils/VespaModelCreatorWithMockPkg.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/test/utils/VespaModelCreatorWithMockPkg.java @@ -55,12 +55,13 @@ public class VespaModelCreatorWithMockPkg { VespaModel model = new VespaModel(configModelRegistry, deployState); if (validate) { try { - SchemaValidator validator = SchemaValidator.createTestValidatorHosts(); if (appPkg.getHosts() != null) { - validator.validate(appPkg.getHosts()); + SchemaValidator.createTestValidatorHosts().validate(appPkg.getHosts()); } - validator = SchemaValidator.createTestValidatorServices(); - validator.validate(appPkg.getServices()); + if (appPkg.getDeployment().isPresent()) { + SchemaValidator.createTestValidatorDeployment().validate(appPkg.getDeployment().get()); + } + SchemaValidator.createTestValidatorServices().validate(appPkg.getServices()); } catch (Exception e) { System.err.println(e.getClass()); throw e instanceof RuntimeException ? 
(RuntimeException) e : new RuntimeException(e); diff --git a/config-model/src/test/schema-test-files/services-hosted.xml b/config-model/src/test/schema-test-files/services-hosted.xml index f85a5352d23..195733c75a5 100644 --- a/config-model/src/test/schema-test-files/services-hosted.xml +++ b/config-model/src/test/schema-test-files/services-hosted.xml @@ -7,7 +7,11 @@ </admin> <jdisc id="container1" version="1.0"> - <nodes count="5" flavor="medium"/> + <nodes count="5" flavor="medium" required="true"/> + </jdisc> + + <jdisc id="container1" version="1.0"> + <nodes of="5" required="true"/> </jdisc> <container id="container2" version="1.0"> diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/Capacity.java b/config-provisioning/src/main/java/com/yahoo/config/provision/Capacity.java index 5e02e1105ae..7894b722b58 100644 --- a/config-provisioning/src/main/java/com/yahoo/config/provision/Capacity.java +++ b/config-provisioning/src/main/java/com/yahoo/config/provision/Capacity.java @@ -16,11 +16,14 @@ public final class Capacity { private final boolean required; private final Optional<String> flavor; + + private final NodeType type; - private Capacity(int nodeCount, boolean required, Optional<String> flavor) { + private Capacity(int nodeCount, boolean required, Optional<String> flavor, NodeType type) { this.nodeCount = nodeCount; this.flavor = flavor; this.required = required; + this.type = type; } /** Returns the number of nodes requested */ @@ -35,6 +38,13 @@ public final class Capacity { */ public Optional<String> flavor() { return flavor; } + /** + * Returns the node type (role) requested. This is tenant nodes by default. + * If some other type is requested the node count and flavor may be ignored + * and all nodes of the requested type returned instead. + */ + public NodeType type() { return type; } + @Override public String toString() { return nodeCount + " nodes " + ( flavor.isPresent() ? 
"of flavor " + flavor.get() : "(default flavor)" ); @@ -50,7 +60,7 @@ public final class Capacity { } /** Creates this from a desired node count: The request may be satisfied with a smaller number of nodes. */ public static Capacity fromNodeCount(int nodeCount, Optional<String> flavor) { - return new Capacity(nodeCount, false, flavor); + return new Capacity(nodeCount, false, flavor, NodeType.tenant); } /** Creates this from a required node count: Requests must fail unless the node count can be satisfied exactly */ @@ -63,7 +73,16 @@ public final class Capacity { } /** Creates this from a required node count: Requests must fail unless the node count can be satisfied exactly */ public static Capacity fromRequiredNodeCount(int nodeCount, Optional<String> flavor) { - return new Capacity(nodeCount, true, flavor); + return new Capacity(nodeCount, true, flavor, NodeType.tenant); + } + + public static Capacity fromNodeCount(int nodeCount, Optional<String> flavor, boolean required) { + return new Capacity(nodeCount, required, flavor, NodeType.tenant); + } + + /** Creates this from a node type */ + public static Capacity fromRequiredNodeType(NodeType type) { + return new Capacity(0, true, Optional.empty(), type); } } diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/NodeType.java b/config-provisioning/src/main/java/com/yahoo/config/provision/NodeType.java new file mode 100644 index 00000000000..f77b40bc67e --- /dev/null +++ b/config-provisioning/src/main/java/com/yahoo/config/provision/NodeType.java @@ -0,0 +1,19 @@ +package com.yahoo.config.provision; + +/** + * The possible types of nodes in the node repository + * + * @author bratseth + */ +public enum NodeType { + + /** A host of a set of (docker) tenant nodes */ + host, + + /** Nodes running the shared proxy layer */ + proxy, + + /** A node to be assigned to a tenant to run application workloads */ + tenant + +} diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/Quota.java 
b/config-provisioning/src/main/java/com/yahoo/config/provision/Quota.java index d09a401bfaa..72323e995aa 100644 --- a/config-provisioning/src/main/java/com/yahoo/config/provision/Quota.java +++ b/config-provisioning/src/main/java/com/yahoo/config/provision/Quota.java @@ -4,6 +4,8 @@ package com.yahoo.config.provision; /** * @author hmusum */ +// TODO: Remove when there is no release older than 6.33 in use +@SuppressWarnings("unused") public class Quota { private final int numberOfHosts; diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/RpcConfigSourceClient.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/RpcConfigSourceClient.java index 8775d92417e..30c85efe93b 100644 --- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/RpcConfigSourceClient.java +++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/RpcConfigSourceClient.java @@ -91,7 +91,7 @@ public class RpcConfigSourceClient extends ConfigSourceClient { target.close(); } String extra = ""; - log.log(LogLevel.ERROR, "Could not connect to any config source in set " + configSourceSet.toString() + + log.log(LogLevel.WARNING, "Could not connect to any config source in set " + configSourceSet.toString() + ", please make sure config server(s) are running. 
" + extra); } return false; diff --git a/config/pom.xml b/config/pom.xml index 73c8cd5b5ad..3518426a7bd 100755 --- a/config/pom.xml +++ b/config/pom.xml @@ -62,11 +62,6 @@ <scope>test</scope> </dependency> <dependency> - <groupId>uk.co.datumedge</groupId> - <artifactId>hamcrest-json</artifactId> - <scope>test</scope> - </dependency> - <dependency> <groupId>com.google.guava</groupId> <artifactId>guava</artifactId> <version>13.0.1</version> diff --git a/config/src/main/java/com/yahoo/config/subscription/ConfigSubscriber.java b/config/src/main/java/com/yahoo/config/subscription/ConfigSubscriber.java index 2322726057e..619dd333b51 100644 --- a/config/src/main/java/com/yahoo/config/subscription/ConfigSubscriber.java +++ b/config/src/main/java/com/yahoo/config/subscription/ConfigSubscriber.java @@ -27,6 +27,7 @@ import com.yahoo.vespa.config.TimingValues; * @since 5.1 */ public class ConfigSubscriber { + private Logger log = Logger.getLogger(getClass().getName()); private State state = State.OPEN; protected List<ConfigHandle<? 
extends ConfigInstance>> subscriptionHandles = new ArrayList<>(); diff --git a/config/src/test/java/com/yahoo/config/subscription/CfgConfigPayloadBuilderTest.java b/config/src/test/java/com/yahoo/config/subscription/CfgConfigPayloadBuilderTest.java index 7ad7144a1c8..2bc1a10b729 100644 --- a/config/src/test/java/com/yahoo/config/subscription/CfgConfigPayloadBuilderTest.java +++ b/config/src/test/java/com/yahoo/config/subscription/CfgConfigPayloadBuilderTest.java @@ -11,8 +11,8 @@ import org.junit.Test; import java.util.Arrays; import java.util.List; -import static com.yahoo.config.subscription.util.JsonHelper.assertJsonEquals; -import static com.yahoo.config.subscription.util.JsonHelper.inputJson; +import static com.yahoo.test.json.JsonTestHelper.assertJsonEquals; +import static com.yahoo.test.json.JsonTestHelper.inputJson; import static org.junit.Assert.assertEquals; /** diff --git a/config/src/test/java/com/yahoo/config/subscription/ConfigInstanceSerializerTest.java b/config/src/test/java/com/yahoo/config/subscription/ConfigInstanceSerializerTest.java index d3713eaa401..342e50821f4 100644 --- a/config/src/test/java/com/yahoo/config/subscription/ConfigInstanceSerializerTest.java +++ b/config/src/test/java/com/yahoo/config/subscription/ConfigInstanceSerializerTest.java @@ -1,22 +1,21 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.config.subscription; -import com.yahoo.foo.ArraytypesConfig; import com.yahoo.config.ConfigInstance; +import com.yahoo.foo.ArraytypesConfig; +import com.yahoo.foo.MaptypesConfig; import com.yahoo.foo.SimpletypesConfig; import com.yahoo.foo.SpecialtypesConfig; import com.yahoo.foo.StructtypesConfig; -import com.yahoo.foo.MaptypesConfig; import com.yahoo.slime.JsonFormat; import com.yahoo.slime.Slime; - import org.junit.Test; import java.io.ByteArrayOutputStream; import java.io.IOException; -import static com.yahoo.config.subscription.util.JsonHelper.assertJsonEquals; -import static com.yahoo.config.subscription.util.JsonHelper.inputJson; +import static com.yahoo.test.json.JsonTestHelper.assertJsonEquals; +import static com.yahoo.test.json.JsonTestHelper.inputJson; import static org.junit.Assert.fail; /** diff --git a/config/src/vespa/config/frt/protocol.cpp b/config/src/vespa/config/frt/protocol.cpp index be02bc59862..5c7af53d2af 100644 --- a/config/src/vespa/config/frt/protocol.cpp +++ b/config/src/vespa/config/frt/protocol.cpp @@ -6,6 +6,7 @@ LOG_SETUP(".config.frt.protocol"); #include <vespa/vespalib/util/stringfmt.h> using namespace vespalib; +using vespalib::alloc::Alloc; using namespace vespalib::slime; namespace config { @@ -58,15 +59,15 @@ const Memory RESPONSE_COMPRESSION_INFO_UNCOMPRESSED_SIZE = "uncompressedSize"; DecompressedData decompress_lz4(const char * input, uint32_t inputLen, int uncompressedLength) { - DefaultAlloc::UP memory(new DefaultAlloc(uncompressedLength)); - int sz = LZ4_decompress_safe(input, static_cast<char *>(memory->get()), inputLen, uncompressedLength); + Alloc memory( DefaultAlloc::create(uncompressedLength)); + int sz = LZ4_decompress_safe(input, static_cast<char *>(memory.get()), inputLen, uncompressedLength); if (sz >= 0 && sz != uncompressedLength) { if (LOG_WOULD_LOG(debug)) { LOG(debug, "Returned compressed size (%d) is not the same as uncompressed size(%d)", sz, uncompressedLength); } - DefaultAlloc * copy 
= new DefaultAlloc(sz); - memcpy(copy->get(), memory->get(), sz); - memory.reset(copy); + Alloc copy = memory.create(sz); + memcpy(copy.get(), memory.get(), sz); + memory = std::move(copy); } assert(sz >= 0); return DecompressedData(std::move(memory), static_cast<uint32_t>(sz)); diff --git a/config/src/vespa/config/frt/protocol.h b/config/src/vespa/config/frt/protocol.h index 79d916bf3e7..6d1c2b8226c 100644 --- a/config/src/vespa/config/frt/protocol.h +++ b/config/src/vespa/config/frt/protocol.h @@ -59,9 +59,9 @@ extern const vespalib::slime::Memory RESPONSE_COMPRESSION_INFO_TYPE; extern const vespalib::slime::Memory RESPONSE_COMPRESSION_INFO_UNCOMPRESSED_SIZE; struct DecompressedData { - DecompressedData(vespalib::DefaultAlloc::UP mem, uint32_t sz) + DecompressedData(vespalib::alloc::Alloc mem, uint32_t sz) : memory(std::move(mem)), - memRef(static_cast<const char *>(memory->get()), sz), + memRef(static_cast<const char *>(memory.get()), sz), size(sz) { } DecompressedData(const vespalib::slime::Memory & mem, uint32_t sz) @@ -70,7 +70,7 @@ struct DecompressedData { size(sz) {} - vespalib::DefaultAlloc::UP memory; + vespalib::alloc::Alloc memory; vespalib::slime::Memory memRef; uint32_t size; }; diff --git a/configdefinitions/src/vespa/zookeeper-server.def b/configdefinitions/src/vespa/zookeeper-server.def index b2f697a1488..b460b417105 100644 --- a/configdefinitions/src/vespa/zookeeper-server.def +++ b/configdefinitions/src/vespa/zookeeper-server.def @@ -4,6 +4,9 @@ namespace=cloud.config # Vespa home is prepended if the file is relative zooKeeperConfigFile string default="conf/zookeeper/zookeeper.cfg" +# For more info about the values below, see ZooKeeper documentation + +# tick time in milliseconds tickTime int default=2000 initLimit int default=20 syncLimit int default=15 @@ -19,6 +22,7 @@ clientPort int default=2181 # normal zone, a snapRetainCount of 15 gives 3-4 hours of logs before they're # purged. 
snapshotCount int default=50000 +# Purge interval in hours autopurge.purgeInterval int default=1 autopurge.snapRetainCount int default=15 diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java index 6ed1ddf6c7e..a5852d1dd8a 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java @@ -2,16 +2,27 @@ package com.yahoo.vespa.config.server; import com.yahoo.cloud.config.ConfigserverConfig; + import com.yahoo.config.application.api.DeployLogger; import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.Provisioner; +import com.yahoo.config.provision.Zone; +import com.yahoo.container.jdisc.HttpRequest; +import com.yahoo.container.jdisc.HttpResponse; import com.yahoo.log.LogLevel; import com.yahoo.transaction.NestedTransaction; +import com.yahoo.vespa.config.server.application.Application; +import com.yahoo.vespa.config.server.application.ApplicationConvergenceChecker; +import com.yahoo.vespa.config.server.application.LogServerLogGrabber; import com.yahoo.vespa.config.server.application.TenantApplications; import com.yahoo.vespa.config.server.deploy.Deployment; +import com.yahoo.vespa.config.server.http.ContentHandler; +import com.yahoo.vespa.config.server.http.SessionHandler; +import com.yahoo.vespa.config.server.http.v2.ApplicationContentRequest; import com.yahoo.vespa.config.server.provision.HostProvisionerProvider; import com.yahoo.vespa.config.server.session.LocalSession; import com.yahoo.vespa.config.server.session.LocalSessionRepo; +import com.yahoo.vespa.config.server.session.RemoteSession; import com.yahoo.vespa.config.server.session.SilentDeployLogger; import com.yahoo.vespa.config.server.tenant.ActivateLock; import com.yahoo.vespa.config.server.tenant.Rotations; @@ -19,6 +30,8 @@ 
import com.yahoo.vespa.config.server.tenant.Tenant; import com.yahoo.vespa.config.server.tenant.Tenants; import com.yahoo.vespa.curator.Curator; +import java.io.IOException; +import java.net.URI; import java.time.Clock; import java.time.Duration; import java.util.Optional; @@ -34,20 +47,29 @@ import java.util.logging.Logger; public class ApplicationRepository implements com.yahoo.config.provision.Deployer { private static final Logger log = Logger.getLogger(ApplicationRepository.class.getName()); - + private final Tenants tenants; private final Optional<Provisioner> hostProvisioner; private final ConfigserverConfig configserverConfig; private final Curator curator; + private final LogServerLogGrabber logServerLogGrabber; + private final ApplicationConvergenceChecker convergeChecker; + private final ContentHandler contentHandler = new ContentHandler(); private final Clock clock; private final DeployLogger logger = new SilentDeployLogger(); - public ApplicationRepository(Tenants tenants, HostProvisionerProvider hostProvisionerProvider, - ConfigserverConfig configserverConfig, Curator curator) { + public ApplicationRepository(Tenants tenants, + HostProvisionerProvider hostProvisionerProvider, + ConfigserverConfig configserverConfig, + Curator curator, + LogServerLogGrabber logServerLogGrabber, + ApplicationConvergenceChecker applicationConvergenceChecker) { this.tenants = tenants; this.hostProvisioner = hostProvisionerProvider.getHostProvisioner(); this.configserverConfig = configserverConfig; this.curator = curator; + this.logServerLogGrabber = logServerLogGrabber; + this.convergeChecker = applicationConvergenceChecker; this.clock = Clock.systemUTC(); } @@ -74,7 +96,7 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye configserverConfig, hostProvisioner, new ActivateLock(curator, tenant.getPath()), - timeout, + timeout, clock, /* already deployed, validate: */ false)); } @@ -87,9 +109,9 @@ public class ApplicationRepository implements 
com.yahoo.config.provision.Deploye timeout, clock); } - /** + /** * Removes a previously deployed application - * + * * @return true if the application was found and removed, false if it was not present * @throws RuntimeException if the remove transaction fails. This method is exception safe. */ @@ -99,7 +121,7 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye TenantApplications tenantApplications = owner.get().getApplicationRepo(); if ( ! tenantApplications.listApplications().contains(applicationId)) return false; - + // TODO: Push lookup logic down long sessionId = tenantApplications.getSessionIdForApplication(applicationId); LocalSessionRepo localSessionRepo = owner.get().getLocalSessionRepo(); @@ -122,4 +144,40 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye return true; } + public String grabLog(Tenant tenant, ApplicationId applicationId) { + Application application = getApplication(tenant, applicationId); + return logServerLogGrabber.grabLog(application); + } + + public HttpResponse nodeConvergenceCheck(Tenant tenant, ApplicationId applicationId, String hostname, URI uri) { + Application application = getApplication(tenant, applicationId); + return convergeChecker.nodeConvergenceCheck(application, hostname, uri); + } + + public void waitForConfigConverged(Tenant tenant, ApplicationId applicationId, TimeoutBudget timeoutBudget) throws IOException { + Application application = getApplication(tenant, applicationId); + convergeChecker.waitForConfigConverged(application, timeoutBudget); + } + + public HttpResponse listConfigConvergence(Tenant tenant, ApplicationId applicationId, URI uri) { + Application application = getApplication(tenant, applicationId); + return convergeChecker.listConfigConvergence(application, uri); + } + + public Long getApplicationGeneration(Tenant tenant, ApplicationId applicationId) { + return getApplication(tenant, applicationId).getApplicationGeneration(); + } + + 
public HttpResponse getContent(Tenant tenant, ApplicationId applicationId, Zone zone, HttpRequest request) { + LocalSession session = SessionHandler.getSessionFromRequest(tenant.getLocalSessionRepo(), + tenant.getApplicationRepo().getSessionIdForApplication(applicationId)); + return contentHandler.get(ApplicationContentRequest.create(request, session, applicationId, zone)); + } + + private Application getApplication(Tenant tenant, ApplicationId applicationId) { + long sessionId = tenant.getApplicationRepo().getSessionIdForApplication(applicationId); + RemoteSession session = tenant.getRemoteSessionRepo().getSession(sessionId, 0); + return session.ensureApplicationLoaded().getForVersionOrLatest(Optional.empty()); + } + } diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelController.java b/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelController.java index 88489aaa17b..214f0defedf 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelController.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelController.java @@ -1,167 +1,86 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.config.server; -import com.google.inject.Inject; -import com.yahoo.cloud.config.ConfigserverConfig; import com.yahoo.config.ConfigInstance; +import com.yahoo.config.ConfigurationRuntimeException; +import com.yahoo.config.codegen.DefParser; +import com.yahoo.config.codegen.InnerCNode; import com.yahoo.config.model.api.ConfigDefinitionRepo; -import com.yahoo.config.provision.Version; -import com.yahoo.config.provision.Zone; -import com.yahoo.log.LogLevel; +import com.yahoo.config.provision.ApplicationId; +import com.yahoo.vespa.config.ConfigDefinitionKey; import com.yahoo.vespa.config.ConfigKey; +import com.yahoo.vespa.config.ConfigPayload; import com.yahoo.vespa.config.GetConfigRequest; +import com.yahoo.vespa.config.buildergen.ConfigDefinition; import com.yahoo.vespa.config.protocol.ConfigResponse; -import com.yahoo.vespa.config.server.application.Application; -import com.yahoo.config.provision.ApplicationId; -import com.yahoo.config.provision.TenantName; -import com.yahoo.vespa.config.GenerationCounter; -import com.yahoo.vespa.config.server.application.ApplicationSet; +import com.yahoo.vespa.config.protocol.DefContent; import com.yahoo.vespa.config.server.model.SuperModel; import com.yahoo.vespa.config.server.rpc.ConfigResponseFactory; -import com.yahoo.vespa.config.server.rpc.ConfigResponseFactoryFactory; import java.io.IOException; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.Map; -import java.util.Optional; -import java.util.Set; +import java.io.StringReader; /** - * Controls the lifetime of the {@link SuperModel} and the {@link SuperModelRequestHandler}. + * Handler for global configs that must be resolved using the global SuperModel instance. Deals with + * reloading of config as well. 
* * @author lulf * @since 5.9 */ -public class SuperModelController implements RequestHandler { +public class SuperModelController { - private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(SuperModelController.class.getName()); - private volatile SuperModelRequestHandler handler; - private final GenerationCounter generationCounter; - private final Zone zone; - private final long masterGeneration; + private final SuperModel model; + private final long generation; private final ConfigDefinitionRepo configDefinitionRepo; private final ConfigResponseFactory responseFactory; - private volatile boolean enabled = false; - /** - * Creates a supermodel controller - * - * @param generationCounter this will be the SuperModelGenerationCounter in production - */ - @Inject - public SuperModelController(GenerationCounter generationCounter, ConfigDefinitionRepo configDefinitionRepo, ConfigserverConfig configserverConfig) { - this.generationCounter = generationCounter; + public SuperModelController(SuperModel model, ConfigDefinitionRepo configDefinitionRepo, long generation, ConfigResponseFactory responseFactory) { + this.model = model; this.configDefinitionRepo = configDefinitionRepo; - this.masterGeneration = configserverConfig.masterGeneration(); - this.responseFactory = ConfigResponseFactoryFactory.createFactory(configserverConfig); - this.zone = new Zone(configserverConfig); - this.handler = createNewHandler(Collections.emptyMap()); + this.generation = generation; + this.responseFactory = responseFactory; } /** - * Signals that config has been reloaded for an {@link com.yahoo.vespa.config.server.application.Application} - * belonging to a tenant. + * Resolves global config for given request. * - * TODO: This is a bit too complex I think. - * - * @param tenant Name of tenant owning the application. - * @param applicationSet The reloaded set of {@link com.yahoo.vespa.config.server.application.Application}. 
+ * @param request The {@link com.yahoo.vespa.config.GetConfigRequest} to find config for. + * @return a {@link com.yahoo.vespa.config.protocol.ConfigResponse} containing the response for this request. + * @throws java.lang.IllegalArgumentException if no such config was found. */ - public synchronized void reloadConfig(TenantName tenant, ApplicationSet applicationSet) { - Map<TenantName, Map<ApplicationId, Application>> newModels = createModelCopy(); - if (!newModels.containsKey(tenant)) { - newModels.put(tenant, new LinkedHashMap<>()); + public ConfigResponse resolveConfig(GetConfigRequest request) { + ConfigKey<?> configKey = request.getConfigKey(); + InnerCNode targetDef = getConfigDefinition(request.getConfigKey(), request.getDefContent()); + try { + ConfigPayload payload = model.getConfig(configKey); + return responseFactory.createResponse(payload, targetDef, generation); + } catch (IOException e) { + throw new ConfigurationRuntimeException("Unable to resolve config", e); } - // TODO: Should supermodel care about multiple versions? 
- newModels.get(tenant).put(applicationSet.getId(), applicationSet.getForVersionOrLatest(Optional.empty())); - handler = createNewHandler(newModels); } - public synchronized void removeApplication(ApplicationId applicationId) { - Map<TenantName, Map<ApplicationId, Application>> newModels = createModelCopy(); - if (newModels.containsKey(applicationId.tenant())) { - newModels.get(applicationId.tenant()).remove(applicationId); - if (newModels.get(applicationId.tenant()).isEmpty()) { - newModels.remove(applicationId.tenant()); + private InnerCNode getConfigDefinition(ConfigKey<?> configKey, DefContent defContent) { + if (defContent.isEmpty()) { + ConfigDefinitionKey configDefinitionKey = new ConfigDefinitionKey(configKey.getName(), configKey.getNamespace()); + ConfigDefinition configDefinition = configDefinitionRepo.getConfigDefinitions().get(configDefinitionKey); + if (configDefinition == null) { + throw new UnknownConfigDefinitionException("Unable to find config definition for '" + configKey.getNamespace() + "." 
+ configKey.getName()); } - } - handler = createNewHandler(newModels); - } - - private SuperModelRequestHandler createNewHandler(Map<TenantName, Map<ApplicationId, Application>> newModels) { - long generation = generationCounter.get() + masterGeneration; - SuperModel model = new SuperModel(newModels, zone); - return new SuperModelRequestHandler(model, configDefinitionRepo, generation, responseFactory); - } - - private Map<TenantName, Map<ApplicationId, Application>> getCurrentModels() { - if (handler != null) { - return handler.getSuperModel().getCurrentModels(); + return configDefinition.getCNode(); } else { - return new LinkedHashMap<>(); + DefParser dParser = new DefParser(configKey.getName(), new StringReader(defContent.asString())); + return dParser.getTree(); } } - private Map<TenantName, Map<ApplicationId, Application>> createModelCopy() { - Map<TenantName, Map<ApplicationId, Application>> currentModels = getCurrentModels(); - Map<TenantName, Map<ApplicationId, Application>> newModels = new LinkedHashMap<>(); - for (Map.Entry<TenantName, Map<ApplicationId, Application>> entry : currentModels.entrySet()) { - Map<ApplicationId, Application> appMap = new LinkedHashMap<>(); - newModels.put(entry.getKey(), appMap); - for (Map.Entry<ApplicationId, Application> appEntry : entry.getValue().entrySet()) { - appMap.put(appEntry.getKey(), appEntry.getValue()); - } - } - return newModels; + SuperModel getSuperModel() { + return model; } - public SuperModelRequestHandler getHandler() { return handler; } - - @Override - public ConfigResponse resolveConfig(ApplicationId appId, GetConfigRequest req, Optional<Version> vespaVersion) { - log.log(LogLevel.DEBUG, "SuperModelController resolving " + req + " for app id '" + appId + "'"); - if (handler != null) { - return handler.resolveConfig(req); - } - return null; - } + long getGeneration() { return generation; } public <CONFIGTYPE extends ConfigInstance> CONFIGTYPE getConfig(Class<CONFIGTYPE> configClass, ApplicationId 
applicationId, String configId) throws IOException { - return handler.getConfig(configClass, applicationId, configId); - } - - @Override - public Set<ConfigKey<?>> listConfigs(ApplicationId appId, Optional<Version> vespaVersion, boolean recursive) { - throw new UnsupportedOperationException(); - } - - @Override - public Set<ConfigKey<?>> listNamedConfigs(ApplicationId appId, Optional<Version> vespaVersion, ConfigKey<?> key, boolean recursive) { - throw new UnsupportedOperationException(); - } - - @Override - public Set<ConfigKey<?>> allConfigsProduced(ApplicationId appId, Optional<Version> vespaVersion) { - throw new UnsupportedOperationException(); + return model.getConfig(configClass, applicationId, configId); } - @Override - public Set<String> allConfigIds(ApplicationId appID, Optional<Version> vespaVersion) { - throw new UnsupportedOperationException(); - } - - @Override - public boolean hasApplication(ApplicationId appId, Optional<Version> vespaVersion) { - return enabled && appId.equals(ApplicationId.global()); - } - - @Override - public ApplicationId resolveApplicationId(String hostName) { - return ApplicationId.global(); - } - - public void enable() { - enabled = true; - } } diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelRequestHandler.java b/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelRequestHandler.java index b08ac0827a1..9291e6030e2 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelRequestHandler.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelRequestHandler.java @@ -1,84 +1,166 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.config.server; +import com.google.inject.Inject; +import com.yahoo.cloud.config.ConfigserverConfig; import com.yahoo.config.ConfigInstance; -import com.yahoo.config.ConfigurationRuntimeException; -import com.yahoo.config.codegen.DefParser; -import com.yahoo.config.codegen.InnerCNode; import com.yahoo.config.model.api.ConfigDefinitionRepo; -import com.yahoo.config.provision.ApplicationId; -import com.yahoo.vespa.config.ConfigDefinitionKey; +import com.yahoo.config.provision.Version; +import com.yahoo.config.provision.Zone; +import com.yahoo.log.LogLevel; import com.yahoo.vespa.config.ConfigKey; -import com.yahoo.vespa.config.ConfigPayload; import com.yahoo.vespa.config.GetConfigRequest; -import com.yahoo.vespa.config.buildergen.ConfigDefinition; import com.yahoo.vespa.config.protocol.ConfigResponse; -import com.yahoo.vespa.config.protocol.DefContent; +import com.yahoo.vespa.config.server.application.Application; +import com.yahoo.config.provision.ApplicationId; +import com.yahoo.config.provision.TenantName; +import com.yahoo.vespa.config.GenerationCounter; +import com.yahoo.vespa.config.server.application.ApplicationSet; import com.yahoo.vespa.config.server.model.SuperModel; import com.yahoo.vespa.config.server.rpc.ConfigResponseFactory; +import com.yahoo.vespa.config.server.rpc.ConfigResponseFactoryFactory; import java.io.IOException; -import java.io.StringReader; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Optional; +import java.util.Set; /** - * Handler for global configs that must be resolved using the global SuperModel instance. Deals with - * reloading of config as well. 
+ * Handles request for supermodel config * * @author lulf * @since 5.9 */ -public class SuperModelRequestHandler { - private final SuperModel model; - private final long generation; +public class SuperModelRequestHandler implements RequestHandler { + + private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(SuperModelRequestHandler.class.getName()); + private volatile SuperModelController handler; + private final GenerationCounter generationCounter; + private final Zone zone; + private final long masterGeneration; private final ConfigDefinitionRepo configDefinitionRepo; private final ConfigResponseFactory responseFactory; + private volatile boolean enabled = false; - public SuperModelRequestHandler(SuperModel model, ConfigDefinitionRepo configDefinitionRepo, long generation, ConfigResponseFactory responseFactory) { - this.model = model; + /** + * Creates a supermodel controller + */ + @Inject + public SuperModelRequestHandler(GenerationCounter generationCounter, ConfigDefinitionRepo configDefinitionRepo, + ConfigserverConfig configserverConfig) { + this.generationCounter = generationCounter; this.configDefinitionRepo = configDefinitionRepo; - this.generation = generation; - this.responseFactory = responseFactory; + this.masterGeneration = configserverConfig.masterGeneration(); + this.responseFactory = ConfigResponseFactoryFactory.createFactory(configserverConfig); + this.zone = new Zone(configserverConfig); + this.handler = createNewHandler(Collections.emptyMap()); } /** - * Resolves global config for given request. + * Signals that config has been reloaded for an {@link com.yahoo.vespa.config.server.application.Application} + * belonging to a tenant. + * + * TODO: This is a bit too complex I think. * - * @param request The {@link com.yahoo.vespa.config.GetConfigRequest} to find config for. - * @return a {@link com.yahoo.vespa.config.protocol.ConfigResponse} containing the response for this request. 
- * @throws java.lang.IllegalArgumentException if no such config was found. + * @param tenant Name of tenant owning the application. + * @param applicationSet The reloaded set of {@link com.yahoo.vespa.config.server.application.Application}. */ - public ConfigResponse resolveConfig(GetConfigRequest request) { - ConfigKey<?> configKey = request.getConfigKey(); - InnerCNode targetDef = getConfigDefinition(request.getConfigKey(), request.getDefContent()); - try { - ConfigPayload payload = model.getConfig(configKey); - return responseFactory.createResponse(payload, targetDef, generation); - } catch (IOException e) { - throw new ConfigurationRuntimeException("Unable to resolve config", e); + public synchronized void reloadConfig(TenantName tenant, ApplicationSet applicationSet) { + Map<TenantName, Map<ApplicationId, Application>> newModels = createModelCopy(); + if (!newModels.containsKey(tenant)) { + newModels.put(tenant, new LinkedHashMap<>()); } + // TODO: Should supermodel care about multiple versions? + newModels.get(tenant).put(applicationSet.getId(), applicationSet.getForVersionOrLatest(Optional.empty())); + handler = createNewHandler(newModels); } - private InnerCNode getConfigDefinition(ConfigKey<?> configKey, DefContent defContent) { - if (defContent.isEmpty()) { - ConfigDefinitionKey configDefinitionKey = new ConfigDefinitionKey(configKey.getName(), configKey.getNamespace()); - ConfigDefinition configDefinition = configDefinitionRepo.getConfigDefinitions().get(configDefinitionKey); - if (configDefinition == null) { - throw new UnknownConfigDefinitionException("Unable to find config definition for '" + configKey.getNamespace() + "." 
+ configKey.getName()); + public synchronized void removeApplication(ApplicationId applicationId) { + Map<TenantName, Map<ApplicationId, Application>> newModels = createModelCopy(); + if (newModels.containsKey(applicationId.tenant())) { + newModels.get(applicationId.tenant()).remove(applicationId); + if (newModels.get(applicationId.tenant()).isEmpty()) { + newModels.remove(applicationId.tenant()); } - return configDefinition.getCNode(); + } + handler = createNewHandler(newModels); + } + + private SuperModelController createNewHandler(Map<TenantName, Map<ApplicationId, Application>> newModels) { + long generation = generationCounter.get() + masterGeneration; + SuperModel model = new SuperModel(newModels, zone); + return new SuperModelController(model, configDefinitionRepo, generation, responseFactory); + } + + private Map<TenantName, Map<ApplicationId, Application>> getCurrentModels() { + if (handler != null) { + return handler.getSuperModel().applicationModels(); } else { - DefParser dParser = new DefParser(configKey.getName(), new StringReader(defContent.asString())); - return dParser.getTree(); + return new LinkedHashMap<>(); } } - SuperModel getSuperModel() { - return model; + private Map<TenantName, Map<ApplicationId, Application>> createModelCopy() { + Map<TenantName, Map<ApplicationId, Application>> currentModels = getCurrentModels(); + Map<TenantName, Map<ApplicationId, Application>> newModels = new LinkedHashMap<>(); + for (Map.Entry<TenantName, Map<ApplicationId, Application>> entry : currentModels.entrySet()) { + Map<ApplicationId, Application> appMap = new LinkedHashMap<>(); + newModels.put(entry.getKey(), appMap); + for (Map.Entry<ApplicationId, Application> appEntry : entry.getValue().entrySet()) { + appMap.put(appEntry.getKey(), appEntry.getValue()); + } + } + return newModels; } - long getGeneration() { return generation; } + public SuperModelController getHandler() { return handler; } + + @Override + public ConfigResponse resolveConfig(ApplicationId 
appId, GetConfigRequest req, Optional<Version> vespaVersion) { + log.log(LogLevel.DEBUG, "SuperModelRequestHandler resolving " + req + " for app id '" + appId + "'"); + if (handler != null) { + return handler.resolveConfig(req); + } + return null; + } public <CONFIGTYPE extends ConfigInstance> CONFIGTYPE getConfig(Class<CONFIGTYPE> configClass, ApplicationId applicationId, String configId) throws IOException { - return model.getConfig(configClass, applicationId, configId); + return handler.getConfig(configClass, applicationId, configId); + } + + @Override + public Set<ConfigKey<?>> listConfigs(ApplicationId appId, Optional<Version> vespaVersion, boolean recursive) { + throw new UnsupportedOperationException(); + } + + @Override + public Set<ConfigKey<?>> listNamedConfigs(ApplicationId appId, Optional<Version> vespaVersion, ConfigKey<?> key, boolean recursive) { + throw new UnsupportedOperationException(); + } + + @Override + public Set<ConfigKey<?>> allConfigsProduced(ApplicationId appId, Optional<Version> vespaVersion) { + throw new UnsupportedOperationException(); + } + + @Override + public Set<String> allConfigIds(ApplicationId appID, Optional<Version> vespaVersion) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean hasApplication(ApplicationId appId, Optional<Version> vespaVersion) { + return enabled && appId.equals(ApplicationId.global()); + } + + @Override + public ApplicationId resolveApplicationId(String hostName) { + return ApplicationId.global(); + } + + public void enable() { + enabled = true; } } diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/application/LogServerLogGrabber.java b/configserver/src/main/java/com/yahoo/vespa/config/server/application/LogServerLogGrabber.java index 2ce95e016f4..fdec3939f2f 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/application/LogServerLogGrabber.java +++ 
b/configserver/src/main/java/com/yahoo/vespa/config/server/application/LogServerLogGrabber.java @@ -4,19 +4,14 @@ package com.yahoo.vespa.config.server.application; import com.yahoo.cloud.config.ModelConfig; import com.yahoo.component.AbstractComponent; import com.google.inject.Inject; -import com.yahoo.container.jdisc.HttpResponse; import com.yahoo.log.LogLevel; -import com.yahoo.vespa.config.server.http.HttpConfigResponse; -import com.yahoo.vespa.config.server.http.HttpErrorResponse; +import com.yahoo.vespa.config.server.http.InternalServerException; import com.yahoo.yolean.Exceptions; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStreamReader; -import java.io.OutputStream; -import java.io.PrintWriter; import java.net.Socket; -import java.nio.charset.StandardCharsets; import java.util.Optional; /** @@ -28,7 +23,6 @@ import java.util.Optional; public class LogServerLogGrabber extends AbstractComponent { private static final java.util.logging.Logger log = java.util.logging.Logger.getLogger(LogServerLogGrabber.class.getName()); - @Inject public LogServerLogGrabber() {} private Optional<Integer> getErrorLogPort(ModelConfig.Hosts.Services service) { @@ -43,7 +37,7 @@ public class LogServerLogGrabber extends AbstractComponent { int port; } - public HttpResponse grabLog(Application application) { + public String grabLog(Application application) { final ModelConfig config; try { @@ -61,7 +55,7 @@ public class LogServerLogGrabber extends AbstractComponent { Optional<Integer> logPort = getErrorLogPort(logService); if (logPort.isPresent()) { if (logServerConnectionInfo.hostName != null) { - throw new RuntimeException("Found several log server ports."); + throw new RuntimeException("Found several log server ports"); } logServerConnectionInfo.hostName = host.name(); logServerConnectionInfo.port = logPort.get(); @@ -69,14 +63,7 @@ public class LogServerLogGrabber extends AbstractComponent { })); if (logServerConnectionInfo.hostName == null) { - 
return new HttpResponse(503) { - @Override - public void render(OutputStream outputStream) throws IOException { - PrintWriter printWriter = new PrintWriter(outputStream); - printWriter.print("Did not find any log server in config model."); - printWriter.close(); - } - }; + throw new InternalServerException("Did not find any log server in config model"); } log.log(LogLevel.DEBUG, "Requested error logs, pulling from logserver on " + logServerConnectionInfo.hostName + " " + logServerConnectionInfo.port); @@ -85,20 +72,9 @@ public class LogServerLogGrabber extends AbstractComponent { response = readLog(logServerConnectionInfo.hostName, logServerConnectionInfo.port); log.log(LogLevel.DEBUG, "Requested error logs was " + response.length() + " characters"); } catch (IOException e) { - return HttpErrorResponse.internalServerError(Exceptions.toMessageString(e)); + throw new InternalServerException(Exceptions.toMessageString(e)); } - - return new HttpResponse(200) { - @Override - public void render(OutputStream outputStream) throws IOException { - outputStream.write(response.getBytes(StandardCharsets.UTF_8)); - } - - @Override - public String getContentType() { - return HttpConfigResponse.JSON_CONTENT_TYPE; - } - }; + return response; } private String readLog(String host, int port) throws IOException { diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDBRegistry.java b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDBRegistry.java index 58d651ae33a..37cea22e420 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDBRegistry.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDBRegistry.java @@ -43,12 +43,4 @@ public class FileDBRegistry implements FileRegistry { return entries; } - @Override - public Set<String> allRelativePaths() { - Set<String> ret = new HashSet<>(); - for (Entry entry : entries) { - ret.add(entry.relativePath); 
- } - return ret; - } } diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/SessionActiveHandlerBase.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/SessionActiveHandlerBase.java index cc4689682dd..b54c722a7e4 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/SessionActiveHandlerBase.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/SessionActiveHandlerBase.java @@ -1,39 +1,38 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.config.server.http; -import com.yahoo.config.provision.Provisioner; import com.yahoo.container.jdisc.HttpRequest; import com.yahoo.container.logging.AccessLog; import com.yahoo.vespa.config.server.tenant.ActivateLock; import com.yahoo.vespa.config.server.TimeoutBudget; import com.yahoo.vespa.config.server.ApplicationRepository; import com.yahoo.vespa.config.server.deploy.Deployment; -import com.yahoo.vespa.config.server.provision.HostProvisionerProvider; import com.yahoo.vespa.config.server.session.LocalSession; import com.yahoo.vespa.config.server.session.LocalSessionRepo; -import java.util.Optional; import java.util.concurrent.Executor; /** * @author lulf */ public class SessionActiveHandlerBase extends SessionHandler { + private final ApplicationRepository applicationRepository; - public SessionActiveHandlerBase(Executor executor, AccessLog accessLog) { + public SessionActiveHandlerBase(Executor executor, AccessLog accessLog, ApplicationRepository applicationRepository ) { super(executor, accessLog); + this.applicationRepository = applicationRepository; } protected void activate(HttpRequest request, LocalSessionRepo localSessionRepo, ActivateLock activateLock, TimeoutBudget timeoutBudget, - Optional<Provisioner> hostProvisioner, LocalSession localSession) { - // TODO: Use an injected applicationRepository from the callers of this instead - // TODO: And then get 
rid of the activateLock and localSessionRepo arguments in deployFromPreparedSession - ApplicationRepository applicationRepository = new ApplicationRepository(null, HostProvisionerProvider.from(hostProvisioner), null, null); - Deployment deployment = applicationRepository.deployFromPreparedSession(localSession, activateLock, localSessionRepo, timeoutBudget.timeLeft()); + // TODO: Get rid of the activateLock and localSessionRepo arguments in deployFromPreparedSession + Deployment deployment = applicationRepository.deployFromPreparedSession(localSession, + activateLock, + localSessionRepo, + timeoutBudget.timeLeft()); deployment.setIgnoreLockFailure(shouldIgnoreLockFailure(request)); deployment.setIgnoreSessionStaleFailure(shouldIgnoreSessionStaleFailure(request)); deployment.activate(); diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationContentRequest.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationContentRequest.java index ba7eff7c461..2b5bc4b3d35 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationContentRequest.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationContentRequest.java @@ -15,7 +15,7 @@ import com.yahoo.vespa.config.server.session.LocalSession; * @author lulf * @since 5.3 */ -class ApplicationContentRequest extends ContentRequest { +public class ApplicationContentRequest extends ContentRequest { private static final String uriPattern = "http://*/application/v2/tenant/*/application/*/environment/*/region/*/instance/*/content/*"; private final ApplicationId applicationId; @@ -27,7 +27,7 @@ class ApplicationContentRequest extends ContentRequest { this.zone = zone; } - static ContentRequest create(HttpRequest request, LocalSession session, ApplicationId applicationId, Zone zone) { + public static ContentRequest create(HttpRequest request, LocalSession session, ApplicationId applicationId, Zone zone) { return new 
ApplicationContentRequest(request, session, applicationId, zone); } diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationHandler.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationHandler.java index c0482ad5d99..e9432e9cf81 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationHandler.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationHandler.java @@ -1,7 +1,6 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.config.server.http.v2; -import com.yahoo.cloud.config.ConfigserverConfig; import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.ApplicationName; import com.yahoo.config.provision.HostFilter; @@ -13,28 +12,22 @@ import com.yahoo.container.jdisc.HttpResponse; import com.yahoo.container.logging.AccessLog; import com.yahoo.jdisc.Response; import com.yahoo.jdisc.application.BindingMatch; +import com.yahoo.vespa.config.server.http.HttpConfigResponse; import com.yahoo.vespa.config.server.tenant.Tenant; import com.yahoo.vespa.config.server.tenant.Tenants; import com.yahoo.vespa.config.server.TimeoutBudget; -import com.yahoo.vespa.config.server.application.Application; -import com.yahoo.vespa.config.server.application.ApplicationConvergenceChecker; import com.yahoo.vespa.config.server.application.TenantApplications; -import com.yahoo.vespa.config.server.application.LogServerLogGrabber; import com.yahoo.vespa.config.server.ApplicationRepository; -import com.yahoo.vespa.config.server.http.ContentHandler; import com.yahoo.vespa.config.server.http.HttpErrorResponse; import com.yahoo.vespa.config.server.http.HttpHandler; import com.yahoo.vespa.config.server.http.JSONResponse; import com.yahoo.vespa.config.server.http.NotFoundException; -import com.yahoo.vespa.config.server.http.SessionHandler; import 
com.yahoo.vespa.config.server.http.Utils; import com.yahoo.vespa.config.server.provision.HostProvisionerProvider; -import com.yahoo.vespa.config.server.session.LocalSession; -import com.yahoo.vespa.config.server.session.RemoteSession; -import com.yahoo.vespa.config.server.session.RemoteSessionRepo; -import com.yahoo.vespa.curator.Curator; import java.io.IOException; +import java.io.OutputStream; +import java.nio.charset.StandardCharsets; import java.time.Clock; import java.time.Duration; import java.util.List; @@ -42,7 +35,7 @@ import java.util.Optional; import java.util.concurrent.Executor; /** - * Handler for deleting a currently active application for a tenant. + * Operations on applications (delete, wait for config convergence, restart, application content etc.) * * @author hmusum * @since 5.4 @@ -52,25 +45,21 @@ public class ApplicationHandler extends HttpHandler { private static final String REQUEST_PROPERTY_TIMEOUT = "timeout"; private final Tenants tenants; - private final ContentHandler contentHandler = new ContentHandler(); + private final Optional<Provisioner> hostProvisioner; - private final ApplicationConvergenceChecker convergeChecker; private final Zone zone; - private final LogServerLogGrabber logServerLogGrabber; private final ApplicationRepository applicationRepository; - public ApplicationHandler(Executor executor, AccessLog accessLog, Tenants tenants, - HostProvisionerProvider hostProvisionerProvider, Zone zone, - ApplicationConvergenceChecker convergeChecker, - LogServerLogGrabber logServerLogGrabber, - ConfigserverConfig configserverConfig, Curator curator) { + public ApplicationHandler(Executor executor, AccessLog accessLog, + Tenants tenants, + HostProvisionerProvider hostProvisionerProvider, + Zone zone, + ApplicationRepository applicationRepository) { super(executor, accessLog); this.tenants = tenants; this.hostProvisioner = hostProvisionerProvider.getHostProvisioner(); this.zone = zone; - this.convergeChecker = convergeChecker; - 
this.logServerLogGrabber = logServerLogGrabber; - this.applicationRepository = new ApplicationRepository(tenants, hostProvisionerProvider, configserverConfig, curator); + this.applicationRepository = applicationRepository; } @Override @@ -89,27 +78,24 @@ public class ApplicationHandler extends HttpHandler { Tenant tenant = verifyTenantAndApplication(applicationId); if (isServiceConvergeRequest(request)) { - Application application = getApplication(tenant, applicationId); - return convergeChecker.nodeConvergenceCheck(application, getHostFromRequest(request), request.getUri()); + return applicationRepository.nodeConvergenceCheck(tenant, applicationId, getHostFromRequest(request), request.getUri()); } if (isContentRequest(request)) { - LocalSession session = SessionHandler.getSessionFromRequest(tenant.getLocalSessionRepo(), tenant.getApplicationRepo().getSessionIdForApplication(applicationId)); - return contentHandler.get(ApplicationContentRequest.create(request, session, applicationId, zone)); + return applicationRepository.getContent(tenant, applicationId, zone, request); } - Application application = getApplication(tenant, applicationId); // TODO: Remove this once the config convergence logic is moved to client and is live for all clusters. 
if (isConvergeRequest(request)) { try { - convergeChecker.waitForConfigConverged(application, new TimeoutBudget(Clock.systemUTC(), durationFromRequestTimeout(request))); + applicationRepository.waitForConfigConverged(tenant, applicationId, new TimeoutBudget(Clock.systemUTC(), durationFromRequestTimeout(request))); } catch (IOException e) { throw new RuntimeException(e); } } if (isServiceConvergeListRequest(request)) { - return convergeChecker.listConfigConvergence(application, request.getUri()); + return applicationRepository.listConfigConvergence(tenant, applicationId, request.getUri()); } - return new GetApplicationResponse(Response.Status.OK, application.getApplicationGeneration()); + return new GetApplicationResponse(Response.Status.OK, applicationRepository.getApplicationGeneration(tenant, applicationId)); } @Override @@ -136,8 +122,18 @@ public class ApplicationHandler extends HttpHandler { if (getBindingMatch(request).groupCount() != 7) throw new NotFoundException("Illegal POST log request '" + request.getUri() + "': Must have 6 arguments but had " + ( getBindingMatch(request).groupCount()-1 ) ); - Application application = getApplication(tenant, applicationId); - return logServerLogGrabber.grabLog(application); + final String response = applicationRepository.grabLog(tenant, applicationId); + return new HttpResponse(200) { + @Override + public void render(OutputStream outputStream) throws IOException { + outputStream.write(response.getBytes(StandardCharsets.UTF_8)); + } + + @Override + public String getContentType() { + return HttpConfigResponse.JSON_CONTENT_TYPE; + } + }; } private HostFilter hostFilterFrom(HttpRequest request) { @@ -164,13 +160,6 @@ public class ApplicationHandler extends HttpHandler { return Duration.ofSeconds(timeoutInSeconds); } - private Application getApplication(Tenant tenant, ApplicationId applicationId) { - TenantApplications applicationRepo = tenant.getApplicationRepo(); - RemoteSessionRepo remoteSessionRepo = 
tenant.getRemoteSessionRepo(); - long sessionId = applicationRepo.getSessionIdForApplication(applicationId); - RemoteSession session = remoteSessionRepo.getSession(sessionId, 0); - return session.ensureApplicationLoaded().getForVersionOrLatest(Optional.empty()); - } private List<ApplicationId> listApplicationIds(Tenant tenant) { TenantApplications applicationRepo = tenant.getApplicationRepo(); diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/SessionActiveHandler.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/SessionActiveHandler.java index 89463a0b8ee..af8288374f3 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/SessionActiveHandler.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/SessionActiveHandler.java @@ -1,24 +1,22 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.config.server.http.v2; -import java.util.Optional; import java.util.concurrent.Executor; import com.google.inject.Inject; -import com.yahoo.config.provision.Provisioner; import com.yahoo.config.provision.TenantName; import com.yahoo.config.provision.Zone; import com.yahoo.container.jdisc.HttpRequest; import com.yahoo.container.jdisc.HttpResponse; import com.yahoo.container.logging.AccessLog; import com.yahoo.log.LogLevel; +import com.yahoo.vespa.config.server.ApplicationRepository; import com.yahoo.vespa.config.server.tenant.Tenant; import com.yahoo.vespa.config.server.tenant.Tenants; import com.yahoo.vespa.config.server.TimeoutBudget; import com.yahoo.vespa.config.server.http.SessionActiveHandlerBase; import com.yahoo.vespa.config.server.http.SessionHandler; import com.yahoo.vespa.config.server.http.Utils; -import com.yahoo.vespa.config.server.provision.HostProvisionerProvider; import com.yahoo.vespa.config.server.session.LocalSession; /** @@ -30,18 +28,16 @@ import 
com.yahoo.vespa.config.server.session.LocalSession; public class SessionActiveHandler extends SessionActiveHandlerBase { private final Tenants tenants; - private final Optional<Provisioner> hostProvisioner; private final Zone zone; @Inject public SessionActiveHandler(Executor executor, AccessLog accessLog, Tenants tenants, - HostProvisionerProvider hostProvisionerProvider, - Zone zone) { - super(executor, accessLog); + Zone zone, + ApplicationRepository applicationRepository) { + super(executor, accessLog, applicationRepository); this.tenants = tenants; - this.hostProvisioner = hostProvisionerProvider.getHostProvisioner(); this.zone = zone; } @@ -52,7 +48,7 @@ public class SessionActiveHandler extends SessionActiveHandlerBase { log.log(LogLevel.DEBUG, "Found tenant '" + tenantName + "' in request"); Tenant tenant = Utils.checkThatTenantExists(tenants, tenantName); LocalSession localSession = getSessionFromRequestV2(tenant.getLocalSessionRepo(), request); - activate(request, tenant.getLocalSessionRepo(), tenant.getActivateLock(), timeoutBudget, hostProvisioner, localSession); + activate(request, tenant.getLocalSessionRepo(), tenant.getActivateLock(), timeoutBudget, localSession); return new SessionActiveResponse(localSession.getMetaData().getSlime(), tenantName, request, localSession, zone); } diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/model/SuperModel.java b/configserver/src/main/java/com/yahoo/vespa/config/server/model/SuperModel.java index c6aa5ed7f8e..e22f4a42776 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/model/SuperModel.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/model/SuperModel.java @@ -17,7 +17,7 @@ import java.util.Collections; import java.util.Map; /** - * A config model that spans across all applications of all tenants in the config server. + * A config model that provides config containing information from all known tenants and applications. 
* * @author vegardh * @since 5.9 @@ -28,8 +28,8 @@ public class SuperModel implements LbServicesConfig.Producer, RoutingConfig.Prod private final LbServicesProducer lbProd; private final RoutingProducer zoneProd; - public SuperModel(Map<TenantName, Map<ApplicationId, Application>> newModels, Zone zone) { - this.models = newModels; + public SuperModel(Map<TenantName, Map<ApplicationId, Application>> models, Zone zone) { + this.models = models; this.lbProd = new LbServicesProducer(Collections.unmodifiableMap(models), zone); this.zoneProd = new RoutingProducer(Collections.unmodifiableMap(models)); } @@ -49,9 +49,7 @@ public class SuperModel implements LbServicesConfig.Producer, RoutingConfig.Prod } } - public Map<TenantName, Map<ApplicationId, Application>> getCurrentModels() { - return models; - } + public Map<TenantName, Map<ApplicationId, Application>> applicationModels() { return models; } @Override public void getConfig(LbServicesConfig.Builder builder) { @@ -63,7 +61,8 @@ public class SuperModel implements LbServicesConfig.Producer, RoutingConfig.Prod zoneProd.getConfig(builder); } - public <CONFIGTYPE extends ConfigInstance> CONFIGTYPE getConfig(Class<CONFIGTYPE> configClass, ApplicationId applicationId, String configId) throws IOException { + public <CONFIGTYPE extends ConfigInstance> CONFIGTYPE getConfig(Class<CONFIGTYPE> configClass, + ApplicationId applicationId, String configId) throws IOException { TenantName tenant = applicationId.tenant(); if (!models.containsKey(tenant)) { throw new IllegalArgumentException("Tenant " + tenant + " not found"); @@ -77,4 +76,5 @@ public class SuperModel implements LbServicesConfig.Producer, RoutingConfig.Prod ConfigPayload payload = application.getModel().getConfig(key, (ConfigDefinition)null, null); return payload.toInstance(configClass, configId); } + } diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java 
b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java index 99036ee0027..1b32d6bde22 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java @@ -120,13 +120,12 @@ public abstract class ModelsBuilder<MODELRESULT extends ModelResult> { ConfigserverConfig configserverConfig, Zone zone, Set<Rotation> rotations) { - return new ModelContextImpl.Properties( - applicationId, - configserverConfig.multitenant(), - ConfigServerSpec.fromConfig(configserverConfig), - configserverConfig.hostedVespa(), - zone, - rotations); + return new ModelContextImpl.Properties(applicationId, + configserverConfig.multitenant(), + ConfigServerSpec.fromConfig(configserverConfig), + configserverConfig.hostedVespa(), + zone, + rotations); } } diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/PreparedModelsBuilder.java b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/PreparedModelsBuilder.java index cacd53cf945..9c1b2b4681e 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/PreparedModelsBuilder.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/PreparedModelsBuilder.java @@ -91,11 +91,7 @@ public class PreparedModelsBuilder extends ModelsBuilder<PreparedModelsBuilder.P this.applicationId = params.getApplicationId(); this.rotations = new Rotations(curator, tenantPath); this.rotationsSet = getRotations(params.rotations()); - this.properties = createModelContextProperties( - params.getApplicationId(), - configserverConfig, - zone, - rotationsSet); + this.properties = createModelContextProperties(params.getApplicationId(), configserverConfig, zone, rotationsSet); } /** Construct with all dependencies passed separately */ diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/RpcServer.java 
b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/RpcServer.java index 0b08279f3ab..7afa9b7db87 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/RpcServer.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/RpcServer.java @@ -24,13 +24,13 @@ import com.yahoo.vespa.config.protocol.ConfigResponse; import com.yahoo.vespa.config.protocol.JRTServerConfigRequest; import com.yahoo.vespa.config.protocol.JRTServerConfigRequestV3; import com.yahoo.vespa.config.protocol.Trace; +import com.yahoo.vespa.config.server.SuperModelRequestHandler; import com.yahoo.vespa.config.server.application.ApplicationSet; import com.yahoo.vespa.config.server.GetConfigContext; import com.yahoo.vespa.config.server.host.HostRegistries; import com.yahoo.vespa.config.server.host.HostRegistry; import com.yahoo.vespa.config.server.ReloadListener; import com.yahoo.vespa.config.server.RequestHandler; -import com.yahoo.vespa.config.server.SuperModelController; import com.yahoo.vespa.config.server.monitoring.MetricUpdater; import com.yahoo.vespa.config.server.monitoring.MetricUpdaterFactory; import com.yahoo.vespa.config.server.tenant.TenantHandlerProvider; @@ -79,7 +79,7 @@ public class RpcServer implements Runnable, ReloadListener, TenantListener { private final HostRegistry<TenantName> hostRegistry; private final Map<TenantName, TenantHandlerProvider> tenantProviders = new ConcurrentHashMap<>(); - private final SuperModelController superModelController; + private final SuperModelRequestHandler superModelRequestHandler; private final MetricUpdater metrics; private final MetricUpdaterFactory metricUpdaterFactory; private final HostLivenessTracker hostLivenessTracker; @@ -93,9 +93,9 @@ public class RpcServer implements Runnable, ReloadListener, TenantListener { * @param config The config to use for setting up this server */ @Inject - public RpcServer(ConfigserverConfig config, SuperModelController superModelController, MetricUpdaterFactory metrics, 
+ public RpcServer(ConfigserverConfig config, SuperModelRequestHandler superModelRequestHandler, MetricUpdaterFactory metrics, HostRegistries hostRegistries, HostLivenessTracker hostLivenessTracker) { - this.superModelController = superModelController; + this.superModelRequestHandler = superModelRequestHandler; this.metricUpdaterFactory = metrics; this.supervisor.setMaxOutputBufferSize(config.maxoutputbuffersize()); this.metrics = metrics.getOrCreateMetricUpdater(Collections.<String, String>emptyMap()); @@ -188,13 +188,13 @@ public class RpcServer implements Runnable, ReloadListener, TenantListener { */ @Override public void configReloaded(TenantName tenant, ApplicationSet applicationSet) { - final ApplicationId applicationId = applicationSet.getId(); + ApplicationId applicationId = applicationSet.getId(); configReloaded(delayedConfigResponses.drainQueue(applicationId), Tenants.logPre(applicationId)); reloadSuperModel(tenant, applicationSet); } private void reloadSuperModel(TenantName tenant, ApplicationSet applicationSet) { - superModelController.reloadConfig(tenant, applicationSet); + superModelRequestHandler.reloadConfig(tenant, applicationSet); configReloaded(delayedConfigResponses.drainQueue(ApplicationId.global()), Tenants.logPre(ApplicationId.global())); } @@ -253,7 +253,7 @@ public class RpcServer implements Runnable, ReloadListener, TenantListener { @Override public void applicationRemoved(ApplicationId applicationId) { - superModelController.removeApplication(applicationId); + superModelRequestHandler.removeApplication(applicationId); configReloaded(delayedConfigResponses.drainQueue(applicationId), Tenants.logPre(applicationId)); configReloaded(delayedConfigResponses.drainQueue(ApplicationId.global()), Tenants.logPre(ApplicationId.global())); } @@ -286,12 +286,8 @@ public class RpcServer implements Runnable, ReloadListener, TenantListener { } public ConfigResponse resolveConfig(JRTServerConfigRequest request, GetConfigContext context, Optional<Version> 
vespaVersion) { - Trace trace = context.trace(); - if (trace.shouldTrace(TRACELEVEL)) { - trace.trace(TRACELEVEL, "RpcServer.resolveConfig()"); - } - RequestHandler handler = context.requestHandler(); - return handler.resolveConfig(context.applicationId(), request, vespaVersion); + context.trace().trace(TRACELEVEL, "RpcServer.resolveConfig()"); + return context.requestHandler().resolveConfig(context.applicationId(), request, vespaVersion); } protected Supervisor getSupervisor() { @@ -338,7 +334,7 @@ public class RpcServer implements Runnable, ReloadListener, TenantListener { */ public GetConfigContext createGetConfigContext(Optional<TenantName> optionalTenant, JRTServerConfigRequest request, Trace trace) { if ("*".equals(request.getConfigKey().getConfigId())) { - return GetConfigContext.create(ApplicationId.global(), superModelController, trace); + return GetConfigContext.create(ApplicationId.global(), superModelRequestHandler, trace); } TenantName tenant = optionalTenant.orElse(TenantName.defaultName()); // perhaps needed for non-hosted? if ( ! 
hasRequestHandler(tenant)) { @@ -383,7 +379,7 @@ public class RpcServer implements Runnable, ReloadListener, TenantListener { @Override public void onTenantsLoaded() { allTenantsLoaded = true; - superModelController.enable(); + superModelRequestHandler.enable(); } @Override diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java index b10865f257b..d2ded8ee226 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java @@ -163,10 +163,7 @@ public class SessionPreparer { void preprocess() { try { - this.applicationPackage = context.getApplicationPackage().preprocess( - properties.zone(), - null, - logger); + this.applicationPackage = context.getApplicationPackage().preprocess(properties.zone(), null, logger); } catch (IOException | TransformerException | ParserConfigurationException | SAXException e) { throw new RuntimeException("Error deploying application package", e); } diff --git a/configserver/src/main/resources/configserver-app/services.xml b/configserver/src/main/resources/configserver-app/services.xml index 28652e73007..8a4069a4341 100644 --- a/configserver/src/main/resources/configserver-app/services.xml +++ b/configserver/src/main/resources/configserver-app/services.xml @@ -12,7 +12,7 @@ <component id="com.yahoo.vespa.config.server.modelfactory.ModelFactoryRegistry" bundle="configserver" /> <component id="com.yahoo.vespa.config.server.SuperModelGenerationCounter" bundle="configserver" /> <component id="com.yahoo.vespa.config.server.session.SessionPreparer" bundle="configserver" /> - <component id="com.yahoo.vespa.config.server.SuperModelController" bundle="configserver" /> + <component id="com.yahoo.vespa.config.server.SuperModelRequestHandler" bundle="configserver" /> <component 
id="com.yahoo.vespa.config.server.StaticConfigDefinitionRepo" bundle="configserver" /> <component id="com.yahoo.vespa.config.server.provision.HostProvisionerProvider" bundle="configserver" /> <component id="com.yahoo.vespa.curator.Curator" bundle="configserver" /> diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/SuperModelControllerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/SuperModelControllerTest.java index 9077b3fdac9..68b9c06aa7b 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/SuperModelControllerTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/SuperModelControllerTest.java @@ -1,29 +1,37 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.config.server; -import com.yahoo.cloud.config.ConfigserverConfig; +import com.yahoo.cloud.config.LbServicesConfig; import com.yahoo.config.model.application.provider.FilesApplicationPackage; -import com.yahoo.config.provision.Version; +import com.yahoo.config.provision.*; +import com.yahoo.jrt.Request; +import com.yahoo.vespa.config.ConfigKey; +import com.yahoo.cloud.config.LbServicesConfig.Tenants.Applications; +import com.yahoo.vespa.config.protocol.CompressionType; +import com.yahoo.vespa.config.protocol.DefContent; +import com.yahoo.vespa.config.protocol.JRTClientConfigRequestV3; +import com.yahoo.vespa.config.protocol.JRTServerConfigRequestV3; +import com.yahoo.vespa.config.protocol.Trace; import com.yahoo.vespa.config.server.application.Application; -import com.yahoo.config.provision.ApplicationId; -import com.yahoo.config.provision.TenantName; -import com.yahoo.vespa.config.server.application.ApplicationSet; +import com.yahoo.vespa.config.server.model.SuperModel; import com.yahoo.vespa.config.server.monitoring.MetricUpdater; -import com.yahoo.vespa.curator.mock.MockCurator; +import 
com.yahoo.vespa.config.server.rpc.UncompressedConfigResponseFactory; import com.yahoo.vespa.model.VespaModel; import org.junit.Before; -import org.junit.Rule; import org.junit.Test; -import org.junit.rules.TemporaryFolder; import org.xml.sax.SAXException; import java.io.File; import java.io.IOException; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.Map; import java.util.Optional; -import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.*; +import static org.hamcrest.core.Is.is; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; /** * @author lulf @@ -31,100 +39,93 @@ import static org.junit.Assert.*; */ public class SuperModelControllerTest { - private static final File testApp = new File("src/test/resources/deploy/app"); - private SuperModelGenerationCounter counter; - private SuperModelController controller; - - @Rule - public TemporaryFolder folder = new TemporaryFolder(); + private SuperModelController handler; @Before - public void setup() throws IOException { - counter = new SuperModelGenerationCounter(new MockCurator()); - controller = new SuperModelController(counter, - new TestConfigDefinitionRepo(), - new ConfigserverConfig(new ConfigserverConfig.Builder())); + public void setupHandler() throws IOException, SAXException { + Map<TenantName, Map<ApplicationId, Application>> models = new LinkedHashMap<>(); + models.put(TenantName.from("a"), new LinkedHashMap<>()); + File testApp = new File("src/test/resources/deploy/app"); + ApplicationId app = ApplicationId.from(TenantName.from("a"), + ApplicationName.from("foo"), InstanceName.defaultName()); + models.get(app.tenant()).put(app, new Application(new VespaModel(FilesApplicationPackage.fromFile(testApp)), new ServerCache(), 4l, Version.fromIntValues(1, 2, 3), MetricUpdater.createTestUpdater(), app)); + handler = new SuperModelController(new SuperModel(models, Zone.defaultZone()), new TestConfigDefinitionRepo(), 2, new 
UncompressedConfigResponseFactory()); } - + @Test - public void test_super_model_reload() throws IOException, SAXException { - TenantName tenantA = TenantName.from("a"); - assertNotNull(controller.getHandler()); - long gen = counter.increment(); - controller.reloadConfig(tenantA, createApp(tenantA, "foo", 3l, 1)); - assertNotNull(controller.getHandler()); - assertThat(controller.getHandler().getGeneration(), is(gen)); - controller.reloadConfig(tenantA, createApp(tenantA, "foo", 4l, 2)); - assertThat(controller.getHandler().getGeneration(), is(gen)); - // Test that a new app is used when there already exist an application with the same id - ApplicationId appId = new ApplicationId.Builder().tenant(tenantA).applicationName("foo").build(); - assertThat(((TestApplication) controller.getHandler().getSuperModel().getCurrentModels().get(tenantA).get(appId)).version, is(2l)); - gen = counter.increment(); - controller.reloadConfig(tenantA, createApp(tenantA, "bar", 2l, 3)); - assertThat(controller.getHandler().getGeneration(), is(gen)); + public void test_lb_config_simple() { + LbServicesConfig.Builder lb = new LbServicesConfig.Builder(); + handler.getSuperModel().getConfig(lb); + LbServicesConfig lbc = new LbServicesConfig(lb); + assertThat(lbc.tenants().size(), is(1)); + assertThat(lbc.tenants("a").applications().size(), is(1)); + Applications app = lbc.tenants("a").applications("foo:prod:default:default"); + assertTrue(app.hosts().size() > 0); } - @Test - public void test_super_model_remove() throws IOException, SAXException { - TenantName tenantA = TenantName.from("a"); - TenantName tenantB = TenantName.from("b"); - long gen = counter.increment(); - controller.reloadConfig(tenantA, createApp(tenantA, "foo", 3l, 1)); - controller.reloadConfig(tenantA, createApp(tenantA, "bar", 30l, 2)); - controller.reloadConfig(tenantB, createApp(tenantB, "baz", 9l, 3)); - assertThat(controller.getHandler().getGeneration(), is(gen)); - 
assertThat(controller.getHandler().getSuperModel().getCurrentModels().size(), is(2)); - assertThat(controller.getHandler().getSuperModel().getCurrentModels().get(TenantName.from("a")).size(), is(2)); - controller.removeApplication( - new ApplicationId.Builder().tenant("a").applicationName("unknown").build()); - assertThat(controller.getHandler().getGeneration(), is(gen)); - assertThat(controller.getHandler().getSuperModel().getCurrentModels().size(), is(2)); - assertThat(controller.getHandler().getSuperModel().getCurrentModels().get(TenantName.from("a")).size(), is(2)); - gen = counter.increment(); - controller.removeApplication( - new ApplicationId.Builder().tenant("a").applicationName("bar").build()); - assertThat(controller.getHandler().getSuperModel().getCurrentModels().size(), is(2)); - assertThat(controller.getHandler().getSuperModel().getCurrentModels().get(TenantName.from("a")).size(), is(1)); - assertThat(controller.getHandler().getGeneration(), is(gen)); - } - @Test - public void test_super_model_master_generation() throws IOException, SAXException { - TenantName tenantA = TenantName.from("a"); - long masterGen = 10; - controller = new SuperModelController(counter, - new TestConfigDefinitionRepo(), - new ConfigserverConfig(new ConfigserverConfig.Builder().masterGeneration(masterGen))); - - long gen = counter.increment(); - controller.reloadConfig(tenantA, createApp(tenantA, "foo", 3L, 1)); - assertThat(controller.getHandler().getGeneration(), is(masterGen + gen)); + @Test(expected = UnknownConfigDefinitionException.class) + public void test_unknown_config_definition() { + String md5 = "asdfasf"; + Request request = JRTClientConfigRequestV3.createWithParams(new ConfigKey<>("foo", "id", "bar", md5, null), DefContent.fromList(Collections.emptyList()), + "fromHost", md5, 1, 1, Trace.createDummy(), CompressionType.UNCOMPRESSED, + Optional.empty()) + .getRequest(); + JRTServerConfigRequestV3 v3Request = JRTServerConfigRequestV3.createFromRequest(request); + 
handler.resolveConfig(v3Request); } @Test - public void test_super_model_has_application_when_enabled() { - assertFalse(controller.hasApplication(ApplicationId.global(), Optional.empty())); - controller.enable(); - assertTrue(controller.hasApplication(ApplicationId.global(), Optional.empty())); + public void test_lb_config_multiple_apps() throws IOException, SAXException { + Map<TenantName, Map<ApplicationId, Application>> models = new LinkedHashMap<>(); + models.put(TenantName.from("t1"), new LinkedHashMap<>()); + models.put(TenantName.from("t2"), new LinkedHashMap<>()); + File testApp1 = new File("src/test/resources/deploy/app"); + File testApp2 = new File("src/test/resources/deploy/advancedapp"); + File testApp3 = new File("src/test/resources/deploy/advancedapp"); + // TODO must fix equals, hashCode on Tenant + Version vespaVersion = Version.fromIntValues(1, 2, 3); + models.get(TenantName.from("t1")).put(applicationId("mysimpleapp"), + new Application(new VespaModel(FilesApplicationPackage.fromFile(testApp1)), new ServerCache(), 4l, vespaVersion, MetricUpdater.createTestUpdater(), applicationId("mysimpleapp"))); + models.get(TenantName.from("t1")).put(applicationId("myadvancedapp"), + new Application(new VespaModel(FilesApplicationPackage.fromFile(testApp2)), new ServerCache(), 4l, vespaVersion, MetricUpdater.createTestUpdater(), applicationId("myadvancedapp"))); + models.get(TenantName.from("t2")).put(applicationId("minetooadvancedapp"), + new Application(new VespaModel(FilesApplicationPackage.fromFile(testApp3)), new ServerCache(), 4l, vespaVersion, MetricUpdater.createTestUpdater(), applicationId("minetooadvancedapp"))); + + SuperModelController han = new SuperModelController(new SuperModel(models, Zone.defaultZone()), new TestConfigDefinitionRepo(), 2, new UncompressedConfigResponseFactory()); + LbServicesConfig.Builder lb = new LbServicesConfig.Builder(); + han.getSuperModel().getConfig(lb); + LbServicesConfig lbc = new LbServicesConfig(lb); + 
assertThat(lbc.tenants().size(), is(2)); + assertThat(lbc.tenants("t1").applications().size(), is(2)); + assertThat(lbc.tenants("t2").applications().size(), is(1)); + assertThat(lbc.tenants("t2").applications("minetooadvancedapp:prod:default:default").hosts().size(), is(1)); + assertQrServer(lbc.tenants("t2").applications("minetooadvancedapp:prod:default:default")); } - private ApplicationSet createApp(TenantName tenant, String application, long generation, long version) throws IOException, SAXException { - return ApplicationSet.fromSingle( - new TestApplication( - new VespaModel(FilesApplicationPackage.fromFile(testApp)), - new ServerCache(), - generation, - new ApplicationId.Builder().tenant(tenant).applicationName(application).build(), - version)); + private ApplicationId applicationId(String applicationName) { + return ApplicationId.from(TenantName.defaultName(), + ApplicationName.from(applicationName), InstanceName.defaultName()); } - private static class TestApplication extends Application { - private long version = 0; - - public TestApplication(VespaModel vespaModel, ServerCache cache, long appGeneration, ApplicationId app, long version) { - super(vespaModel, cache, appGeneration, Version.fromIntValues(1, 2, 3), MetricUpdater.createTestUpdater(), app); - this.version = version; + private void assertQrServer(Applications app) { + String host = app.hosts().keySet().iterator().next(); + Applications.Hosts hosts = app.hosts(host); + assertThat(hosts.hostname(), is(host)); + for (Map.Entry<String, Applications.Hosts.Services> e : app.hosts(host).services().entrySet()) { + System.out.println(e); + if ("qrserver".equals(e.getKey())) { + Applications.Hosts.Services s = e.getValue(); + assertThat(s.type(), is("qrserver")); + assertThat(s.ports().size(), is(4)); + assertThat(s.index(), is(0)); + return; + } } + org.junit.Assert.fail("No qrserver service in config"); } } + + + diff --git 
a/configserver/src/test/java/com/yahoo/vespa/config/server/SuperModelRequestHandlerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/SuperModelRequestHandlerTest.java index 65d6611112c..a2b0b4e7d22 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/SuperModelRequestHandlerTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/SuperModelRequestHandlerTest.java @@ -1,37 +1,29 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.config.server; -import com.yahoo.cloud.config.LbServicesConfig; +import com.yahoo.cloud.config.ConfigserverConfig; import com.yahoo.config.model.application.provider.FilesApplicationPackage; -import com.yahoo.config.provision.*; -import com.yahoo.jrt.Request; -import com.yahoo.vespa.config.ConfigKey; -import com.yahoo.cloud.config.LbServicesConfig.Tenants.Applications; -import com.yahoo.vespa.config.protocol.CompressionType; -import com.yahoo.vespa.config.protocol.DefContent; -import com.yahoo.vespa.config.protocol.JRTClientConfigRequestV3; -import com.yahoo.vespa.config.protocol.JRTServerConfigRequestV3; -import com.yahoo.vespa.config.protocol.Trace; +import com.yahoo.config.provision.Version; import com.yahoo.vespa.config.server.application.Application; -import com.yahoo.vespa.config.server.model.SuperModel; +import com.yahoo.config.provision.ApplicationId; +import com.yahoo.config.provision.TenantName; +import com.yahoo.vespa.config.server.application.ApplicationSet; import com.yahoo.vespa.config.server.monitoring.MetricUpdater; -import com.yahoo.vespa.config.server.rpc.UncompressedConfigResponseFactory; +import com.yahoo.vespa.curator.mock.MockCurator; import com.yahoo.vespa.model.VespaModel; import org.junit.Before; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.TemporaryFolder; import org.xml.sax.SAXException; import java.io.File; import java.io.IOException; -import 
java.util.Collections; -import java.util.LinkedHashMap; -import java.util.Map; import java.util.Optional; -import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; +import static org.hamcrest.CoreMatchers.is; +import static org.junit.Assert.*; /** * @author lulf @@ -39,93 +31,100 @@ import static org.junit.Assert.assertTrue; */ public class SuperModelRequestHandlerTest { - private SuperModelRequestHandler handler; + private static final File testApp = new File("src/test/resources/deploy/app"); + private SuperModelGenerationCounter counter; + private SuperModelRequestHandler controller; + + @Rule + public TemporaryFolder folder = new TemporaryFolder(); @Before - public void setupHandler() throws IOException, SAXException { - Map<TenantName, Map<ApplicationId, Application>> models = new LinkedHashMap<>(); - models.put(TenantName.from("a"), new LinkedHashMap<>()); - File testApp = new File("src/test/resources/deploy/app"); - ApplicationId app = ApplicationId.from(TenantName.from("a"), - ApplicationName.from("foo"), InstanceName.defaultName()); - models.get(app.tenant()).put(app, new Application(new VespaModel(FilesApplicationPackage.fromFile(testApp)), new ServerCache(), 4l, Version.fromIntValues(1, 2, 3), MetricUpdater.createTestUpdater(), app)); - handler = new SuperModelRequestHandler(new SuperModel(models, Zone.defaultZone()), new TestConfigDefinitionRepo(), 2, new UncompressedConfigResponseFactory()); + public void setup() throws IOException { + counter = new SuperModelGenerationCounter(new MockCurator()); + controller = new SuperModelRequestHandler(counter, + new TestConfigDefinitionRepo(), + new ConfigserverConfig(new ConfigserverConfig.Builder())); } - + @Test - public void test_lb_config_simple() { - LbServicesConfig.Builder lb = new LbServicesConfig.Builder(); - handler.getSuperModel().getConfig(lb); - LbServicesConfig lbc = new LbServicesConfig(lb); - assertThat(lbc.tenants().size(), 
is(1)); - assertThat(lbc.tenants("a").applications().size(), is(1)); - Applications app = lbc.tenants("a").applications("foo:prod:default:default"); - assertTrue(app.hosts().size() > 0); + public void test_super_model_reload() throws IOException, SAXException { + TenantName tenantA = TenantName.from("a"); + assertNotNull(controller.getHandler()); + long gen = counter.increment(); + controller.reloadConfig(tenantA, createApp(tenantA, "foo", 3l, 1)); + assertNotNull(controller.getHandler()); + assertThat(controller.getHandler().getGeneration(), is(gen)); + controller.reloadConfig(tenantA, createApp(tenantA, "foo", 4l, 2)); + assertThat(controller.getHandler().getGeneration(), is(gen)); + // Test that a new app is used when there already exist an application with the same id + ApplicationId appId = new ApplicationId.Builder().tenant(tenantA).applicationName("foo").build(); + assertThat(((TestApplication) controller.getHandler().getSuperModel().applicationModels().get(tenantA).get(appId)).version, is(2l)); + gen = counter.increment(); + controller.reloadConfig(tenantA, createApp(tenantA, "bar", 2l, 3)); + assertThat(controller.getHandler().getGeneration(), is(gen)); } + @Test + public void test_super_model_remove() throws IOException, SAXException { + TenantName tenantA = TenantName.from("a"); + TenantName tenantB = TenantName.from("b"); + long gen = counter.increment(); + controller.reloadConfig(tenantA, createApp(tenantA, "foo", 3l, 1)); + controller.reloadConfig(tenantA, createApp(tenantA, "bar", 30l, 2)); + controller.reloadConfig(tenantB, createApp(tenantB, "baz", 9l, 3)); + assertThat(controller.getHandler().getGeneration(), is(gen)); + assertThat(controller.getHandler().getSuperModel().applicationModels().size(), is(2)); + assertThat(controller.getHandler().getSuperModel().applicationModels().get(TenantName.from("a")).size(), is(2)); + controller.removeApplication( + new ApplicationId.Builder().tenant("a").applicationName("unknown").build()); + 
assertThat(controller.getHandler().getGeneration(), is(gen)); + assertThat(controller.getHandler().getSuperModel().applicationModels().size(), is(2)); + assertThat(controller.getHandler().getSuperModel().applicationModels().get(TenantName.from("a")).size(), is(2)); + gen = counter.increment(); + controller.removeApplication( + new ApplicationId.Builder().tenant("a").applicationName("bar").build()); + assertThat(controller.getHandler().getSuperModel().applicationModels().size(), is(2)); + assertThat(controller.getHandler().getSuperModel().applicationModels().get(TenantName.from("a")).size(), is(1)); + assertThat(controller.getHandler().getGeneration(), is(gen)); + } - @Test(expected = UnknownConfigDefinitionException.class) - public void test_unknown_config_definition() { - String md5 = "asdfasf"; - Request request = JRTClientConfigRequestV3.createWithParams(new ConfigKey<>("foo", "id", "bar", md5, null), DefContent.fromList(Collections.emptyList()), - "fromHost", md5, 1, 1, Trace.createDummy(), CompressionType.UNCOMPRESSED, - Optional.empty()) - .getRequest(); - JRTServerConfigRequestV3 v3Request = JRTServerConfigRequestV3.createFromRequest(request); - handler.resolveConfig(v3Request); + @Test + public void test_super_model_master_generation() throws IOException, SAXException { + TenantName tenantA = TenantName.from("a"); + long masterGen = 10; + controller = new SuperModelRequestHandler(counter, + new TestConfigDefinitionRepo(), + new ConfigserverConfig(new ConfigserverConfig.Builder().masterGeneration(masterGen))); + + long gen = counter.increment(); + controller.reloadConfig(tenantA, createApp(tenantA, "foo", 3L, 1)); + assertThat(controller.getHandler().getGeneration(), is(masterGen + gen)); } @Test - public void test_lb_config_multiple_apps() throws IOException, SAXException { - Map<TenantName, Map<ApplicationId, Application>> models = new LinkedHashMap<>(); - models.put(TenantName.from("t1"), new LinkedHashMap<>()); - models.put(TenantName.from("t2"), new 
LinkedHashMap<>()); - File testApp1 = new File("src/test/resources/deploy/app"); - File testApp2 = new File("src/test/resources/deploy/advancedapp"); - File testApp3 = new File("src/test/resources/deploy/advancedapp"); - // TODO must fix equals, hashCode on Tenant - Version vespaVersion = Version.fromIntValues(1, 2, 3); - models.get(TenantName.from("t1")).put(applicationId("mysimpleapp"), - new Application(new VespaModel(FilesApplicationPackage.fromFile(testApp1)), new ServerCache(), 4l, vespaVersion, MetricUpdater.createTestUpdater(), applicationId("mysimpleapp"))); - models.get(TenantName.from("t1")).put(applicationId("myadvancedapp"), - new Application(new VespaModel(FilesApplicationPackage.fromFile(testApp2)), new ServerCache(), 4l, vespaVersion, MetricUpdater.createTestUpdater(), applicationId("myadvancedapp"))); - models.get(TenantName.from("t2")).put(applicationId("minetooadvancedapp"), - new Application(new VespaModel(FilesApplicationPackage.fromFile(testApp3)), new ServerCache(), 4l, vespaVersion, MetricUpdater.createTestUpdater(), applicationId("minetooadvancedapp"))); - - SuperModelRequestHandler han = new SuperModelRequestHandler(new SuperModel(models, Zone.defaultZone()), new TestConfigDefinitionRepo(), 2, new UncompressedConfigResponseFactory()); - LbServicesConfig.Builder lb = new LbServicesConfig.Builder(); - han.getSuperModel().getConfig(lb); - LbServicesConfig lbc = new LbServicesConfig(lb); - assertThat(lbc.tenants().size(), is(2)); - assertThat(lbc.tenants("t1").applications().size(), is(2)); - assertThat(lbc.tenants("t2").applications().size(), is(1)); - assertThat(lbc.tenants("t2").applications("minetooadvancedapp:prod:default:default").hosts().size(), is(1)); - assertQrServer(lbc.tenants("t2").applications("minetooadvancedapp:prod:default:default")); + public void test_super_model_has_application_when_enabled() { + assertFalse(controller.hasApplication(ApplicationId.global(), Optional.empty())); + controller.enable(); + 
assertTrue(controller.hasApplication(ApplicationId.global(), Optional.empty())); } - private ApplicationId applicationId(String applicationName) { - return ApplicationId.from(TenantName.defaultName(), - ApplicationName.from(applicationName), InstanceName.defaultName()); + private ApplicationSet createApp(TenantName tenant, String application, long generation, long version) throws IOException, SAXException { + return ApplicationSet.fromSingle( + new TestApplication( + new VespaModel(FilesApplicationPackage.fromFile(testApp)), + new ServerCache(), + generation, + new ApplicationId.Builder().tenant(tenant).applicationName(application).build(), + version)); } - private void assertQrServer(Applications app) { - String host = app.hosts().keySet().iterator().next(); - Applications.Hosts hosts = app.hosts(host); - assertThat(hosts.hostname(), is(host)); - for (Map.Entry<String, Applications.Hosts.Services> e : app.hosts(host).services().entrySet()) { - System.out.println(e); - if ("qrserver".equals(e.getKey())) { - Applications.Hosts.Services s = e.getValue(); - assertThat(s.type(), is("qrserver")); - assertThat(s.ports().size(), is(4)); - assertThat(s.index(), is(0)); - return; - } + private static class TestApplication extends Application { + private long version = 0; + + public TestApplication(VespaModel vespaModel, ServerCache cache, long appGeneration, ApplicationId app, long version) { + super(vespaModel, cache, appGeneration, Version.fromIntValues(1, 2, 3), MetricUpdater.createTestUpdater(), app); + this.version = version; } - org.junit.Assert.fail("No qrserver service in config"); } } - - - diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/DeployTester.java b/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/DeployTester.java index 30a3eec47fd..3c19725c22f 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/DeployTester.java +++ 
b/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/DeployTester.java @@ -26,8 +26,9 @@ import com.yahoo.transaction.NestedTransaction; import com.yahoo.vespa.config.server.ApplicationRepository; import com.yahoo.vespa.config.server.TestComponentRegistry; import com.yahoo.vespa.config.server.TimeoutBudget; +import com.yahoo.vespa.config.server.application.ApplicationConvergenceChecker; +import com.yahoo.vespa.config.server.application.LogServerLogGrabber; import com.yahoo.vespa.config.server.modelfactory.ModelFactoryRegistry; -import com.yahoo.vespa.config.server.modelfactory.ModelResult; import com.yahoo.vespa.config.server.monitoring.Metrics; import com.yahoo.vespa.config.server.provision.HostProvisionerProvider; import com.yahoo.vespa.config.server.session.LocalSession; @@ -35,13 +36,10 @@ import com.yahoo.vespa.config.server.session.PrepareParams; import com.yahoo.vespa.config.server.session.SilentDeployLogger; import com.yahoo.vespa.config.server.tenant.Tenant; import com.yahoo.vespa.config.server.tenant.Tenants; -import com.yahoo.vespa.config.server.zookeeper.ConfigCurator; import com.yahoo.vespa.curator.Curator; import com.yahoo.vespa.curator.mock.MockCurator; import com.yahoo.vespa.model.VespaModel; import com.yahoo.vespa.model.VespaModelFactory; -import org.apache.curator.framework.CuratorFramework; -import org.junit.Before; import java.io.File; import java.io.IOException; @@ -112,11 +110,14 @@ public class DeployTester { } public Optional<com.yahoo.config.provision.Deployment> redeployFromLocalActive(ApplicationId id) { - ApplicationRepository applicationRepository = new ApplicationRepository(tenants, HostProvisionerProvider.withProvisioner(createHostProvisioner()), - new ConfigserverConfig(new ConfigserverConfig.Builder()), curator); - - Optional<com.yahoo.config.provision.Deployment> deployment = applicationRepository.deployFromLocalActive(id, Duration.ofSeconds(60)); - return deployment; + ApplicationRepository applicationRepository = new 
ApplicationRepository(tenants, + HostProvisionerProvider.withProvisioner(createHostProvisioner()), + new ConfigserverConfig(new ConfigserverConfig.Builder()), + curator, + new LogServerLogGrabber(), + new ApplicationConvergenceChecker()); + + return applicationRepository.deployFromLocalActive(id, Duration.ofSeconds(60)); } private Provisioner createHostProvisioner() { diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/ApplicationContentHandlerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/ApplicationContentHandlerTest.java index f92bfa7d866..fc5a672559d 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/ApplicationContentHandlerTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/ApplicationContentHandlerTest.java @@ -1,6 +1,7 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.config.server.http.v2; +import com.yahoo.cloud.config.ConfigserverConfig; import com.yahoo.config.model.application.provider.FilesApplicationPackage; import com.yahoo.config.provision.TenantName; import com.yahoo.config.provision.Zone; @@ -9,9 +10,13 @@ import com.yahoo.container.jdisc.HttpRequest; import com.yahoo.container.logging.AccessLog; import com.yahoo.jdisc.Response; import com.yahoo.config.provision.ApplicationId; +import com.yahoo.vespa.config.server.ApplicationRepository; +import com.yahoo.vespa.config.server.application.ApplicationConvergenceChecker; +import com.yahoo.vespa.config.server.application.LogServerLogGrabber; import com.yahoo.vespa.config.server.http.ContentHandlerTestBase; import com.yahoo.vespa.config.server.provision.HostProvisionerProvider; import com.yahoo.vespa.config.server.session.Session; +import com.yahoo.vespa.curator.mock.MockCurator; import org.junit.Before; import org.junit.Test; @@ -51,7 +56,17 @@ public class ApplicationContentHandlerTest extends 
ContentHandlerTestBase { testTenantBuilder.tenants().get(tenant2).getLocalSessionRepo().addSession(new MockSession(3l, FilesApplicationPackage.fromFile(new File("src/test/apps/content2")))); testTenantBuilder.tenants().get(tenant1).getApplicationRepo().createPutApplicationTransaction(idTenant1, 2l).commit(); testTenantBuilder.tenants().get(tenant2).getApplicationRepo().createPutApplicationTransaction(idTenant2, 3l).commit(); - handler = new ApplicationHandler(command -> command.run(), AccessLog.voidAccessLog(), testTenantBuilder.createTenants(), HostProvisionerProvider.empty(), Zone.defaultZone(), null, null, null, null); + handler = new ApplicationHandler(command -> command.run(), + AccessLog.voidAccessLog(), + testTenantBuilder.createTenants(), + HostProvisionerProvider.empty(), + Zone.defaultZone(), + new ApplicationRepository(testTenantBuilder.createTenants(), + HostProvisionerProvider.empty(), + new ConfigserverConfig(new ConfigserverConfig.Builder()), + new MockCurator(), + new LogServerLogGrabber(), + new ApplicationConvergenceChecker())); pathPrefix = createPath(idTenant1, Zone.defaultZone()); baseUrl = baseServer + pathPrefix; } diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/ApplicationHandlerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/ApplicationHandlerTest.java index a1a4a3413ba..f8071721989 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/ApplicationHandlerTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/ApplicationHandlerTest.java @@ -2,6 +2,7 @@ package com.yahoo.vespa.config.server.http.v2; import com.fasterxml.jackson.databind.ObjectMapper; +import com.yahoo.cloud.config.ConfigserverConfig; import com.yahoo.config.application.api.ApplicationPackage; import com.yahoo.config.model.NullConfigModelRegistry; import com.yahoo.config.model.application.provider.FilesApplicationPackage; @@ -15,6 +16,7 @@ import 
com.yahoo.container.jdisc.HttpResponse; import com.yahoo.container.logging.AccessLog; import com.yahoo.jdisc.Response; import com.yahoo.path.Path; +import com.yahoo.vespa.config.server.ApplicationRepository; import com.yahoo.vespa.config.server.GlobalComponentRegistry; import com.yahoo.vespa.config.server.MockReloadHandler; import com.yahoo.vespa.config.server.SuperModelGenerationCounter; @@ -98,10 +100,12 @@ public class ApplicationHandlerTest { tenants, HostProvisionerProvider.withProvisioner(provisioner), Zone.defaultZone(), - convergeChecker, - logServerLogGrabber, - null, - null); + new ApplicationRepository(tenants, + HostProvisionerProvider.withProvisioner(provisioner), + new ConfigserverConfig(new ConfigserverConfig.Builder()), + new MockCurator(), + logServerLogGrabber, + convergeChecker)); } private ApplicationHandler createApplicationHandler(Tenants tenants) { @@ -111,10 +115,12 @@ public class ApplicationHandlerTest { tenants, HostProvisionerProvider.withProvisioner(provisioner), Zone.defaultZone(), - new ApplicationConvergenceChecker(stateApiFactory), - new LogServerLogGrabber(), - null, - null); + new ApplicationRepository(tenants, + HostProvisionerProvider.withProvisioner(provisioner), + new ConfigserverConfig(new ConfigserverConfig.Builder()), + new MockCurator(), + new LogServerLogGrabber(), + new ApplicationConvergenceChecker(stateApiFactory))); } @Test diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/SessionActiveHandlerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/SessionActiveHandlerTest.java index 0b0c4ad0629..e03282da72b 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/SessionActiveHandlerTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/SessionActiveHandlerTest.java @@ -5,8 +5,8 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.util.Collection; import java.util.List; -import 
java.util.concurrent.Executor; +import com.yahoo.cloud.config.ConfigserverConfig; import com.yahoo.config.provision.*; import com.yahoo.container.jdisc.HttpResponse; import com.yahoo.container.logging.AccessLog; @@ -15,6 +15,8 @@ import com.yahoo.jdisc.http.HttpRequest; import com.yahoo.slime.JsonFormat; import com.yahoo.transaction.NestedTransaction; import com.yahoo.vespa.config.server.*; +import com.yahoo.vespa.config.server.application.ApplicationConvergenceChecker; +import com.yahoo.vespa.config.server.application.LogServerLogGrabber; import com.yahoo.vespa.config.server.http.HttpErrorResponse; import com.yahoo.vespa.config.server.http.SessionHandlerTest; import com.yahoo.vespa.config.server.provision.HostProvisionerProvider; @@ -158,13 +160,17 @@ public class SessionActiveHandlerTest extends SessionActiveHandlerTestBase { .withRemoteSessionRepo(remoteSessionRepo) .withApplicationRepo(applicationRepo) .build(); - return new SessionActiveHandler(new Executor() { - @SuppressWarnings("NullableProblems") - @Override - public void execute(Runnable command) { - command.run(); - } - }, AccessLog.voidAccessLog(), testTenantBuilder.createTenants(), HostProvisionerProvider.withProvisioner(hostProvisioner), Zone.defaultZone()); + return new SessionActiveHandler( + Runnable::run, + AccessLog.voidAccessLog(), + testTenantBuilder.createTenants(), + Zone.defaultZone(), + new ApplicationRepository(testTenantBuilder.createTenants(), + HostProvisionerProvider.withProvisioner(hostProvisioner), + new ConfigserverConfig(new ConfigserverConfig.Builder()), + curator, + new LogServerLogGrabber(), + new ApplicationConvergenceChecker())); } public static class MockProvisioner implements Provisioner { diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/provision/StaticProvisionerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/provision/StaticProvisionerTest.java index 3831f94a77d..b7af5e09f47 100644 --- 
a/configserver/src/test/java/com/yahoo/vespa/config/server/provision/StaticProvisionerTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/provision/StaticProvisionerTest.java @@ -23,6 +23,7 @@ import static org.junit.Assert.assertEquals; * @author lulf */ public class StaticProvisionerTest { + @Test public void sameHostsAreProvisioned() throws IOException, SAXException { ApplicationPackage app = FilesApplicationPackage.fromFile(new File("src/test/apps/hosted")); diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/rpc/TestWithRpc.java b/configserver/src/test/java/com/yahoo/vespa/config/server/rpc/TestWithRpc.java index 887d2b2f5d6..8f1754357b2 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/rpc/TestWithRpc.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/rpc/TestWithRpc.java @@ -11,11 +11,11 @@ import com.yahoo.jrt.Transport; import com.yahoo.net.HostName; import com.yahoo.test.ManualClock; import com.yahoo.vespa.config.GenerationCounter; +import com.yahoo.vespa.config.server.SuperModelRequestHandler; import com.yahoo.vespa.config.server.host.ConfigRequestHostLivenessTracker; import com.yahoo.vespa.config.server.host.HostRegistries; import com.yahoo.vespa.config.server.MemoryGenerationCounter; import com.yahoo.vespa.config.server.PortRangeAllocator; -import com.yahoo.vespa.config.server.SuperModelController; import com.yahoo.vespa.config.server.TestConfigDefinitionRepo; import com.yahoo.vespa.config.server.monitoring.Metrics; import com.yahoo.vespa.config.server.tenant.MockTenantProvider; @@ -82,9 +82,9 @@ public class TestWithRpc { protected void createAndStartRpcServer(boolean hostedVespa) { rpcServer = new RpcServer(new ConfigserverConfig(new ConfigserverConfig.Builder().rpcport(port).numthreads(1).maxgetconfigclients(1).hostedVespa(hostedVespa)), - new SuperModelController(generationCounter, - new TestConfigDefinitionRepo(), - new ConfigserverConfig(new 
ConfigserverConfig.Builder())), + new SuperModelRequestHandler(generationCounter, + new TestConfigDefinitionRepo(), + new ConfigserverConfig(new ConfigserverConfig.Builder())), Metrics.createTestMetrics(), new HostRegistries(), hostLivenessTracker); rpcServer.onTenantCreate(TenantName.from("default"), tenantProvider); diff --git a/container-core/src/main/java/com/yahoo/container/handler/VipStatusHandler.java b/container-core/src/main/java/com/yahoo/container/handler/VipStatusHandler.java index 3be1d08c5dc..f36cd77c0b7 100644 --- a/container-core/src/main/java/com/yahoo/container/handler/VipStatusHandler.java +++ b/container-core/src/main/java/com/yahoo/container/handler/VipStatusHandler.java @@ -14,7 +14,6 @@ import com.yahoo.container.core.VipStatusConfig; import com.yahoo.container.jdisc.HttpRequest; import com.yahoo.container.jdisc.HttpResponse; import com.yahoo.container.jdisc.ThreadedHttpRequestHandler; -import com.yahoo.container.logging.AccessLog; import com.yahoo.jdisc.Metric; import com.yahoo.log.LogLevel; import com.yahoo.text.Utf8; diff --git a/container-disc/pom.xml b/container-disc/pom.xml index 1cc9bddce28..1bdfe600760 100644 --- a/container-disc/pom.xml +++ b/container-disc/pom.xml @@ -131,6 +131,12 @@ </dependency> <!-- end WARNING --> </dependencies> + <properties> + <!-- hk2 version must be the one used by current jersey version. + This is just to avoid duplicating it below, _not_ to enforce a particular version of hk2. + Do not move this to the root pom! 
--> + <hk2.version>2.4.0-b12</hk2.version> + </properties> <build> <plugins> <plugin> @@ -165,10 +171,10 @@ defaults-jar-with-dependencies.jar, component-jar-with-dependencies.jar, <!-- jersey2 --> - aopalliance-repackaged-2.3.0-b05.jar, - hk2-api-2.3.0-b05.jar, - hk2-locator-2.3.0-b05.jar, - hk2-utils-2.3.0-b05.jar, + aopalliance-repackaged-${hk2.version}.jar, + hk2-api-${hk2.version}.jar, + hk2-locator-${hk2.version}.jar, + hk2-utils-${hk2.version}.jar, jackson-annotations-${jackson2.version}.jar, jackson-core-${jackson2.version}.jar, jackson-databind-${jackson2.version}.jar, @@ -182,10 +188,12 @@ jersey-common-${jersey2.version}.jar, jersey-container-servlet-${jersey2.version}.jar, jersey-container-servlet-core-${jersey2.version}.jar, + jersey-entity-filtering-${jersey2.version}.jar, <!-- new feature from 2.16, provided for convenience --> jersey-guava-${jersey2.version}.jar, + jersey-media-jaxb-${jersey2.version}.jar, jersey-media-json-jackson-${jersey2.version}.jar, jersey-media-multipart-${jersey2.version}.jar, - mimepull-1.9.3.jar, <!-- needed by media-multipart --> + mimepull-1.9.5.jar, <!-- needed by media-multipart --> jersey-server-${jersey2.version}.jar, jersey-proxy-client-${jersey2.version}.jar, osgi-resource-locator-1.0.1.jar, diff --git a/container-disc/src/main/sh/vespa-start-container-daemon.sh b/container-disc/src/main/sh/vespa-start-container-daemon.sh index 3afefd6f86d..ec632a9c0c4 100755 --- a/container-disc/src/main/sh/vespa-start-container-daemon.sh +++ b/container-disc/src/main/sh/vespa-start-container-daemon.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#set -x @@ -65,14 +65,21 @@ configure_memory() { consider_fallback jvm_baseMaxDirectMemorySize 75 consider_fallback jvm_directMemorySizeCache 0 - if (( jvm_heapSizeAsPercentageOfPhysicalMemory > 0 && jvm_heapSizeAsPercentageOfPhysicalMemory < 100 )); then - available=`free -m | grep Mem | tr -s ' ' | cut -f2 -d' '` - jvm_heapsize=$[available * jvm_heapSizeAsPercentageOfPhysicalMemory / 100] + # Update jvm_heapsize only if percentage is explicitly set (default is 0). + if ((jvm_heapSizeAsPercentageOfPhysicalMemory > 0)); then + if ((TOTAL_MEMORY_MB > 0)); then + available="$TOTAL_MEMORY_MB" + else + available=`free -m | grep Mem | tr -s ' ' | cut -f2 -d' '` + fi + + jvm_heapsize=$((available * jvm_heapSizeAsPercentageOfPhysicalMemory / 100)) if (( jvm_heapsize < 1024 )); then jvm_heapsize=1024 fi fi - maxDirectMemorySize=$(( ${jvm_baseMaxDirectMemorySize} + ${jvm_heapsize}/8 + ${jvm_directMemorySizeCache} )) + + maxDirectMemorySize=$(( jvm_baseMaxDirectMemorySize + jvm_heapsize / 8 + jvm_directMemorySizeCache )) memory_options="-Xms${jvm_heapsize}m -Xmx${jvm_heapsize}m" memory_options="${memory_options} -XX:ThreadStackSize=${jvm_stacksize}" diff --git a/container-search/src/main/java/com/yahoo/data/JsonProducer.java b/container-search/src/main/java/com/yahoo/data/JsonProducer.java index 488f2b4c720..b4ccc95f228 100644 --- a/container-search/src/main/java/com/yahoo/data/JsonProducer.java +++ b/container-search/src/main/java/com/yahoo/data/JsonProducer.java @@ -3,7 +3,7 @@ package com.yahoo.data; /** * Generic API for classes that contain data representable as JSON. - **/ + */ public interface JsonProducer { /** @@ -14,13 +14,14 @@ public interface JsonProducer { * canonical format. * @param target the StringBuilder to append to. * @return the target passed in is also returned (to allow chaining). 
- **/ - public StringBuilder writeJson(StringBuilder target); + */ + StringBuilder writeJson(StringBuilder target); /** * Convenience method equivalent to: * makeJson(new StringBuilder()).toString() * @return String containing JSON representation of this object's data. - **/ - public String toJson(); + */ + String toJson(); + } diff --git a/container-search/src/main/java/com/yahoo/data/XmlProducer.java b/container-search/src/main/java/com/yahoo/data/XmlProducer.java index d1d65aab095..e0c93f79eb5 100644 --- a/container-search/src/main/java/com/yahoo/data/XmlProducer.java +++ b/container-search/src/main/java/com/yahoo/data/XmlProducer.java @@ -3,21 +3,22 @@ package com.yahoo.data; /** * Generic API for classes that contain data representable as XML. - **/ + */ public interface XmlProducer { /** * Append the XML representation of this object's data to a StringBuilder. * @param target the StringBuilder to append to. * @return the target passed in is also returned (to allow chaining). - **/ - public StringBuilder writeXML(StringBuilder target); + */ + StringBuilder writeXML(StringBuilder target); /** * Convenience method equivalent to: * makeXML(new StringBuilder()).toString() * @return String containing XML representation of this object's data. - **/ - public String toXML(); + */ + String toXML(); + } diff --git a/container-search/src/main/java/com/yahoo/prelude/VespaSVersionRetriever.java b/container-search/src/main/java/com/yahoo/prelude/VespaSVersionRetriever.java deleted file mode 100644 index 9d6d4f55fb3..00000000000 --- a/container-search/src/main/java/com/yahoo/prelude/VespaSVersionRetriever.java +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.prelude; - -import java.io.IOException; -import java.util.jar.Manifest; - -/** - * Retrieves Vespa-Version from the manifest file. 
- * - * @author tonytv - */ -public class VespaSVersionRetriever { - - public static String getVersion() { - return version; - } - - private static String version = retrieveVersion(); - - private static String retrieveVersion() { - try { - Manifest manifest = new Manifest(VespaSVersionRetriever.class.getResourceAsStream("/META-INF/MANIFEST.MF")); - manifest.getMainAttributes().entrySet(); - return manifest.getMainAttributes().getValue("Vespa-Version"); - } catch (IOException e) { - return "not available."; - } - } -} diff --git a/container-search/src/main/java/com/yahoo/prelude/cluster/ClusterMonitor.java b/container-search/src/main/java/com/yahoo/prelude/cluster/ClusterMonitor.java index adbae459e5d..871ecc37ea5 100644 --- a/container-search/src/main/java/com/yahoo/prelude/cluster/ClusterMonitor.java +++ b/container-search/src/main/java/com/yahoo/prelude/cluster/ClusterMonitor.java @@ -2,6 +2,7 @@ package com.yahoo.prelude.cluster; import java.util.Map; +import java.util.Optional; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.logging.Level; @@ -21,13 +22,18 @@ import com.yahoo.search.result.ErrorMessage; */ public class ClusterMonitor implements Runnable, Freezable { + // The ping thread wil start using the system, but we cannot be guaranteed that all components + // in the system is up. 
As a workaround for not being able to find out when the system + // is ready to be used, we wait some time before starting the ping thread + private static final int pingThreadInitialDelayMs = 3000; + private final MonitorConfiguration configuration; private final static Logger log = Logger.getLogger(ClusterMonitor.class.getName()); private final ClusterSearcher nodeManager; - private final VipStatus vipStatus; + private final Optional<VipStatus> vipStatus; /** A map from Node to corresponding MonitoredNode */ private final Map<VespaBackEndSearcher, NodeMonitor> nodeMonitors = new java.util.IdentityHashMap<>(); @@ -35,7 +41,7 @@ public class ClusterMonitor implements Runnable, Freezable { private boolean isFrozen = false; - ClusterMonitor(ClusterSearcher manager, QrMonitorConfig monitorConfig, VipStatus vipStatus) { + ClusterMonitor(ClusterSearcher manager, QrMonitorConfig monitorConfig, Optional<VipStatus> vipStatus) { configuration = new MonitorConfiguration(monitorConfig); nodeManager = manager; this.vipStatus = vipStatus; @@ -48,21 +54,18 @@ public class ClusterMonitor implements Runnable, Freezable { } void startPingThread() { - if (!isFrozen()) { - throw new IllegalStateException( - "Do not start the monitoring thread before the set of" - +" nodes to monitor is complete/the ClusterMonitor is frozen."); - } - future = nodeManager.getScheduledExecutor().scheduleAtFixedRate(this, 30 * 1000, configuration.getCheckInterval(), TimeUnit.MILLISECONDS); + if ( ! isFrozen()) + throw new IllegalStateException("Do not start the monitoring thread before the set of " + + "nodes to monitor is complete/the ClusterMonitor is frozen."); + future = nodeManager.getScheduledExecutor().scheduleAtFixedRate(this, pingThreadInitialDelayMs, configuration.getCheckInterval(), TimeUnit.MILLISECONDS); } /** * Adds a new node for monitoring. 
*/ void add(VespaBackEndSearcher node) { - if (isFrozen()) { + if (isFrozen()) throw new IllegalStateException("Can not add new nodes after ClusterMonitor has been frozen."); - } nodeMonitors.put(node, new NodeMonitor(node)); } @@ -72,7 +75,6 @@ public class ClusterMonitor implements Runnable, Freezable { boolean wasWorking = monitor.isWorking(); monitor.failed(error); if (wasWorking && !monitor.isWorking()) { - // was warning, see VESPA-1922 log.info("Failed monitoring node '" + node + "' due to '" + error); nodeManager.failed(node); } @@ -80,10 +82,10 @@ public class ClusterMonitor implements Runnable, Freezable { } /** Called when a node responded */ - void responded(VespaBackEndSearcher node, boolean hasDocumentsOnline) { + void responded(VespaBackEndSearcher node, boolean hasSearchNodesOnline) { NodeMonitor monitor = nodeMonitors.get(node); boolean wasFailing = !monitor.isWorking(); - monitor.responded(hasDocumentsOnline); + monitor.responded(hasSearchNodesOnline); if (wasFailing && monitor.isWorking()) { log.info("Failed node '" + node + "' started working again."); nodeManager.working(monitor.getNode()); @@ -92,6 +94,8 @@ public class ClusterMonitor implements Runnable, Freezable { } private void updateVipStatus() { + if ( ! 
vipStatus.isPresent()) return; + boolean hasWorkingNodesWithDocumentsOnline = false; for (NodeMonitor node : nodeMonitors.values()) { if (node.isWorking() && node.searchNodesOnline()) { @@ -100,9 +104,9 @@ public class ClusterMonitor implements Runnable, Freezable { } } if (hasWorkingNodesWithDocumentsOnline) { - vipStatus.addToRotation(this); + vipStatus.get().addToRotation(this); } else { - vipStatus.removeFromRotation(this); + vipStatus.get().removeFromRotation(this); } } diff --git a/container-search/src/main/java/com/yahoo/prelude/cluster/ClusterSearcher.java b/container-search/src/main/java/com/yahoo/prelude/cluster/ClusterSearcher.java index 46ef5a41ed5..36ae057b418 100644 --- a/container-search/src/main/java/com/yahoo/prelude/cluster/ClusterSearcher.java +++ b/container-search/src/main/java/com/yahoo/prelude/cluster/ClusterSearcher.java @@ -107,29 +107,33 @@ public class ClusterSearcher extends Searcher { super(id); this.hasher = new Hasher(); this.fs4ResourcePool = fs4ResourcePool; - monitor = new ClusterMonitor(this, monitorConfig, vipStatus); + + Dispatcher dispatcher = new Dispatcher(dispatchConfig, fs4ResourcePool, clusterInfoConfig.nodeCount(), vipStatus); + + if (dispatcher.searchCluster().directDispatchTarget().isPresent()) // dispatcher should decide vip status instead + monitor = new ClusterMonitor(this, monitorConfig, Optional.empty()); + else + monitor = new ClusterMonitor(this, monitorConfig, Optional.of(vipStatus)); + int searchClusterIndex = clusterConfig.clusterId(); clusterModelName = clusterConfig.clusterName(); QrSearchersConfig.Searchcluster searchClusterConfig = getSearchClusterConfigFromClusterName(qrsConfig, clusterModelName); documentTypes = new LinkedHashSet<>(); failoverToRemote = clusterConfig.failoverToRemote(); - Dispatcher dispatcher = new Dispatcher(dispatchConfig, fs4ResourcePool); String eventName = clusterModelName + ".cache_hit_ratio"; - cacheHitRatio = new Value(eventName, manager, new Value.Parameters() - 
.setNameExtension(false).setLogRaw(false).setLogMean(true)); + cacheHitRatio = new Value(eventName, manager, new Value.Parameters().setNameExtension(false) + .setLogRaw(false).setLogMean(true)); maxQueryTimeout = ParameterParser.asMilliSeconds(clusterConfig.maxQueryTimeout(), DEFAULT_MAX_QUERY_TIMEOUT); maxQueryCacheTimeout = ParameterParser.asMilliSeconds(clusterConfig.maxQueryCacheTimeout(), - DEFAULT_MAX_QUERY_CACHE_TIMEOUT); + DEFAULT_MAX_QUERY_CACHE_TIMEOUT); CacheParams cacheParams = new CacheParams(createCache(clusterConfig, clusterModelName)); SummaryParameters docSumParams = new SummaryParameters(qrsConfig .com().yahoo().prelude().fastsearch().FastSearcher().docsum() .defaultclass()); - int containerClusterSize = clusterInfoConfig.nodeCount(); - for (DocumentdbInfoConfig.Documentdb docDb : documentDbConfig.documentdb()) { String docTypeName = docDb.name(); documentTypes.add(docTypeName); @@ -151,7 +155,7 @@ public class ClusterSearcher extends Searcher { Backend b = createBackend(searchClusterConfig.dispatcher(dispatcherIndex)); FastSearcher searcher = searchDispatch(searchClusterIndex, fs4ResourcePool, searchClusterConfig, cacheParams, emulationConfig, docSumParams, - documentDbConfig, b, dispatcher, dispatcherIndex, containerClusterSize); + documentDbConfig, b, dispatcher, dispatcherIndex); try { searcher.setLocalDispatching( ! isRemote(searchClusterConfig.dispatcher(dispatcherIndex).host())); } catch (UnknownHostException e) { @@ -162,10 +166,10 @@ public class ClusterSearcher extends Searcher { gotExpectedBackend |= searcher.isLocalDispatching(); } } - if (!gotExpectedBackend) { + if ( ! gotExpectedBackend) { log.log(Level.SEVERE, "ClusterSearcher should have a local top level dispatch." 
- + " The possibility to configure dispatchers explicitly will be removed" - + " in a future release."); + + " The possibility to configure dispatchers explicitly will be removed" + + " in a future release."); } hasher.running = true; monitor.freeze(); @@ -210,14 +214,13 @@ public class ClusterSearcher extends Searcher { DocumentdbInfoConfig documentdbInfoConfig, Backend backend, Dispatcher dispatcher, - int dispatcherIndex, - int containerClusterSize) { + int dispatcherIndex) { ClusterParams clusterParams = makeClusterParams(searchclusterIndex, searchClusterConfig, emulConfig, dispatcherIndex); return new FastSearcher(backend, fs4ResourcePool, dispatcher, docSumParams, clusterParams, cacheParams, - documentdbInfoConfig, containerClusterSize); + documentdbInfoConfig); } private static VdsStreamingSearcher vdsCluster(int searchclusterIndex, @@ -231,10 +234,8 @@ public class ClusterSearcher extends Searcher { emulConfig, 0); VdsStreamingSearcher searcher = (VdsStreamingSearcher) VespaBackEndSearcher .getSearcher("com.yahoo.vespa.streamingvisitors.VdsStreamingSearcher"); - searcher.setSearchClusterConfigId(searchClusterConfig - .rankprofiles().configid()); - searcher.setStorageClusterRouteSpec(searchClusterConfig - .storagecluster().routespec()); + searcher.setSearchClusterConfigId(searchClusterConfig.rankprofiles().configid()); + searcher.setStorageClusterRouteSpec(searchClusterConfig.storagecluster().routespec()); searcher.init(docSumParams, clusterParams, cacheParams, documentdbInfoConfig); return searcher; } @@ -244,10 +245,9 @@ public class ClusterSearcher extends Searcher { this.hasher = new Hasher(); this.failoverToRemote = false; this.documentTypes = documentTypes; - monitor = new ClusterMonitor(this, new QrMonitorConfig(new QrMonitorConfig.Builder()), new VipStatus()); - cacheHitRatio = new Value( - "com.yahoo.prelude.cluster.ClusterSearcher.ClusterSearcher().dummy", - Statistics.nullImplementation, new Value.Parameters()); + monitor = new 
ClusterMonitor(this, new QrMonitorConfig(new QrMonitorConfig.Builder()), Optional.of(new VipStatus())); + cacheHitRatio = new Value("com.yahoo.prelude.cluster.ClusterSearcher.ClusterSearcher().dummy", + Statistics.nullImplementation, new Value.Parameters()); clusterModelName = "testScenario"; fs4ResourcePool = null; maxQueryTimeout = DEFAULT_MAX_QUERY_TIMEOUT; @@ -268,8 +268,8 @@ public class ClusterSearcher extends Searcher { private static CacheControl createCache(ClusterConfig config, String clusterModelName) { log.log(Level.INFO, "Enabling cache for search cluster " - + clusterModelName + " (size=" + config.cacheSize() - + ", timeout=" + config.cacheTimeout() + ")"); + + clusterModelName + " (size=" + config.cacheSize() + + ", timeout=" + config.cacheTimeout() + ")"); return new CacheControl(config.cacheSize(), config.cacheTimeout()); } @@ -311,6 +311,7 @@ public class ClusterSearcher extends Searcher { * @return null if request rank profile is ok for the requested * doc types, a result with error message if not. 
*/ + // TODO: This should be in a separate searcher private Result checkValidRankProfiles(Query query, Set<String> docTypes) { String rankProfile = query.getRanking().getProfile(); Set<String> invalidInDocTypes = null; @@ -405,8 +406,7 @@ public class ClusterSearcher extends Searcher { int tries = 0; do { - // The loop is in case there are other searchers available - // able to produce results + // The loop is in case there are other searchers available able to produce results validateQueryTimeout(query); validateQueryCache(query); VespaBackEndSearcher searcher = hasher.select(tries++); diff --git a/container-search/src/main/java/com/yahoo/prelude/cluster/NodeMonitor.java b/container-search/src/main/java/com/yahoo/prelude/cluster/NodeMonitor.java index eb747b2bbee..c06b7fe04ba 100644 --- a/container-search/src/main/java/com/yahoo/prelude/cluster/NodeMonitor.java +++ b/container-search/src/main/java/com/yahoo/prelude/cluster/NodeMonitor.java @@ -18,7 +18,7 @@ import com.yahoo.search.result.ErrorMessage; * </ul> * * @author bratseth - * @author <a href="mailto:steinar@yahoo-inc.com">Steinar Knutsen</a> + * @author Steinar Knutsen */ public class NodeMonitor { @@ -65,8 +65,8 @@ public class NodeMonitor { public void failed(ErrorMessage error) { long respondedAt = System.currentTimeMillis(); - if (error.getCode() == BACKEND_COMMUNICATION_ERROR.code - || error.getCode() == NO_ANSWER_WHEN_PINGING_NODE.code) { + if (error.getCode() == BACKEND_COMMUNICATION_ERROR.code + || error.getCode() == NO_ANSWER_WHEN_PINGING_NODE.code) { // Only count not being able to talk to backend at all // as errors we care about if ((respondedAt - succeededAt) > 10000) { @@ -85,9 +85,8 @@ public class NodeMonitor { this.searchNodesOnline = searchNodesOnline; atStartUp = false; - if (!isWorking) { + if ( ! 
isWorking) setWorking(true, "Responds correctly"); - } } /** Changes the state of this node if required */ @@ -95,20 +94,15 @@ public class NodeMonitor { if (isWorking == working) return; // Old news String explanationToLog; - if (explanation == null) { + if (explanation == null) explanationToLog = ""; - } else { + else explanationToLog = ": " + explanation; - } - if (working) { + if (working) log.info("Putting " + node + " in service" + explanationToLog); - } else { - if (!atStartUp) { - // was warning, see VESPA-1922 - log.info("Taking " + node + " out of service" + explanationToLog); - } - } + else if ( ! atStartUp) + log.info("Taking " + node + " out of service" + explanationToLog); isWorking = working; } diff --git a/container-search/src/main/java/com/yahoo/prelude/cluster/dispatchprototype/DispatchClusterSearcher.java b/container-search/src/main/java/com/yahoo/prelude/cluster/dispatchprototype/DispatchClusterSearcher.java deleted file mode 100644 index dbcee3234ea..00000000000 --- a/container-search/src/main/java/com/yahoo/prelude/cluster/dispatchprototype/DispatchClusterSearcher.java +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.prelude.cluster.dispatchprototype; - -import com.google.common.annotations.Beta; -import com.yahoo.cloud.config.ClusterInfoConfig; -import com.yahoo.component.ComponentId; -import com.yahoo.component.chain.dependencies.After; -import com.yahoo.component.provider.ComponentRegistry; -import com.yahoo.container.QrSearchersConfig; -import com.yahoo.container.handler.VipStatus; -import com.yahoo.container.search.LegacyEmulationConfig; -import com.yahoo.prelude.cluster.ClusterSearcher; -import com.yahoo.prelude.cluster.QrMonitorConfig; -import com.yahoo.prelude.fastsearch.DocumentdbInfoConfig; -import com.yahoo.prelude.fastsearch.FS4ResourcePool; -import com.yahoo.search.Query; -import com.yahoo.search.Result; -import com.yahoo.search.Searcher; -import com.yahoo.search.config.ClusterConfig; -import com.yahoo.search.config.dispatchprototype.SearchNodesConfig; -import com.yahoo.search.searchchain.Execution; -import com.yahoo.statistics.Statistics; -import com.yahoo.vespa.config.search.DispatchConfig; - -import static com.yahoo.container.QrSearchersConfig.Searchcluster; - -/** - * This class modifies ClusterSearcher behavior to talk directly to search nodes instead of dispatchers. - * - * This means that queries are sent to a single search node only. Obviously, this will not give correct - * results - it is just a single step towards eliminating top-level dispatch as a separate process. - * - * @author bakksjo - */ -// 2016-08-16 (bratseth): We should probably just remove this now. 
It was a prototype that never went anywhere -@Beta -@After("*") -public class DispatchClusterSearcher extends Searcher { - private final ClusterSearcher clusterSearcher; - - public DispatchClusterSearcher( - final ComponentId id, - final SearchNodesConfig searchNodesConfig, - final QrSearchersConfig qrsConfig, - final ClusterConfig clusterConfig, - final DocumentdbInfoConfig documentDbConfig, - final LegacyEmulationConfig emulationConfig, - final QrMonitorConfig monitorConfig, - final DispatchConfig dispatchConfig, - final ClusterInfoConfig clusterInfoConfig, - final Statistics manager, - final FS4ResourcePool listeners, - final ComponentRegistry<ClusterSearcher> otherClusterSearchers, - final VipStatus vipStatus) { - - clusterSearcher = new ClusterSearcher( - id, - makeQrSearchersConfigWithSearchNodesInsteadOfDispatcherNodes( - qrsConfig, - searchNodesConfig, - clusterConfig.clusterName()), - clusterConfig, - documentDbConfig, - emulationConfig, - monitorConfig, - dispatchConfig, - clusterInfoConfig, - manager, - listeners, - vipStatus); - - // Prevent the ClusterSearcher(s) implicitly set up by the model from warning that it can't contact - // the c++ TLD when we disable it in the system test. 
- otherClusterSearchers.allComponents().stream() - .forEach(ClusterSearcher::deconstruct); - } - - - @Override - public Result search(Query query, Execution execution) { - return clusterSearcher.search(query, execution); - } - - @Override - public void fill(Result result, String summaryClass, Execution execution) { - clusterSearcher.fill(result, summaryClass, execution); - } - - private static QrSearchersConfig makeQrSearchersConfigWithSearchNodesInsteadOfDispatcherNodes( - final QrSearchersConfig qrsConfig, - final SearchNodesConfig searchNodesConfig, - final String clusterName) { - final QrSearchersConfig.Builder qrSearchersConfigBuilder = new QrSearchersConfig.Builder(); - copyEverythingExceptSearchclusters(qrsConfig, qrSearchersConfigBuilder); - - // We only "copy" (with modifications) a single Searchcluster. - final Searchcluster originalSearchcluster = getSearchclusterByName(qrsConfig, clusterName); - final Searchcluster.Builder searchclusterBuilder = new Searchcluster.Builder(); - copyEverythingExceptDispatchers(originalSearchcluster, searchclusterBuilder); - // Here comes the trick: Substitute search nodes for dispatchers. 
- for (final SearchNodesConfig.Search_node searchNodeConfig : searchNodesConfig.search_node()) { - searchclusterBuilder.dispatcher( - new Searchcluster.Dispatcher.Builder() - .host(searchNodeConfig.host()) - .port(searchNodeConfig.port())); - } - qrSearchersConfigBuilder.searchcluster(searchclusterBuilder); - - return new QrSearchersConfig(qrSearchersConfigBuilder); - } - - private static void copyEverythingExceptSearchclusters( - final QrSearchersConfig source, - final QrSearchersConfig.Builder destination) { - destination.tag(new QrSearchersConfig.Tag.Builder(source.tag())); - destination.com(new QrSearchersConfig.Com.Builder(source.com())); - destination.customizedsearchers(new QrSearchersConfig.Customizedsearchers.Builder(source.customizedsearchers())); - for (final QrSearchersConfig.External external : source.external()) { - destination.external(new QrSearchersConfig.External.Builder(external)); - } - } - - private static Searchcluster getSearchclusterByName(final QrSearchersConfig qrsConfig, final String clusterName) { - return qrsConfig.searchcluster().stream() - .filter(cluster -> clusterName.equals(cluster.name())) - .findAny() - .orElseThrow(() -> new IllegalStateException("No cluster found with name " + clusterName)); - } - - private static void copyEverythingExceptDispatchers( - final Searchcluster source, - final Searchcluster.Builder destination) { - destination - .name(source.name()) - .searchdef(source.searchdef()) - .rankprofiles(new Searchcluster.Rankprofiles.Builder(source.rankprofiles())) - .indexingmode(source.indexingmode()) - // Deliberately excluding storagecluster here because it's not relevant. 
- .rowbits(source.rowbits()); - } -} diff --git a/container-search/src/main/java/com/yahoo/prelude/cluster/package-info.java b/container-search/src/main/java/com/yahoo/prelude/cluster/package-info.java index e4dbfbb3a1b..26ba438bb68 100644 --- a/container-search/src/main/java/com/yahoo/prelude/cluster/package-info.java +++ b/container-search/src/main/java/com/yahoo/prelude/cluster/package-info.java @@ -1,4 +1,9 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +/** + * Cluster monitoring for content clusters queried over fnet. + * Others (e.g HTTP connections and content clusters queried over RPC) + * should use com.yahoo.search.cluster + */ @ExportPackage package com.yahoo.prelude.cluster; diff --git a/container-search/src/main/java/com/yahoo/prelude/fastsearch/FastHit.java b/container-search/src/main/java/com/yahoo/prelude/fastsearch/FastHit.java index ee3f9ac0583..fe0c4a35d1e 100644 --- a/container-search/src/main/java/com/yahoo/prelude/fastsearch/FastHit.java +++ b/container-search/src/main/java/com/yahoo/prelude/fastsearch/FastHit.java @@ -19,7 +19,7 @@ import com.yahoo.data.access.simple.Value.StringValue; */ public class FastHit extends Hit { - public static final String SUMMARY = "summary"; + public static final String SUMMARY = "summary"; // TODO: Remove on Vespa 7 private static final long serialVersionUID = 298098891191029589L; @@ -62,16 +62,18 @@ public class FastHit extends Hit { */ public FastHit() { } + // Note: This constructor is only used for tests, production use is always of the empty constructor public FastHit(String uri, double relevancy) { this(uri, relevancy, null); } + // Note: This constructor is only used for tests, production use is always of the empty constructor public FastHit(String uri, double relevance, String source) { setId(uri); - super.setField("uri", uri); + super.setField("uri", uri); // TODO: Remove on Vespa 7 setRelevance(new Relevance(relevance)); 
setSource(source); - types().add(SUMMARY); + types().add(SUMMARY); // TODO: Remove on Vespa 7 setPartId(0, 0); } diff --git a/container-search/src/main/java/com/yahoo/prelude/fastsearch/FastSearcher.java b/container-search/src/main/java/com/yahoo/prelude/fastsearch/FastSearcher.java index 644199520e7..504c5dd363a 100644 --- a/container-search/src/main/java/com/yahoo/prelude/fastsearch/FastSearcher.java +++ b/container-search/src/main/java/com/yahoo/prelude/fastsearch/FastSearcher.java @@ -3,7 +3,6 @@ package com.yahoo.prelude.fastsearch; import java.util.Optional; -import com.google.common.collect.ImmutableCollection; import com.yahoo.compress.CompressionType; import com.yahoo.fs4.BasicPacket; import com.yahoo.fs4.ChannelTimeoutException; @@ -71,7 +70,6 @@ public class FastSearcher extends VespaBackEndSearcher { private final FS4ResourcePool fs4ResourcePool; private final String selfHostname; - private final int containerClusterSize; /** * Creates a Fastsearcher. @@ -88,18 +86,15 @@ public class FastSearcher extends VespaBackEndSearcher { * @param clusterParams the cluster number, and other cluster backend parameters * @param cacheParams the size, lifetime, and controller of our cache * @param documentdbInfoConfig document database parameters - * @param containerClusterSize the size of the cluster this is part of */ public FastSearcher(Backend dispatchBackend, FS4ResourcePool fs4ResourcePool, Dispatcher dispatcher, SummaryParameters docSumParams, ClusterParams clusterParams, - CacheParams cacheParams, DocumentdbInfoConfig documentdbInfoConfig, - int containerClusterSize) { + CacheParams cacheParams, DocumentdbInfoConfig documentdbInfoConfig) { init(docSumParams, clusterParams, cacheParams, documentdbInfoConfig); this.dispatchBackend = dispatchBackend; this.fs4ResourcePool = fs4ResourcePool; this.dispatcher = dispatcher; this.selfHostname = HostName.getLocalhost(); - this.containerClusterSize = containerClusterSize; } private static SimpleDateFormat isoDateFormat; 
@@ -226,39 +221,17 @@ public class FastSearcher extends VespaBackEndSearcher { private Backend chooseBackend(Query query) { // TODO 2016-08-16: Turn this on by default (by changing the 'false' below to 'true') if ( ! query.properties().getBoolean(dispatchDirect, false)) return dispatchBackend; - - // A search node in the search cluster in question is configured on the same host as the currently running container. - // It has all the data <==> No other nodes in the search cluster have the same group id as this node. - // That local search node responds. - // The search cluster to be searched has at least as many nodes as the container cluster we're running in. - ImmutableCollection<SearchCluster.Node> localSearchNodes = dispatcher.searchCluster().nodesByHost().get(selfHostname); - // Only use direct dispatch if we have exactly 1 search node on the same machine: - if (localSearchNodes.size() != 1) return dispatchBackend; - - SearchCluster.Node localSearchNode = localSearchNodes.iterator().next(); - SearchCluster.Group localSearchGroup = dispatcher.searchCluster().groups().get(localSearchNode.group()); - - // Only use direct dispatch if the local search node has the entire corpus - if (localSearchGroup.nodes().size() != 1) return dispatchBackend; - - // Only use direct dispatch if this container cluster has at least as many nodes as the search cluster - // to avoid load skew/preserve fanout in the case where a subset of the search nodes are also containers. - // This disregards the case where the search and container clusters are partially overlapping. - // Such configurations produce skewed load in any case. - if (containerClusterSize < dispatcher.searchCluster().size()) return dispatchBackend; - - // Only use direct dispatch if the upstream ClusterSearcher chose the local dispatch - // (otherwise, we may be in this method due to a failover situation) - if ( ! 
dispatchBackend.getHost().equals(selfHostname)) return dispatchBackend; - // Only use direct dispatch if the local grouop has sufficient coverage - if ( ! localSearchGroup.hasSufficientCoverage()) return dispatchBackend; + // Don't use direct dispatch if the upstream ClusterSearcher did not chose the local dispatch + // as that probably means that we are in a failover situation + if ( ! dispatchBackend.getHost().equals(selfHostname)) return dispatchBackend; - // Only use direct dispatch if the local search node is up - if ( ! localSearchNode.isWorking()) return dispatchBackend; + Optional<SearchCluster.Node> directDispatchRecipient = dispatcher.searchCluster().directDispatchTarget(); + if ( ! directDispatchRecipient.isPresent()) return dispatchBackend; - query.trace(false, 2, "Dispatching directly to ", localSearchNode); - return fs4ResourcePool.getBackend(localSearchNode.hostname(), localSearchNode.fs4port()); + query.trace(false, 2, "Dispatching directly to ", directDispatchRecipient.get()); + return fs4ResourcePool.getBackend(directDispatchRecipient.get().hostname(), + directDispatchRecipient.get().fs4port()); } /** diff --git a/container-search/src/main/java/com/yahoo/prelude/fastsearch/PacketCache.java b/container-search/src/main/java/com/yahoo/prelude/fastsearch/PacketCache.java index 01f12753ea7..dca23d13ba6 100644 --- a/container-search/src/main/java/com/yahoo/prelude/fastsearch/PacketCache.java +++ b/container-search/src/main/java/com/yahoo/prelude/fastsearch/PacketCache.java @@ -19,9 +19,6 @@ import com.yahoo.log.LogLevel; */ public class PacketCache extends LinkedHashMap<CacheKey, PacketWrapper> { - /** - * - */ private static final long serialVersionUID = -7403077211906108356L; /** The <i>current</i> number of bytes of packets in this cache */ diff --git a/container-search/src/main/java/com/yahoo/prelude/querytransform/StemmingSearcher.java b/container-search/src/main/java/com/yahoo/prelude/querytransform/StemmingSearcher.java index 
c02824420d5..ca8214f35d6 100644 --- a/container-search/src/main/java/com/yahoo/prelude/querytransform/StemmingSearcher.java +++ b/container-search/src/main/java/com/yahoo/prelude/querytransform/StemmingSearcher.java @@ -108,11 +108,8 @@ public class StemmingSearcher extends Searcher { return reverseConnectivity; } - private Item scan(Item item, - boolean isCJK, - Language l, - IndexFacts.Session indexFacts, - Map<Item, TaggableItem> reverseConnectivity) { + private Item scan(Item item, boolean isCJK, Language l, IndexFacts.Session indexFacts, + Map<Item, TaggableItem> reverseConnectivity) { if (item == null) { return null; } else if (item instanceof BlockItem) { @@ -153,9 +150,8 @@ public class StemmingSearcher extends Searcher { if (i instanceof TermItem) { return ((TermItem) i).getOrigin(); // this should always be the case } else { - getLogger().log(LogLevel.WARNING, - "Weird, BlockItem '" + b + "' was a composite containing " + i.getClass().getName() - + ", expected TermItem."); + getLogger().log(LogLevel.WARNING, "Weird, BlockItem '" + b + "' was a composite containing " + + i.getClass().getName() + ", expected TermItem."); } } return null; @@ -217,8 +213,8 @@ public class StemmingSearcher extends Searcher { setConnectivity(current, reverseConnectivity, replacement); } - private void andSegmentConnectivity(BlockItem current, - Map<Item, TaggableItem> reverseConnectivity, CompositeItem composite) { + private void andSegmentConnectivity(BlockItem current, Map<Item, TaggableItem> reverseConnectivity, + CompositeItem composite) { // if the original has connectivity to something, add to last word Connectivity connectivity = getConnectivity(current); if (connectivity != null) { @@ -269,8 +265,7 @@ public class StemmingSearcher extends Searcher { private TaggableItem singleWordSegment(BlockItem current, StemList segment, Index index, - Substring substring) - { + Substring substring) { String indexName = current.getIndexName(); if (index.getLiteralBoost() || 
index.getStemMode() == StemMode.ALL) { // Yes, this will create a new WordAlternativesItem even if stemmed @@ -301,8 +296,7 @@ public class StemmingSearcher extends Searcher { } private WordItem singleStemSegment(Item blockAsItem, String stem, String indexName, - Substring substring) - { + Substring substring) { WordItem replacement = new WordItem(stem, indexName, true, substring); replacement.setStemmed(true); copyAttributes(blockAsItem, replacement); @@ -311,8 +305,7 @@ public class StemmingSearcher extends Searcher { private void setConnectivity(BlockItem current, Map<Item, TaggableItem> reverseConnectivity, - Item replacement) - { + Item replacement) { if (reverseConnectivity != null && !reverseConnectivity.isEmpty()) { // This Map<Item, TaggableItem>.get(BlockItem) is technically wrong, but the Item API ensures its correctness TaggableItem connectedTo = reverseConnectivity.get(current); @@ -425,4 +418,5 @@ public class StemmingSearcher extends Searcher { } } + } diff --git a/container-search/src/main/java/com/yahoo/search/cluster/ClusterMonitor.java b/container-search/src/main/java/com/yahoo/search/cluster/ClusterMonitor.java index 520903f2210..0916e3f4f9a 100644 --- a/container-search/src/main/java/com/yahoo/search/cluster/ClusterMonitor.java +++ b/container-search/src/main/java/com/yahoo/search/cluster/ClusterMonitor.java @@ -20,7 +20,7 @@ import java.util.logging.Logger; * The monitor uses an internal thread for node monitoring. * All <i>public</i> methods of this class are multithread safe. 
* - * @author bratseth + * @author bratseth */ public class ClusterMonitor<T> { diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/Dispatcher.java b/container-search/src/main/java/com/yahoo/search/dispatch/Dispatcher.java index ca6445cff44..ef81d404b29 100644 --- a/container-search/src/main/java/com/yahoo/search/dispatch/Dispatcher.java +++ b/container-search/src/main/java/com/yahoo/search/dispatch/Dispatcher.java @@ -3,11 +3,11 @@ package com.yahoo.search.dispatch; import com.google.common.annotations.Beta; import com.google.common.collect.ImmutableMap; -import com.google.inject.Inject; import com.yahoo.collections.ListMap; import com.yahoo.component.AbstractComponent; import com.yahoo.compress.CompressionType; import com.yahoo.compress.Compressor; +import com.yahoo.container.handler.VipStatus; import com.yahoo.data.access.slime.SlimeAdapter; import com.yahoo.prelude.fastsearch.FS4ResourcePool; import com.yahoo.prelude.fastsearch.FastHit; @@ -51,10 +51,10 @@ public class Dispatcher extends AbstractComponent { private final Compressor compressor = new Compressor(); - @Inject - public Dispatcher(DispatchConfig dispatchConfig, FS4ResourcePool fs4ResourcePool) { + public Dispatcher(DispatchConfig dispatchConfig, FS4ResourcePool fs4ResourcePool, + int containerClusterSize, VipStatus vipStatus) { this.client = new RpcClient(); - this.searchCluster = new SearchCluster(dispatchConfig, fs4ResourcePool); + this.searchCluster = new SearchCluster(dispatchConfig, fs4ResourcePool, containerClusterSize, vipStatus); // Create node rpc connections, indexed by the legacy "partid", which allows us to bridge // between fs4 calls (for search) and rpc calls (for summary fetch) diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/SearchCluster.java b/container-search/src/main/java/com/yahoo/search/dispatch/SearchCluster.java index bd47c0525ab..9d1b3565db8 100644 --- a/container-search/src/main/java/com/yahoo/search/dispatch/SearchCluster.java +++ 
b/container-search/src/main/java/com/yahoo/search/dispatch/SearchCluster.java @@ -1,9 +1,12 @@ package com.yahoo.search.dispatch; import com.google.common.annotations.Beta; +import com.google.common.collect.ImmutableCollection; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableMultimap; +import com.yahoo.container.handler.VipStatus; +import com.yahoo.net.HostName; import com.yahoo.search.cluster.ClusterMonitor; import com.yahoo.search.cluster.NodeManager; import com.yahoo.search.result.ErrorMessage; @@ -19,6 +22,7 @@ import com.yahoo.prelude.fastsearch.FS4ResourcePool; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; @@ -47,19 +51,34 @@ public class SearchCluster implements NodeManager<SearchCluster.Node> { private final ImmutableMap<Integer, Group> groups; private final ImmutableMultimap<String, Node> nodesByHost; private final ClusterMonitor<Node> clusterMonitor; + private final VipStatus vipStatus; + + /** + * A search node on this local machine having the entire corpus, which we therefore + * should prefer to dispatch directly to, or empty if there is no such local search node. + * If there is one, we also maintain the VIP status of this container based on the availability + * of the corpus on this local node (up + has coverage), such that this node is taken out of rotation + * if it only queries this cluster when the local node cannot be used, to avoid unnecessary + * cross-node network traffic. 
+ */ + private final Optional<Node> directDispatchTarget; // Only needed until query requests are moved to rpc private final FS4ResourcePool fs4ResourcePool; - public SearchCluster(DispatchConfig dispatchConfig, FS4ResourcePool fs4ResourcePool) { - this(dispatchConfig.minActivedocsPercentage(), toNodes(dispatchConfig), fs4ResourcePool); + public SearchCluster(DispatchConfig dispatchConfig, FS4ResourcePool fs4ResourcePool, + int containerClusterSize, VipStatus vipStatus) { + this(dispatchConfig.minActivedocsPercentage(), toNodes(dispatchConfig), fs4ResourcePool, + containerClusterSize, vipStatus); } - public SearchCluster(double minActivedocsCoverage, List<Node> nodes, FS4ResourcePool fs4ResourcePool) { + public SearchCluster(double minActivedocsCoverage, List<Node> nodes, FS4ResourcePool fs4ResourcePool, + int containerClusterSize, VipStatus vipStatus) { this.minActivedocsCoveragePercentage = minActivedocsCoverage; - size = nodes.size(); + this.size = nodes.size(); this.fs4ResourcePool = fs4ResourcePool; - + this.vipStatus = vipStatus; + // Create groups ImmutableMap.Builder<Integer, Group> groupsBuilder = new ImmutableMap.Builder<>(); for (Map.Entry<Integer, List<Node>> group : nodes.stream().collect(Collectors.groupingBy(Node::group)).entrySet()) @@ -77,8 +96,39 @@ public class SearchCluster implements NodeManager<SearchCluster.Node> { clusterMonitor = new ClusterMonitor<>(this); for (Node node : nodes) clusterMonitor.add(node, true); + + this.directDispatchTarget = findDirectDispatchTarget(HostName.getLocalhost(), size, containerClusterSize, + nodesByHost, groups); } - + + private static Optional<Node> findDirectDispatchTarget(String selfHostname, + int searchClusterSize, + int containerClusterSize, + ImmutableMultimap<String, Node>nodesByHost, + ImmutableMap<Integer, Group> groups) { + // A search node in the search cluster in question is configured on the same host as the currently running container. 
+ // It has all the data <==> No other nodes in the search cluster have the same group id as this node. + // That local search node responds. + // The search cluster to be searched has at least as many nodes as the container cluster we're running in. + ImmutableCollection<Node> localSearchNodes = nodesByHost.get(selfHostname); + // Only use direct dispatch if we have exactly 1 search node on the same machine: + if (localSearchNodes.size() != 1) return Optional.empty(); + + SearchCluster.Node localSearchNode = localSearchNodes.iterator().next(); + SearchCluster.Group localSearchGroup = groups.get(localSearchNode.group()); + + // Only use direct dispatch if the local search node has the entire corpus + if (localSearchGroup.nodes().size() != 1) return Optional.empty(); + + // Only use direct dispatch if this container cluster has at least as many nodes as the search cluster + // to avoid load skew/preserve fanout in the case where a subset of the search nodes are also containers. + // This disregards the case where the search and container clusters are partially overlapping. + // Such configurations produce skewed load in any case. + if (containerClusterSize < searchClusterSize) return Optional.empty(); + + return Optional.of(localSearchNode); + } + private static ImmutableList<Node> toNodes(DispatchConfig dispatchConfig) { ImmutableList.Builder<Node> nodesBuilder = new ImmutableList.Builder<>(); for (DispatchConfig.Node node : dispatchConfig.node()) @@ -98,13 +148,63 @@ public class SearchCluster implements NodeManager<SearchCluster.Node> { */ public ImmutableMultimap<String, Node> nodesByHost() { return nodesByHost; } + /** + * Returns the recipient we should dispatch queries directly to (bypassing fdispatch), + * or empty if we should not dispatch directly. + */ + public Optional<Node> directDispatchTarget() { + if ( ! 
directDispatchTarget.isPresent()) return Optional.empty(); + + // Only use direct dispatch if the local group has sufficient coverage + SearchCluster.Group localSearchGroup = groups.get(directDispatchTarget.get().group()); + if ( ! localSearchGroup.hasSufficientCoverage()) return Optional.empty(); + + // Only use direct dispatch if the local search node is up + if ( ! directDispatchTarget.get().isWorking()) return Optional.empty(); + + return directDispatchTarget; + } + /** Used by the cluster monitor to manage node status */ @Override - public void working(Node node) { node.setWorking(true); } + public void working(Node node) { + node.setWorking(true); + + if (usesDirectDispatchTo(node)) + vipStatus.addToRotation(this); + } /** Used by the cluster monitor to manage node status */ @Override - public void failed(Node node) { node.setWorking(false); } + public void failed(Node node) { + node.setWorking(false); + + // Take ourselves out if we usually dispatch only to our own host + if (usesDirectDispatchTo(node)) + vipStatus.removeFromRotation(this); + } + + private void updateSufficientCoverage(Group group, boolean sufficientCoverage) { + // update VIP status if we direct dispatch to this group and coverage status changed + if (usesDirectDispatchTo(group) && sufficientCoverage != group.hasSufficientCoverage()) { + if (sufficientCoverage) + vipStatus.addToRotation(this); + else + vipStatus.removeFromRotation(this); + } + + group.setHasSufficientCoverage(sufficientCoverage); + } + + private boolean usesDirectDispatchTo(Node node) { + if ( ! directDispatchTarget.isPresent()) return false; + return directDispatchTarget.get().equals(node); + } + + private boolean usesDirectDispatchTo(Group group) { + if ( ! 
directDispatchTarget.isPresent()) return false; + return directDispatchTarget.get().group() == group.id(); + } /** Used by the cluster monitor to manage node status */ @Override @@ -132,7 +232,7 @@ public class SearchCluster implements NodeManager<SearchCluster.Node> { for (Group group : groups.values()) group.aggregateActiveDocuments(); if (groups.size() == 1) { - groups.values().iterator().next().setHasSufficientCoverage(true); // by definition + updateSufficientCoverage(groups.values().iterator().next(), true); // by definition } else { for (Group currentGroup : groups.values()) { long sumOfAactiveDocumentsInOtherGroups = 0; @@ -141,10 +241,10 @@ public class SearchCluster implements NodeManager<SearchCluster.Node> { sumOfAactiveDocumentsInOtherGroups += otherGroup.getActiveDocuments(); long averageDocumentsInOtherGroups = sumOfAactiveDocumentsInOtherGroups / (groups.size() - 1); if (averageDocumentsInOtherGroups == 0) - currentGroup.setHasSufficientCoverage(true); // no information about any group; assume coverage + updateSufficientCoverage(currentGroup, true); // no information about any group; assume coverage else - currentGroup.setHasSufficientCoverage( - 100 * (double) currentGroup.getActiveDocuments() / averageDocumentsInOtherGroups > minActivedocsCoveragePercentage); + updateSufficientCoverage(currentGroup, + 100 * (double) currentGroup.getActiveDocuments() / averageDocumentsInOtherGroups > minActivedocsCoveragePercentage); } } } @@ -200,7 +300,7 @@ public class SearchCluster implements NodeManager<SearchCluster.Node> { this.nodes = ImmutableList.copyOf(nodes); } - /** Returns the id of this group */ + /** Returns the unique identity of this group */ public int id() { return id; } /** Returns the nodes in this group as an immutable list */ @@ -218,7 +318,6 @@ public class SearchCluster implements NodeManager<SearchCluster.Node> { hasSufficientCoverage.lazySet(sufficientCoverage); } - void aggregateActiveDocuments() { long activeDocumentsInGroup = 0; for 
(Node node : nodes) @@ -235,6 +334,16 @@ public class SearchCluster implements NodeManager<SearchCluster.Node> { @Override public String toString() { return "search group " + id; } + @Override + public int hashCode() { return id; } + + @Override + public boolean equals(Object other) { + if (other == this) return true; + if (!(other instanceof Group)) return false; + return ((Group) other).id == this.id; + } + } /** A node in a search cluster. This class is multithread safe. */ diff --git a/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java b/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java index c431fdac638..56bd3fc57b3 100644 --- a/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java +++ b/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java @@ -4,6 +4,7 @@ package com.yahoo.search.handler; import com.google.inject.Inject; import com.yahoo.collections.Tuple2; import com.yahoo.component.ComponentSpecification; +import com.yahoo.component.Vtag; import com.yahoo.component.chain.Chain; import com.yahoo.component.chain.ChainsConfigurer; import com.yahoo.component.chain.model.ChainsModel; @@ -25,7 +26,6 @@ import com.yahoo.log.LogLevel; import com.yahoo.net.UriTools; import com.yahoo.prelude.IndexFacts; import com.yahoo.prelude.IndexModel; -import com.yahoo.prelude.VespaSVersionRetriever; import com.yahoo.prelude.query.QueryException; import com.yahoo.prelude.query.parser.ParseException; import com.yahoo.prelude.query.parser.SpecialTokenRegistry; @@ -64,7 +64,7 @@ import java.util.logging.Logger; /** * Handles search request. 
* - * @author <a href="mailto:steinar@yahoo-inc.com">Steinar Knutsen</a> + * @author Steinar Knutsen */ public class SearchHandler extends LoggingRequestHandler { @@ -106,8 +106,9 @@ public class SearchHandler extends LoggingRequestHandler { private final CompiledQueryProfileRegistry queryProfileRegistry; private final class MeanConnections implements Callback { + @Override - public void run(final Handle h, final boolean firstTime) { + public void run(Handle h, boolean firstTime) { if (firstTime) { metric.set(SEARCH_CONNECTIONS, 0.0d, null); return; @@ -193,13 +194,11 @@ public class SearchHandler extends LoggingRequestHandler { try { try { return handleBody(request); - } catch (final QueryException e) { + } catch (QueryException e) { return (e.getCause() instanceof IllegalArgumentException) ? invalidParameterResponse(request, e) : illegalQueryResponse(request, e); - } catch (final RuntimeException e) { // Make sure we generate a valid - // XML response even on unexpected - // errors + } catch (RuntimeException e) { // Make sure we generate a valid XML response even on unexpected errors log.log(Level.WARNING, "Failed handling " + request, e); return internalServerErrorResponse(request, e); } @@ -350,7 +349,6 @@ public class SearchHandler extends LoggingRequestHandler { Execution execution = new Execution(searchChain, new Execution.Context(registry, indexFacts, specialTokens, rendererRegistry, linguistics)); query.getModel().setExecution(execution); - query.getModel().traceLanguage(); execution.trace().setForceTimestamps(query.properties().getBoolean(FORCE_TIMESTAMPS, false)); if (query.properties().getBoolean(DETAILED_TIMING_LOGGING, false)) { // check and set (instead of set directly) to avoid overwriting stuff from prepareForBreakdownAnalysis() @@ -365,7 +363,7 @@ public class SearchHandler extends LoggingRequestHandler { execution.fill(result, result.getQuery().getPresentation().getSummary()); traceExecutionTimes(query, result); - traceVespaSVersion(query); + 
traceVespaVersion(query); traceRequestAttributes(query); return result; } @@ -379,7 +377,10 @@ public class SearchHandler extends LoggingRequestHandler { /** * For internal use only + * + * @deprecated remove on Vespa 7 */ + @Deprecated public Renderer<Result> getRendererCopy(ComponentSpecification spec) { // TODO: Deprecate this Renderer<Result> renderer = rendererRegistry.getRenderer(spec); return perRenderingCopy(renderer); @@ -510,19 +511,15 @@ public class SearchHandler extends LoggingRequestHandler { ElapsedTime elapsedTime = result.getElapsedTime(); long now = System.currentTimeMillis(); if (elapsedTime.firstFill() != 0) { - query.trace("Query time " + query + ": " - + (elapsedTime.firstFill() - elapsedTime.first()) + " ms", false, 3); - - query.trace("Summary fetch time " + query + ": " - + (now - elapsedTime.firstFill()) + " ms", false, 3); + query.trace("Query time " + query + ": " + (elapsedTime.firstFill() - elapsedTime.first()) + " ms", false, 3); + query.trace("Summary fetch time " + query + ": " + (now - elapsedTime.firstFill()) + " ms", false, 3); } else { - query.trace("Total search time " + query + ": " - + (now - elapsedTime.first()) + " ms", false, 3); + query.trace("Total search time " + query + ": " + (now - elapsedTime.first()) + " ms", false, 3); } } - private void traceVespaSVersion(Query query) { - query.trace("Vespa version: " + VespaSVersionRetriever.getVersion(), false, 4); + private void traceVespaVersion(Query query) { + query.trace("Vespa version: " + Vtag.currentVersion.toString(), false, 4); } public SearchChainRegistry getSearchChainRegistry() { diff --git a/container-search/src/main/java/com/yahoo/search/query/Model.java b/container-search/src/main/java/com/yahoo/search/query/Model.java index c6000e3d86b..bf0939d17c1 100644 --- a/container-search/src/main/java/com/yahoo/search/query/Model.java +++ b/container-search/src/main/java/com/yahoo/search/query/Model.java @@ -96,7 +96,10 @@ public class Model implements Cloneable { * 
creating the query instance, {@link #setExecution(Execution)} has to be * invoked first with the same Execution instance the query is intended to * be run by. + * + * @deprecated do not use; language can now be assigned later and for parts of the query tree, making this quite useless */ + @Deprecated public void traceLanguage() { if (getParent().getTraceLevel()<2) return; if (language != null) { diff --git a/container-search/src/main/java/com/yahoo/search/query/parser/Parsable.java b/container-search/src/main/java/com/yahoo/search/query/parser/Parsable.java index 92601a5464d..f0126b3e866 100644 --- a/container-search/src/main/java/com/yahoo/search/query/parser/Parsable.java +++ b/container-search/src/main/java/com/yahoo/search/query/parser/Parsable.java @@ -6,6 +6,8 @@ import com.yahoo.search.query.Model; import java.util.Collection; import java.util.HashSet; +import java.util.Objects; +import java.util.Optional; import java.util.Set; /** @@ -34,6 +36,7 @@ public final class Parsable { private String filter; private String defaultIndexName; private Language language; + private Optional<Language> explicitLanguage = Optional.empty(); public String getQuery() { return query; @@ -62,15 +65,27 @@ public final class Parsable { return this; } - public Language getLanguage() { - return language; - } + /** + * Returns the language to use when parsing, + * if not decided by the item under parsing. This is never null or UNKNOWN + */ + public Language getLanguage() { return language; } public Parsable setLanguage(Language language) { + Objects.requireNonNull(language, "Language cannot be null"); this.language = language; return this; } + /** Returns the language explicitly set to be used when parsing, or empty if none is set. 
*/ + public Optional<Language> getExplicitLanguage() { return explicitLanguage; } + + public Parsable setExplicitLanguage(Optional<Language> language) { + Objects.requireNonNull(language, "Explicit language cannot be null"); + this.explicitLanguage = language; + return this; + } + public Set<String> getSources() { return sourceList; } @@ -104,6 +119,7 @@ public final class Parsable { .setQuery(model.getQueryString()) .setFilter(model.getFilter()) .setLanguage(model.getParsingLanguage()) + .setExplicitLanguage(Optional.ofNullable(model.getLanguage())) .setDefaultIndexName(model.getDefaultIndex()) .addSources(model.getSources()) .addRestricts(model.getRestrict()); diff --git a/container-search/src/main/java/com/yahoo/search/querytransform/SortingDegrader.java b/container-search/src/main/java/com/yahoo/search/querytransform/SortingDegrader.java index 0c7871eb6e6..2597e440d17 100644 --- a/container-search/src/main/java/com/yahoo/search/querytransform/SortingDegrader.java +++ b/container-search/src/main/java/com/yahoo/search/querytransform/SortingDegrader.java @@ -69,6 +69,8 @@ public class SortingDegrader extends Searcher { } private void setDegradation(Query query) { + query.trace("Using sorting degrading for performance - totalHits will be wrong. 
" + + "Turn off with sorting.degrading=false.", 2); Sorting.FieldOrder primarySort = query.getRanking().getSorting().fieldOrders().get(0); // ensured above MatchPhase matchPhase = query.getRanking().getMatchPhase(); diff --git a/container-search/src/main/java/com/yahoo/search/rendering/JsonRenderer.java b/container-search/src/main/java/com/yahoo/search/rendering/JsonRenderer.java index 1d400056d52..7df859c6070 100644 --- a/container-search/src/main/java/com/yahoo/search/rendering/JsonRenderer.java +++ b/container-search/src/main/java/com/yahoo/search/rendering/JsonRenderer.java @@ -22,7 +22,6 @@ import org.json.JSONObject; import com.fasterxml.jackson.core.JsonEncoding; import com.fasterxml.jackson.core.JsonFactory; -import com.fasterxml.jackson.core.JsonGenerationException; import com.fasterxml.jackson.core.JsonGenerator; import com.fasterxml.jackson.core.TreeNode; import com.fasterxml.jackson.databind.ObjectMapper; @@ -77,15 +76,12 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> { boolean booleanValue() { switch (this) { - case YES: - return true; - case NO: - return false; - default: - throw new IllegalStateException(); + case YES: return true; + case NO: return false; + default: throw new IllegalStateException(); } } - }; + } // if this must be optimized, simply use com.fasterxml.jackson.core.SerializableString private static final String BUCKET_LIMITS = "limits"; @@ -173,8 +169,7 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> { } } - private void doVisit(final long timestamp, final Object payload, final boolean hasChildren) - throws IOException, JsonGenerationException { + private void doVisit(final long timestamp, final Object payload, final boolean hasChildren) throws IOException { boolean dirty = false; if (timestamp != 0L) { header(); @@ -216,7 +211,7 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> { } } - private void conditionalStartObject() throws IOException, 
JsonGenerationException { + private void conditionalStartObject() throws IOException { if (!isInsideOpenObject()) { generator.writeStartObject(); } else { @@ -303,7 +298,7 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> { generator = null; renderedChildren = null; debugRendering = false; - timeSource = () -> System.currentTimeMillis(); + timeSource = System::currentTimeMillis; stream = null; } @@ -320,21 +315,19 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> { } private void renderTiming() throws IOException { - if (!getResult().getQuery().getPresentation().getTiming()) { - return; - } + if (!getResult().getQuery().getPresentation().getTiming()) return; - final double milli = .001d; - final long now = timeSource.getAsLong(); - final long searchTime = now - getResult().getElapsedTime().first(); - final double searchSeconds = searchTime * milli; + double milli = .001d; + long now = timeSource.getAsLong(); + long searchTime = now - getResult().getElapsedTime().first(); + double searchSeconds = searchTime * milli; generator.writeObjectFieldStart(TIMING); if (getResult().getElapsedTime().firstFill() != 0L) { - final long queryTime = getResult().getElapsedTime().weightedSearchTime(); - final long summaryFetchTime = getResult().getElapsedTime().weightedFillTime(); - final double querySeconds = queryTime * milli; - final double summarySeconds = summaryFetchTime * milli; + long queryTime = getResult().getElapsedTime().weightedSearchTime(); + long summaryFetchTime = getResult().getElapsedTime().weightedFillTime(); + double querySeconds = queryTime * milli; + double summarySeconds = summaryFetchTime * milli; generator.writeNumberField(QUERY_TIME, querySeconds); generator.writeNumberField(SUMMARY_FETCH_TIME, summarySeconds); } @@ -344,18 +337,16 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> { } private boolean getDebugRendering(Query q) { - return q == null ? 
false : q.properties().getBoolean(DEBUG_RENDERING_KEY, false); + return q != null && q.properties().getBoolean(DEBUG_RENDERING_KEY, false); } - private void renderTrace(Trace trace) throws JsonGenerationException, IOException { - if (!trace.traceNode().children().iterator().hasNext()) { - return; - } + private void renderTrace(Trace trace) throws IOException { + if (!trace.traceNode().children().iterator().hasNext()) return; + try { long basetime = trace.traceNode().timestamp(); - if (basetime == 0L) { + if (basetime == 0L) basetime = getResult().getElapsedTime().first(); - } trace.accept(new TraceRenderer(basetime)); } catch (TraceRenderWrapper e) { throw new IOException(e); @@ -365,53 +356,49 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> { @Override public void beginList(DataList<?> list) throws IOException { Preconditions.checkArgument(list instanceof HitGroup, - "Expected subclass of com.yahoo.search.result.HitGroup, got %s.", - list.getClass()); + "Expected subclass of com.yahoo.search.result.HitGroup, got %s.", + list.getClass()); moreChildren(); - renderHitGroupHead((HitGroup) list); } - protected void moreChildren() throws IOException, JsonGenerationException { - if (!renderedChildren.isEmpty()) { + protected void moreChildren() throws IOException { + if (!renderedChildren.isEmpty()) childrenArray(); - } + renderedChildren.push(0); } - private void childrenArray() throws IOException, JsonGenerationException { - if (renderedChildren.peek() == 0) { + private void childrenArray() throws IOException { + if (renderedChildren.peek() == 0) generator.writeArrayFieldStart(CHILDREN); - } renderedChildren.push(renderedChildren.pop() + 1); } - private void lessChildren() throws IOException, JsonGenerationException { + private void lessChildren() throws IOException { int lastRenderedChildren = renderedChildren.pop(); if (lastRenderedChildren > 0) { generator.writeEndArray(); } } - private void renderHitGroupHead(HitGroup hitGroup) throws 
JsonGenerationException, IOException { - final ErrorHit errorHit = hitGroup.getErrorHit(); - + private void renderHitGroupHead(HitGroup hitGroup) throws IOException { generator.writeStartObject(); + renderHitContents(hitGroup); - if (getRecursionLevel() == 1) { + if (getRecursionLevel() == 1) renderCoverage(); - } - if (errorHit != null) { + + ErrorHit errorHit = hitGroup.getErrorHit(); + if (errorHit != null) renderErrors(errorHit.errors()); - } // the framework will invoke begin methods as needed from here } - private void renderErrors(Set<ErrorMessage> errors) throws JsonGenerationException, IOException { - if (errors.isEmpty()) { - return; - } + private void renderErrors(Set<ErrorMessage> errors) throws IOException { + if (errors.isEmpty()) return; + generator.writeArrayFieldStart(ERRORS); for (ErrorMessage e : errors) { String summary = e.getMessage(); @@ -441,11 +428,10 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> { } - private void renderCoverage() throws JsonGenerationException, IOException { + private void renderCoverage() throws IOException { Coverage c = getResult().getCoverage(false); - if (c == null) { - return; - } + if (c == null) return; + generator.writeObjectFieldStart(COVERAGE); generator.writeNumberField(COVERAGE_COVERAGE, c.getResultPercentage()); generator.writeNumberField(COVERAGE_DOCUMENTS, c.getDocs()); @@ -456,10 +442,8 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> { generator.writeEndObject(); } - private void renderHit(Hit hit) throws JsonGenerationException, IOException { - if (!shouldRender(hit)) { - return; - } + private void renderHit(Hit hit) throws IOException { + if (!shouldRender(hit)) return; childrenArray(); generator.writeStartObject(); @@ -468,54 +452,45 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> { } private boolean shouldRender(Hit hit) { - if (hit instanceof DefaultErrorHit) { - return false; - } - - return true; + return ! 
(hit instanceof DefaultErrorHit); } - private boolean fieldsStart(boolean hasFieldsField) throws JsonGenerationException, IOException { - if (hasFieldsField) { - return true; - } + private boolean fieldsStart(boolean hasFieldsField) throws IOException { + if (hasFieldsField) return true; generator.writeObjectFieldStart(FIELDS); return true; } - private void fieldsEnd(boolean hasFieldsField) throws JsonGenerationException, IOException { - if (!hasFieldsField) { - return; - } + private void fieldsEnd(boolean hasFieldsField) throws IOException { + if (!hasFieldsField) return; generator.writeEndObject(); } - private void renderHitContents(Hit hit) throws JsonGenerationException, IOException { + private void renderHitContents(Hit hit) throws IOException { String id = hit.getDisplayId(); - Set<String> types = hit.types(); - String source = hit.getSource(); - - if (id != null) { + if (id != null) generator.writeStringField(ID, id); - } + generator.writeNumberField(RELEVANCE, hit.getRelevance().getScore()); - if (types.size() > 0) { + + if (hit.types().size() > 0) { // TODO: Remove types rendering on Vespa 7 generator.writeArrayFieldStart(TYPES); - for (String t : types) { + for (String t : hit.types()) { generator.writeString(t); } generator.writeEndArray(); } - if (source != null) { + + String source = hit.getSource(); + if (source != null) generator.writeStringField(SOURCE, hit.getSource()); - } + renderSpecialCasesForGrouping(hit); renderAllFields(hit); } - private void renderAllFields(Hit hit) throws JsonGenerationException, - IOException { + private void renderAllFields(Hit hit) throws IOException { boolean hasFieldsField = false; hasFieldsField |= renderTotalHitCount(hit, hasFieldsField); @@ -523,8 +498,7 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> { fieldsEnd(hasFieldsField); } - private boolean renderStandardFields(Hit hit, boolean initialHasFieldsField) - throws JsonGenerationException, IOException { + private boolean 
renderStandardFields(Hit hit, boolean initialHasFieldsField) throws IOException { boolean hasFieldsField = initialHasFieldsField; for (String fieldName : hit.fieldKeys()) { if (!shouldRender(fieldName, hit)) continue; @@ -538,55 +512,39 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> { } private boolean shouldRender(String fieldName, Hit hit) { - if (debugRendering) { - return true; - } - if (fieldName.startsWith(VESPA_HIDDEN_FIELD_PREFIX)) { - return false; - } + if (debugRendering) return true; + + if (fieldName.startsWith(VESPA_HIDDEN_FIELD_PREFIX)) return false; RenderDecision r = lazyRenderAwareCheck(fieldName, hit); - if (r != RenderDecision.DO_NOT_KNOW) { - return r.booleanValue(); - } + if (r != RenderDecision.DO_NOT_KNOW) return r.booleanValue(); // this will trigger field decoding, so it is important the lazy decoding magic is done first Object field = hit.getField(fieldName); - if (field instanceof CharSequence && ((CharSequence) field).length() == 0) { - return false; - } - if (field instanceof StringFieldValue && ((StringFieldValue) field).getString().isEmpty()) { - // StringFieldValue cannot hold a null, so checking length directly is OK - return false; - } - if (field instanceof NanNumber) { - return false; - } + if (field instanceof CharSequence && ((CharSequence) field).length() == 0) return false; + + // StringFieldValue cannot hold a null, so checking length directly is OK: + if (field instanceof StringFieldValue && ((StringFieldValue) field).getString().isEmpty()) return false; + + if (field instanceof NanNumber) return false; return true; } private RenderDecision lazyRenderAwareCheck(String fieldName, Hit hit) { - if (!(hit instanceof FastHit)) return RenderDecision.DO_NOT_KNOW; + if ( ! 
(hit instanceof FastHit)) return RenderDecision.DO_NOT_KNOW; FastHit asFastHit = (FastHit) hit; if (asFastHit.fieldIsNotDecoded(fieldName)) { - FastHit.RawField r = asFastHit.fetchFieldAsUtf8(fieldName); - if (r != null) { - byte[] utf8 = r.getUtf8(); - if (utf8.length == 0) { - return RenderDecision.NO; - } else { - return RenderDecision.YES; - } - } + FastHit.RawField rawField = asFastHit.fetchFieldAsUtf8(fieldName); + if (rawField != null) + return rawField.getUtf8().length == 0 ? RenderDecision.NO : RenderDecision.YES; } return RenderDecision.DO_NOT_KNOW; } - private void renderSpecialCasesForGrouping(Hit hit) - throws JsonGenerationException, IOException { + private void renderSpecialCasesForGrouping(Hit hit) throws IOException { if (hit instanceof AbstractList) { renderGroupingListSyntheticFields((AbstractList) hit); } else if (hit instanceof Group) { @@ -594,8 +552,7 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> { } } - private void renderGroupingGroupSyntheticFields(Hit hit) - throws JsonGenerationException, IOException { + private void renderGroupingGroupSyntheticFields(Hit hit) throws IOException { renderGroupMetadata(((Group) hit).getGroupId()); if (hit instanceof RootGroup) { renderContinuations(Collections.singletonMap( @@ -603,22 +560,18 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> { } } - private void renderGroupingListSyntheticFields(AbstractList a) - throws JsonGenerationException, IOException { + private void renderGroupingListSyntheticFields(AbstractList a) throws IOException { writeGroupingLabel(a); renderContinuations(a.continuations()); } - private void writeGroupingLabel(AbstractList a) - throws JsonGenerationException, IOException { + private void writeGroupingLabel(AbstractList a) throws IOException { generator.writeStringField(LABEL, a.getLabel()); } - private void renderContinuations(Map<String, Continuation> continuations) - throws JsonGenerationException, IOException { - if 
(continuations.isEmpty()) { - return; - } + private void renderContinuations(Map<String, Continuation> continuations) throws IOException { + if (continuations.isEmpty()) return; + generator.writeObjectFieldStart(CONTINUATION); for (Map.Entry<String, Continuation> e : continuations.entrySet()) { generator.writeStringField(e.getKey(), e.getValue().toString()); @@ -626,17 +579,14 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> { generator.writeEndObject(); } - private void renderGroupMetadata(GroupId id) throws JsonGenerationException, - IOException { - if (!(id instanceof ValueGroupId || id instanceof BucketGroupId)) { - return; - } + private void renderGroupMetadata(GroupId id) throws IOException { + if (!(id instanceof ValueGroupId || id instanceof BucketGroupId)) return; if (id instanceof ValueGroupId) { - final ValueGroupId<?> valueId = (ValueGroupId<?>) id; + ValueGroupId<?> valueId = (ValueGroupId<?>) id; generator.writeStringField(GROUPING_VALUE, getIdValue(valueId)); - } else if (id instanceof BucketGroupId) { - final BucketGroupId<?> bucketId = (BucketGroupId<?>) id; + } else { + BucketGroupId<?> bucketId = (BucketGroupId<?>) id; generator.writeObjectFieldStart(BUCKET_LIMITS); generator.writeStringField(BUCKET_FROM, getBucketFrom(bucketId)); generator.writeStringField(BUCKET_TO, getBucketTo(bucketId)); @@ -645,40 +595,33 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> { } private static String getIdValue(ValueGroupId<?> id) { - return (id instanceof RawId ? Arrays.toString(((RawId) id).getValue()) - : id.getValue()).toString(); + return (id instanceof RawId ? Arrays.toString(((RawId) id).getValue()) : id.getValue()).toString(); } private static String getBucketFrom(BucketGroupId<?> id) { - return (id instanceof RawBucketId ? Arrays.toString(((RawBucketId) id) - .getFrom()) : id.getFrom()).toString(); + return (id instanceof RawBucketId ? 
Arrays.toString(((RawBucketId) id).getFrom()) : id.getFrom()).toString(); } private static String getBucketTo(BucketGroupId<?> id) { - return (id instanceof RawBucketId ? Arrays.toString(((RawBucketId) id) - .getTo()) : id.getTo()).toString(); + return (id instanceof RawBucketId ? Arrays.toString(((RawBucketId) id).getTo()) : id.getTo()).toString(); } - private boolean renderTotalHitCount(Hit hit, boolean hasFieldsField) - throws JsonGenerationException, IOException { - if (getRecursionLevel() == 1 && hit instanceof HitGroup) { - fieldsStart(hasFieldsField); - generator.writeNumberField(TOTAL_COUNT, getResult() - .getTotalHitCount()); - return true; - } else { - return false; - } + private boolean renderTotalHitCount(Hit hit, boolean hasFieldsField) throws IOException { + if ( ! (getRecursionLevel() == 1 && hit instanceof HitGroup)) return false; + + fieldsStart(hasFieldsField); + generator.writeNumberField(TOTAL_COUNT, getResult().getTotalHitCount()); + return true; } - private void renderField(String fieldName, Hit hit) throws JsonGenerationException, IOException { + private void renderField(String fieldName, Hit hit) throws IOException { generator.writeFieldName(fieldName); - if (!tryDirectRendering(fieldName, hit)) { + if ( ! 
tryDirectRendering(fieldName, hit)) { renderFieldContents(hit.getField(fieldName)); } } - private void renderFieldContents(Object field) throws JsonGenerationException, IOException { + private void renderFieldContents(Object field) throws IOException { if (field == null) { generator.writeNull(); } else if (field instanceof Number) { @@ -711,7 +654,7 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> { } } - private void renderNumberField(Number field) throws JsonGenerationException, IOException { + private void renderNumberField(Number field) throws IOException { if (field instanceof Integer) { generator.writeNumber(field.intValue()); } else if (field instanceof Float) { @@ -734,8 +677,7 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> { /** * Really a private method, but package access for testability. */ - boolean tryDirectRendering(String fieldName, Hit hit) - throws IOException, JsonGenerationException { + boolean tryDirectRendering(String fieldName, Hit hit) throws IOException { boolean renderedAsUtf8 = false; if (hit instanceof FastHit) { FastHit f = (FastHit) hit; @@ -755,8 +697,8 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> { @Override public void data(Data data) throws IOException { Preconditions.checkArgument(data instanceof Hit, - "Expected subclass of com.yahoo.search.result.Hit, got %s.", - data.getClass()); + "Expected subclass of com.yahoo.search.result.Hit, got %s.", + data.getClass()); renderHit((Hit) data); } @@ -785,8 +727,8 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> { private Result getResult() { Response r = getResponse(); Preconditions.checkArgument(r instanceof Result, - "JsonRenderer can only render instances of com.yahoo.search.Result, got instance of %s.", - r.getClass()); + "JsonRenderer can only render instances of com.yahoo.search.Result, got instance of %s.", + r.getClass()); return (Result) r; } @@ -841,4 +783,5 @@ public 
class JsonRenderer extends AsynchronousSectionedRenderer<Result> { void setTimeSource(LongSupplier timeSource) { this.timeSource = timeSource; } + } diff --git a/container-search/src/main/java/com/yahoo/search/yql/YqlParser.java b/container-search/src/main/java/com/yahoo/search/yql/YqlParser.java index bace3b0d9d4..5e8851bc5b3 100644 --- a/container-search/src/main/java/com/yahoo/search/yql/YqlParser.java +++ b/container-search/src/main/java/com/yahoo/search/yql/YqlParser.java @@ -10,6 +10,7 @@ import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.StringTokenizer; @@ -656,7 +657,7 @@ public class YqlParser implements Parser { Query.Type.ALL.toString(), "grammar for handling user input"); String defaultIndex = getAnnotation(ast, USER_INPUT_DEFAULT_INDEX, String.class, "default", "default index for user input terms"); - Language language = decideUserInputLanguage(ast, wordData); + Language language = decideParsingLanguage(ast, wordData); Item item; if (USER_INPUT_RAW.equals(grammar)) { item = instantiateWordItem(defaultIndex, wordData, ast, null, SegmentWhen.NEVER, language); @@ -666,17 +667,22 @@ public class YqlParser implements Parser { item = parseUserInput(grammar, defaultIndex, wordData, language, allowEmpty); propagateUserInputAnnotations(ast, item); } - item.setLanguage(language); return item; } - private Language decideUserInputLanguage(OperatorNode<ExpressionOperator> ast, String wordData) { + private Language decideParsingLanguage(OperatorNode<ExpressionOperator> ast, String wordData) { String languageTag = getAnnotation(ast, USER_INPUT_LANGUAGE, String.class, null, - "language setting for segmenting user input parameter"); + "language setting for segmenting query section"); + Language language = Language.fromLanguageTag(languageTag); if (language != Language.UNKNOWN) return language; + + Optional<Language> explicitLanguage = 
currentlyParsing.getExplicitLanguage(); + if (explicitLanguage.isPresent()) return explicitLanguage.get(); + language = detector.detect(wordData, null).getLanguage(); if (language != Language.UNKNOWN) return language; + return Language.ENGLISH; } @@ -711,6 +717,9 @@ public class YqlParser implements Parser { // the null check should be unnecessary, but is there to avoid having to suppress null warnings if ( !allowNullItem && (item == null || item instanceof NullItem)) throw new IllegalArgumentException("Parsing '" + wordData + "' only resulted in NullItem."); + + if (language != Language.ENGLISH) // mark the language used, unless it's the default + item.setLanguage(language); return item; } @@ -1037,11 +1046,8 @@ public class YqlParser implements Parser { } @NonNull - private CompositeItem convertVarArgs(OperatorNode<ExpressionOperator> ast, - int argIdx, @NonNull - CompositeItem out) { - Iterable<OperatorNode<ExpressionOperator>> args = ast - .getArgument(argIdx); + private CompositeItem convertVarArgs(OperatorNode<ExpressionOperator> ast, int argIdx, @NonNull CompositeItem out) { + Iterable<OperatorNode<ExpressionOperator>> args = ast.getArgument(argIdx); for (OperatorNode<ExpressionOperator> arg : args) { assertHasOperator(arg, ExpressionOperator.class); out.addItem(convertExpression(arg)); @@ -1049,10 +1055,8 @@ public class YqlParser implements Parser { return out; } - private void convertVarArgsAnd(OperatorNode<ExpressionOperator> ast, - int argIdx, AndItem outAnd, NotItem outNot) { - Iterable<OperatorNode<ExpressionOperator>> args = ast - .getArgument(argIdx); + private void convertVarArgsAnd(OperatorNode<ExpressionOperator> ast, int argIdx, AndItem outAnd, NotItem outNot) { + Iterable<OperatorNode<ExpressionOperator>> args = ast.getArgument(argIdx); for (OperatorNode<ExpressionOperator> arg : args) { assertHasOperator(arg, ExpressionOperator.class); if (arg.getOperator() == ExpressionOperator.NOT) { @@ -1087,28 +1091,25 @@ public class YqlParser implements 
Parser { assertHasOperator(spec, ExpressionOperator.CALL); assertHasFunctionName(spec, RANGE); - IntItem range = instantiateRangeItem( - spec.<List<OperatorNode<ExpressionOperator>>> getArgument(1), - spec); + IntItem range = instantiateRangeItem(spec.<List<OperatorNode<ExpressionOperator>>> getArgument(1), spec); return leafStyleSettings(spec, range); } private static Number negate(Number x) { if (x.getClass() == Integer.class) { int x1 = x.intValue(); - return Integer.valueOf(-x1); + return -x1; } else if (x.getClass() == Long.class) { long x1 = x.longValue(); - return Long.valueOf(-x1); + return -x1; } else if (x.getClass() == Float.class) { float x1 = x.floatValue(); - return Float.valueOf(-x1); + return -x1; } else if (x.getClass() == Double.class) { double x1 = x.doubleValue(); - return Double.valueOf(-x1); + return -x1; } else { - throw newUnexpectedArgumentException(x.getClass(), Integer.class, - Long.class, Float.class, Double.class); + throw newUnexpectedArgumentException(x.getClass(), Integer.class, Long.class, Float.class, Double.class); } } @@ -1199,23 +1200,19 @@ public class YqlParser implements Parser { private Item instantiateWordAlternativesItem(String field, OperatorNode<ExpressionOperator> ast) { List<OperatorNode<ExpressionOperator>> args = ast.getArgument(1); Preconditions.checkArgument(args.size() >= 1, "Expected 1 or more arguments, got %s.", args.size()); - Preconditions.checkArgument(args.get(0).getOperator() == ExpressionOperator.MAP, "Expected MAP, got %s.", args.get(0) - .getOperator()); + Preconditions.checkArgument(args.get(0).getOperator() == ExpressionOperator.MAP, "Expected MAP, got %s.", + args.get(0).getOperator()); List<WordAlternativesItem.Alternative> terms = new ArrayList<>(); List<String> keys = args.get(0).getArgument(0); List<OperatorNode<ExpressionOperator>> values = args.get(0).getArgument(1); for (int i = 0; i < keys.size(); ++i) { - String term = keys.get(i); - double exactness; OperatorNode<ExpressionOperator> value = 
values.get(i); - switch (value.getOperator()) { - case LITERAL: - exactness = value.getArgument(0, Double.class); - break; - default: + if (value.getOperator() != ExpressionOperator.LITERAL) throw newUnexpectedArgumentException(value.getOperator(), ExpressionOperator.LITERAL); - } + + String term = keys.get(i); + double exactness = value.getArgument(0, Double.class); terms.add(new WordAlternativesItem.Alternative(term, exactness)); } Substring origin = getOrigin(ast); @@ -1225,54 +1222,51 @@ public class YqlParser implements Parser { } @NonNull - private Item instantiateEquivItem(String field, - OperatorNode<ExpressionOperator> ast) { + private Item instantiateEquivItem(String field, OperatorNode<ExpressionOperator> ast) { List<OperatorNode<ExpressionOperator>> args = ast.getArgument(1); - Preconditions.checkArgument(args.size() >= 2, - "Expected 2 or more arguments, got %s.", args.size()); + Preconditions.checkArgument(args.size() >= 2, "Expected 2 or more arguments, got %s.", args.size()); EquivItem equiv = new EquivItem(); equiv.setIndexName(field); for (OperatorNode<ExpressionOperator> arg : args) { switch (arg.getOperator()) { - case LITERAL: - equiv.addItem(instantiateWordItem(field, arg, equiv.getClass())); - break; - case CALL: - assertHasFunctionName(arg, PHRASE); - equiv.addItem(instantiatePhraseItem(field, arg)); - break; - default: - throw newUnexpectedArgumentException(arg.getOperator(), - ExpressionOperator.CALL, ExpressionOperator.LITERAL); + case LITERAL: + equiv.addItem(instantiateWordItem(field, arg, equiv.getClass())); + break; + case CALL: + assertHasFunctionName(arg, PHRASE); + equiv.addItem(instantiatePhraseItem(field, arg)); + break; + default: + throw newUnexpectedArgumentException(arg.getOperator(), + ExpressionOperator.CALL, ExpressionOperator.LITERAL); } } return leafStyleSettings(ast, equiv); } @NonNull - private Item instantiateWordItem(String field, - OperatorNode<ExpressionOperator> ast, Class<?> parent) { + private Item 
instantiateWordItem(String field, OperatorNode<ExpressionOperator> ast, Class<?> parent) { return instantiateWordItem(field, ast, parent, SegmentWhen.POSSIBLY); } @NonNull - private Item instantiateWordItem(String field, - OperatorNode<ExpressionOperator> ast, Class<?> parent, - SegmentWhen segmentPolicy) { + private Item instantiateWordItem(String field, + OperatorNode<ExpressionOperator> ast, Class<?> parent, + SegmentWhen segmentPolicy) { String wordData = getStringContents(ast); - return instantiateWordItem(field, wordData, ast, parent, - segmentPolicy, null); + return instantiateWordItem(field, wordData, ast, parent, segmentPolicy, decideParsingLanguage(ast, wordData)); } @NonNull private Item instantiateWordItem(String field, - String rawWord, - OperatorNode<ExpressionOperator> ast, Class<?> parent, - SegmentWhen segmentPolicy, Language language) { + String rawWord, + OperatorNode<ExpressionOperator> ast, Class<?> parent, + SegmentWhen segmentPolicy, + Language language) { String wordData = rawWord; if (getAnnotation(ast, NFKC, Boolean.class, Boolean.TRUE, - "setting for whether to NFKC normalize input data")) { + "setting for whether to NFKC normalize input data")) { wordData = normalizer.normalize(wordData); } boolean fromQuery = getAnnotation(ast, IMPLICIT_TRANSFORMS, @@ -1320,6 +1314,8 @@ public class YqlParser implements Parser { if (wordItem instanceof WordItem) { prepareWord(field, ast, fromQuery, (WordItem) wordItem); } + if (language != Language.ENGLISH) // mark the language used, unless it's the default + ((Item)wordItem).setLanguage(language); return (Item) leafStyleSettings(ast, wordItem); } diff --git a/container-search/src/main/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcher.java b/container-search/src/main/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcher.java index 3917d353630..e27893a2b20 100644 --- a/container-search/src/main/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcher.java +++ 
b/container-search/src/main/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcher.java @@ -219,12 +219,8 @@ public class VdsStreamingSearcher extends VespaBackEndSearcher { private FastHit buildSummaryHit(Query query, SearchResult.Hit hit) { FastHit fastHit = new FastHit(); fastHit.setQuery(query); - fastHit.setSource("VdsStreamingSearcher"); + fastHit.setSource(getName()); fastHit.setId(hit.getDocId()); - // TODO: remove seField("uri", ...), just a helper for Velocity templates - fastHit.setField("uri", hit.getDocId()); - fastHit.types().add("summary"); - fastHit.setRelevance(new Relevance(hit.getRank())); fastHit.setFillable(); diff --git a/container-search/src/test/java/com/yahoo/prelude/fastsearch/test/DirectSearchTestCase.java b/container-search/src/test/java/com/yahoo/prelude/fastsearch/test/DirectSearchTestCase.java index 7e935b90386..3cc7a07cbf7 100644 --- a/container-search/src/test/java/com/yahoo/prelude/fastsearch/test/DirectSearchTestCase.java +++ b/container-search/src/test/java/com/yahoo/prelude/fastsearch/test/DirectSearchTestCase.java @@ -3,6 +3,8 @@ package com.yahoo.prelude.fastsearch.test; import org.junit.Test; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; /** * Tests that FastSearcher will bypass dispatch when the conditions are right @@ -56,11 +58,14 @@ public class DirectSearchTestCase { @Test public void testNoDirectSearchWhenLocalNodeIsDown() { FastSearcherTester tester = new FastSearcherTester(2, FastSearcherTester.selfHostname + ":9999:0", "otherhost:9999:1"); + assertTrue(tester.vipStatus().isInRotation()); tester.setResponding(FastSearcherTester.selfHostname, false); + assertFalse(tester.vipStatus().isInRotation()); assertEquals("1 ping request, 0 search requests", 1, tester.requestCount(FastSearcherTester.selfHostname, 9999)); tester.search("?query=test&dispatch.direct=true&nocache"); assertEquals("1 ping request, 0 search requests", 1, 
tester.requestCount(FastSearcherTester.selfHostname, 9999)); tester.setResponding(FastSearcherTester.selfHostname, true); + assertTrue(tester.vipStatus().isInRotation()); assertEquals("2 ping requests, 0 search request", 2, tester.requestCount(FastSearcherTester.selfHostname, 9999)); tester.search("?query=test&dispatch.direct=true&nocache"); assertEquals("2 ping requests, 1 search request", 3, tester.requestCount(FastSearcherTester.selfHostname, 9999)); @@ -81,25 +86,28 @@ public class DirectSearchTestCase { tester.search("?query=test&dispatch.direct=true&nocache"); assertEquals("Still 1 ping request, 0 search requests because the default coverage is 97%, and we only have 96% locally", 1, tester.requestCount(FastSearcherTester.selfHostname, 9999)); + assertFalse(tester.vipStatus().isInRotation()); tester.setActiveDocuments(FastSearcherTester.selfHostname, (long) (99 * k)); assertEquals("2 ping request, 0 search requests", 2, tester.requestCount(FastSearcherTester.selfHostname, 9999)); tester.search("?query=test&dispatch.direct=true&nocache"); assertEquals("2 ping request, 1 search requests because we now have 99% locally", 3, tester.requestCount(FastSearcherTester.selfHostname, 9999)); - + assertTrue(tester.vipStatus().isInRotation()); tester.setActiveDocuments("host1", (long) (104 * k)); assertEquals("2 ping request, 1 search requests", 3, tester.requestCount(FastSearcherTester.selfHostname, 9999)); tester.search("?query=test&dispatch.direct=true&nocache"); assertEquals("2 ping request, 2 search requests because 99/((104+100)/2) > 0.97", 4, tester.requestCount(FastSearcherTester.selfHostname, 9999)); + assertTrue(tester.vipStatus().isInRotation()); tester.setActiveDocuments("host2", (long) (102 * k)); assertEquals("2 ping request, 2 search requests", 4, tester.requestCount(FastSearcherTester.selfHostname, 9999)); tester.search("?query=test&dispatch.direct=true&nocache"); assertEquals("Still 2 ping request, 2 search requests because 99/((104+102)/2) < 0.97", 4, 
tester.requestCount(FastSearcherTester.selfHostname, 9999)); + assertFalse(tester.vipStatus().isInRotation()); } @Test diff --git a/container-search/src/test/java/com/yahoo/prelude/fastsearch/test/FastSearcherTestCase.java b/container-search/src/test/java/com/yahoo/prelude/fastsearch/test/FastSearcherTestCase.java index b052622ccd3..8dd56ad15a1 100644 --- a/container-search/src/test/java/com/yahoo/prelude/fastsearch/test/FastSearcherTestCase.java +++ b/container-search/src/test/java/com/yahoo/prelude/fastsearch/test/FastSearcherTestCase.java @@ -63,8 +63,7 @@ public class FastSearcherTestCase { new SummaryParameters(null), new ClusterParams("testhittype"), new CacheParams(100, 1e64), - documentdbInfoConfig, - 1); + documentdbInfoConfig); MockFSChannel.setEmptyDocsums(false); @@ -85,8 +84,7 @@ public class FastSearcherTestCase { new SummaryParameters(null), new ClusterParams("testhittype"), new CacheParams(100, 1e64), - documentdbInfoConfig, - 1); + documentdbInfoConfig); String query = "?junkparam=ignored"; Result result = doSearch(fastSearcher,new Query(query), 0, 10); @@ -109,8 +107,7 @@ public class FastSearcherTestCase { new SummaryParameters(null), new ClusterParams("testhittype"), new CacheParams(100, 1e64), - documentdbConfigWithOneDb, - 1); + documentdbConfigWithOneDb); Query query = new Query("?query=foo&model.restrict=testDb"); query.prepare(); @@ -298,8 +295,7 @@ public class FastSearcherTestCase { new SummaryParameters(null), new ClusterParams("testhittype"), new CacheParams(100, 1e64), - config, - 1); + config); } @Ignore @@ -312,8 +308,7 @@ public class FastSearcherTestCase { new SummaryParameters(null), new ClusterParams("testhittype"), new CacheParams(100, 1e64), - documentdbInfoConfig, - 1); + documentdbInfoConfig); CacheControl c = fastSearcher.getCacheControl(); @@ -356,8 +351,7 @@ public class FastSearcherTestCase { new SummaryParameters(null), new ClusterParams("testhittype"), new CacheParams(100, 1e64), - documentdbInfoConfig, - 1); + 
documentdbInfoConfig); Result result = doSearch(fastSearcher,new Query("?query=ignored"), 0, 2); result = doSearch(fastSearcher,new Query("?query=ignored"), 1, 1); @@ -392,8 +386,7 @@ public class FastSearcherTestCase { new SummaryParameters(null), new ClusterParams("testhittype"), new CacheParams(0, 0.0d), - documentdbInfoConfig, - 1); + documentdbInfoConfig); server.dispatch.packetData = BackendTestCase.PONG; Chain<Searcher> chain = new Chain<>(fastSearcher); Execution e = new Execution(chain, Execution.Context.createContextStub()); diff --git a/container-search/src/test/java/com/yahoo/prelude/fastsearch/test/FastSearcherTester.java b/container-search/src/test/java/com/yahoo/prelude/fastsearch/test/FastSearcherTester.java index e4d7e5fa076..d2638be4bc7 100644 --- a/container-search/src/test/java/com/yahoo/prelude/fastsearch/test/FastSearcherTester.java +++ b/container-search/src/test/java/com/yahoo/prelude/fastsearch/test/FastSearcherTester.java @@ -1,6 +1,7 @@ package com.yahoo.prelude.fastsearch.test; import com.google.common.util.concurrent.MoreExecutors; +import com.yahoo.container.handler.VipStatus; import com.yahoo.net.HostName; import com.yahoo.prelude.fastsearch.CacheParams; import com.yahoo.prelude.fastsearch.ClusterParams; @@ -31,26 +32,26 @@ class FastSearcherTester { private final MockFS4ResourcePool mockFS4ResourcePool; private final FastSearcher fastSearcher; private final MockDispatcher mockDispatcher; + private final VipStatus vipStatus = new VipStatus(); - public FastSearcherTester(int containerNodeCount, SearchCluster.Node searchNode) { - this(containerNodeCount, Collections.singletonList(searchNode)); + public FastSearcherTester(int containerClusterSize, SearchCluster.Node searchNode) { + this(containerClusterSize, Collections.singletonList(searchNode)); } - public FastSearcherTester(int containerNodeCount, String... 
hostAndPortAndGroupStrings) { - this(containerNodeCount, toNodes(hostAndPortAndGroupStrings)); + public FastSearcherTester(int containerClusterSize, String... hostAndPortAndGroupStrings) { + this(containerClusterSize, toNodes(hostAndPortAndGroupStrings)); } - public FastSearcherTester(int containerNodeCount, List<SearchCluster.Node> searchNodes) { + public FastSearcherTester(int containerClusterSize, List<SearchCluster.Node> searchNodes) { mockFS4ResourcePool = new MockFS4ResourcePool(); - mockDispatcher = new MockDispatcher(searchNodes, mockFS4ResourcePool); + mockDispatcher = new MockDispatcher(searchNodes, mockFS4ResourcePool, containerClusterSize, vipStatus); fastSearcher = new FastSearcher(new MockBackend(selfHostname, MockFSChannel::new), mockFS4ResourcePool, mockDispatcher, new SummaryParameters(null), new ClusterParams("testhittype"), new CacheParams(100, 1e64), - new DocumentdbInfoConfig(new DocumentdbInfoConfig.Builder()), - containerNodeCount); + new DocumentdbInfoConfig(new DocumentdbInfoConfig.Builder())); } private static List<SearchCluster.Node> toNodes(String... 
hostAndPortAndGroupStrings) { @@ -92,5 +93,7 @@ class FastSearcherTester { mockDispatcher.searchCluster().ping(node, MoreExecutors.directExecutor()); mockDispatcher.searchCluster().pingIterationCompleted(); } + + public VipStatus vipStatus() { return vipStatus; } } diff --git a/container-search/src/test/java/com/yahoo/prelude/fastsearch/test/MockDispatcher.java b/container-search/src/test/java/com/yahoo/prelude/fastsearch/test/MockDispatcher.java index b4a631ff474..dd4b0ef8b5a 100644 --- a/container-search/src/test/java/com/yahoo/prelude/fastsearch/test/MockDispatcher.java +++ b/container-search/src/test/java/com/yahoo/prelude/fastsearch/test/MockDispatcher.java @@ -1,5 +1,6 @@ package com.yahoo.prelude.fastsearch.test; +import com.yahoo.container.handler.VipStatus; import com.yahoo.prelude.fastsearch.FS4ResourcePool; import com.yahoo.search.Result; import com.yahoo.search.dispatch.Dispatcher; @@ -11,11 +12,16 @@ import java.util.List; class MockDispatcher extends Dispatcher { public MockDispatcher(List<SearchCluster.Node> nodes) { - super(toDispatchConfig(nodes), new FS4ResourcePool(1)); + super(toDispatchConfig(nodes), new FS4ResourcePool(1), 1, new VipStatus()); } - public MockDispatcher(List<SearchCluster.Node> nodes, FS4ResourcePool fs4ResourcePool) { - super(toDispatchConfig(nodes), fs4ResourcePool); + public MockDispatcher(List<SearchCluster.Node> nodes, VipStatus vipStatus) { + super(toDispatchConfig(nodes), new FS4ResourcePool(1), 1, vipStatus); + } + + public MockDispatcher(List<SearchCluster.Node> nodes, FS4ResourcePool fs4ResourcePool, + int containerClusterSize, VipStatus vipStatus) { + super(toDispatchConfig(nodes), fs4ResourcePool, containerClusterSize, vipStatus); } private static DispatchConfig toDispatchConfig(List<SearchCluster.Node> nodes) { diff --git a/container-search/src/test/java/com/yahoo/prelude/fastsearch/test/fs4mock/MockFS4ResourcePool.java 
b/container-search/src/test/java/com/yahoo/prelude/fastsearch/test/fs4mock/MockFS4ResourcePool.java index 91f12184884..71a8556fb00 100644 --- a/container-search/src/test/java/com/yahoo/prelude/fastsearch/test/fs4mock/MockFS4ResourcePool.java +++ b/container-search/src/test/java/com/yahoo/prelude/fastsearch/test/fs4mock/MockFS4ResourcePool.java @@ -16,9 +16,11 @@ public class MockFS4ResourcePool extends FS4ResourcePool { private final Map<String, Integer> requestsPerBackend = new HashMap<>(); private final Set<String> nonRespondingBackends = new HashSet<>(); private final Map<String, Long> activeDocumentsInBackend = new HashMap<>(); - + private final long testingThreadId; + public MockFS4ResourcePool() { super(1); + this.testingThreadId = Thread.currentThread().getId(); } @Override @@ -31,17 +33,23 @@ public class MockFS4ResourcePool extends FS4ResourcePool { () -> new MockFSChannel(activeDocumentsInBackend.getOrDefault(hostname, 0L))); } - /** Returns the number of times a backend for this hostname and port has been requested */ + /** + * Returns the number of times a backend for this hostname and port has been requested + * from the thread creating this + */ public int requestCount(String hostname, int port) { return requestsPerBackend.getOrDefault(hostname + ":" + port, 0); } - /** sets the number of active documents the given host will report to have in ping responses */ + /** Sets the number of active documents the given host will report to have in ping responses */ public void setActiveDocuments(String hostname, long activeDocuments) { activeDocumentsInBackend.put(hostname, activeDocuments); } private void countRequest(String hostAndPort) { + // ignore requests from the ping thread to avoid timing issues + if (Thread.currentThread().getId() != testingThreadId) return; + requestsPerBackend.put(hostAndPort, requestsPerBackend.getOrDefault(hostAndPort, 0) + 1); } diff --git a/container-search/src/test/java/com/yahoo/search/yql/MinimalQueryInserterTestCase.java 
b/container-search/src/test/java/com/yahoo/search/yql/MinimalQueryInserterTestCase.java index c2ce50b38b4..86ec570d6bb 100644 --- a/container-search/src/test/java/com/yahoo/search/yql/MinimalQueryInserterTestCase.java +++ b/container-search/src/test/java/com/yahoo/search/yql/MinimalQueryInserterTestCase.java @@ -27,6 +27,8 @@ import com.yahoo.search.query.Sorting.UcaSorter; import com.yahoo.search.result.ErrorMessage; import com.yahoo.search.searchchain.Execution; +import java.io.UnsupportedEncodingException; +import java.net.URLEncoder; import java.util.ArrayList; import java.util.List; @@ -133,32 +135,50 @@ public class MinimalQueryInserterTestCase { } @Test + public void testExplicitLanguageIsHonoredWithVerbatimQuery() { + String japaneseWord = "\u30ab\u30bf\u30ab\u30ca"; + Query query = new Query("search/?language=ja" + "&yql=select%20ignoredField%20from%20ignoredsource%20where%20title%20contains%20%22" + encode(japaneseWord) + "%22%3B"); + execution.search(query); + assertEquals(Language.JAPANESE, query.getModel().getParsingLanguage()); + assertEquals("title:"+ japaneseWord, query.getModel().getQueryTree().toString()); + } + + @Test + public void testUserLanguageIsDetectedWithVerbatimQuery() { + String japaneseWord = "\u30ab\u30bf\u30ab\u30ca"; + Query query = new Query("search/?yql=select%20ignoredField%20from%20ignoredsource%20where%20title%20contains%20%22" + encode(japaneseWord) + "%22%3B"); + execution.search(query); + assertEquals(Language.JAPANESE, query.getModel().getParsingLanguage()); + assertEquals("title:"+ japaneseWord, query.getModel().getQueryTree().toString()); + } + + @Test public void testUserLanguageIsDetectedWithUserInput() { String japaneseWord = "\u30ab\u30bf\u30ab\u30ca"; - Query query = new Query("search/?userString=" + japaneseWord + "&yql=select%20ignoredfield%20from%20ignoredsource%20where%20title%20contains%20%22madonna%22%20and%20userInput(@userString)%3B"); + Query query = new Query("search/?userString=" + encode(japaneseWord) + 
"&yql=select%20ignoredfield%20from%20ignoredsource%20where%20title%20contains%20%22madonna%22%20and%20userInput(@userString)%3B"); execution.search(query); - assertEquals("AND title:madonna default:" + japaneseWord, query.getModel().getQueryTree().toString()); assertEquals(Language.JAPANESE, query.getModel().getParsingLanguage()); + assertEquals("AND title:madonna default:" + japaneseWord, query.getModel().getQueryTree().toString()); } @Test public void testUserLanguageIsDetectedWithUserQuery() { String japaneseWord = "\u30ab\u30bf\u30ab\u30ca"; - Query query = new Query("search/?query=" + japaneseWord + "&yql=select%20ignoredfield%20from%20ignoredsource%20where%20title%20contains%20%22madonna%22%20and%20userQuery()%3B"); + Query query = new Query("search/?query=" + encode(japaneseWord) + "&yql=select%20ignoredfield%20from%20ignoredsource%20where%20title%20contains%20%22madonna%22%20and%20userQuery()%3B"); execution.search(query); - assertEquals("AND title:madonna " + japaneseWord, query.getModel().getQueryTree().toString()); assertEquals(Language.JAPANESE, query.getModel().getParsingLanguage()); + assertEquals("AND title:madonna " + japaneseWord, query.getModel().getQueryTree().toString()); } @Test - public final void testUserQueryFailsWithoutArgument() { + public void testUserQueryFailsWithoutArgument() { Query query = new Query("search/?query=easilyRecognizedString&yql=select%20ignoredfield%20from%20ignoredsource%20where%20title%20contains%20%22madonna%22%20and%20userQuery()%3B"); execution.search(query); assertEquals("AND title:madonna easilyRecognizedString", query.getModel().getQueryTree().toString()); } @Test - public final void testSearchFromAllSourcesWithUserSource() { + public void testSearchFromAllSourcesWithUserSource() { Query query = new Query("search/?query=easilyRecognizedString&sources=abc&yql=select%20ignoredfield%20from%20sources%20*%20where%20title%20contains%20%22madonna%22%20and%20userQuery()%3B"); execution.search(query); assertEquals("AND 
title:madonna easilyRecognizedString", query.getModel().getQueryTree().toString()); @@ -166,7 +186,7 @@ public class MinimalQueryInserterTestCase { } @Test - public final void testSearchFromAllSourcesWithoutUserSource() { + public void testSearchFromAllSourcesWithoutUserSource() { Query query = new Query("search/?query=easilyRecognizedString&yql=select%20ignoredfield%20from%20sources%20*%20where%20title%20contains%20%22madonna%22%20and%20userQuery()%3B"); execution.search(query); assertEquals("AND title:madonna easilyRecognizedString", query.getModel().getQueryTree().toString()); @@ -174,7 +194,7 @@ public class MinimalQueryInserterTestCase { } @Test - public final void testSearchFromSomeSourcesWithoutUserSource() { + public void testSearchFromSomeSourcesWithoutUserSource() { Query query = new Query("search/?query=easilyRecognizedString&yql=select%20ignoredfield%20from%20sources%20sourceA,%20sourceB%20where%20title%20contains%20%22madonna%22%20and%20userQuery()%3B"); execution.search(query); assertEquals("AND title:madonna easilyRecognizedString", query.getModel().getQueryTree().toString()); @@ -184,8 +204,8 @@ public class MinimalQueryInserterTestCase { } @Test - public final void testSearchFromSomeSourcesWithUserSource() { - final Query query = new Query("search/?query=easilyRecognizedString&sources=abc&yql=select%20ignoredfield%20from%20sources%20sourceA,%20sourceB%20where%20title%20contains%20%22madonna%22%20and%20userQuery()%3B"); + public void testSearchFromSomeSourcesWithUserSource() { + Query query = new Query("search/?query=easilyRecognizedString&sources=abc&yql=select%20ignoredfield%20from%20sources%20sourceA,%20sourceB%20where%20title%20contains%20%22madonna%22%20and%20userQuery()%3B"); execution.search(query); assertEquals("AND title:madonna easilyRecognizedString", query.getModel().getQueryTree().toString()); assertEquals(3, query.getModel().getSources().size()); @@ -206,8 +226,8 @@ public class MinimalQueryInserterTestCase { } @Test - public final 
void testLimitAndOffset() { - final Query query = new Query("search/?yql=select%20*%20from%20sources%20*%20where%20title%20contains%20%22madonna%22%20limit%2031offset%207%3B"); + public void testLimitAndOffset() { + Query query = new Query("search/?yql=select%20*%20from%20sources%20*%20where%20title%20contains%20%22madonna%22%20limit%2031offset%207%3B"); execution.search(query); assertEquals(7, query.getOffset()); assertEquals(24, query.getHits()); @@ -216,8 +236,8 @@ public class MinimalQueryInserterTestCase { } @Test - public final void testMaxOffset() { - final Query query = new Query("search/?yql=select%20*%20from%20sources%20*%20where%20title%20contains%20%22madonna%22%20limit%2040031offset%2040000%3B"); + public void testMaxOffset() { + Query query = new Query("search/?yql=select%20*%20from%20sources%20*%20where%20title%20contains%20%22madonna%22%20limit%2040031offset%2040000%3B"); Result r = execution.search(query); assertEquals(1, r.hits().getErrorHit().errors().size()); ErrorMessage e = r.hits().getErrorHit().errorIterator().next(); @@ -226,8 +246,8 @@ public class MinimalQueryInserterTestCase { } @Test - public final void testMaxLimit() { - final Query query = new Query("search/?yql=select%20*%20from%20sources%20*%20where%20title%20contains%20%22madonna%22%20limit%2040000offset%207%3B"); + public void testMaxLimit() { + Query query = new Query("search/?yql=select%20*%20from%20sources%20*%20where%20title%20contains%20%22madonna%22%20limit%2040000offset%207%3B"); Result r = execution.search(query); assertEquals(1, r.hits().getErrorHit().errors().size()); ErrorMessage e = r.hits().getErrorHit().errorIterator().next(); @@ -236,15 +256,15 @@ public class MinimalQueryInserterTestCase { } @Test - public final void testTimeout() { - final Query query = new Query("search/?yql=select%20*%20from%20sources%20*%20where%20title%20contains%20%22madonna%22%20timeout%2051%3B"); + public void testTimeout() { + Query query = new 
Query("search/?yql=select%20*%20from%20sources%20*%20where%20title%20contains%20%22madonna%22%20timeout%2051%3B"); execution.search(query); assertEquals(51L, query.getTimeout()); assertEquals("select * from sources * where title contains \"madonna\" timeout 51;", query.yqlRepresentation()); } @Test - public final void testOrdering() { + public void testOrdering() { { String yql = "select%20ignoredfield%20from%20ignoredsource%20where%20title%20contains%20%22madonna%22%20order%20by%20something%2C%20shoesize%20desc%20limit%20300%20timeout%203%3B"; Query query = new Query("search/?yql=" + yql); @@ -276,22 +296,20 @@ public class MinimalQueryInserterTestCase { Query query = new Query("search/?yql=" + yql); execution.search(query); { - final FieldOrder fieldOrder = query.getRanking().getSorting() - .fieldOrders().get(0); + FieldOrder fieldOrder = query.getRanking().getSorting().fieldOrders().get(0); assertEquals("other", fieldOrder.getFieldName()); assertEquals(Order.DESCENDING, fieldOrder.getSortOrder()); - final AttributeSorter sorter = fieldOrder.getSorter(); + AttributeSorter sorter = fieldOrder.getSorter(); assertEquals(UcaSorter.class, sorter.getClass()); - final UcaSorter uca = (UcaSorter) sorter; + UcaSorter uca = (UcaSorter) sorter; assertEquals("en_US", uca.getLocale()); assertEquals(UcaSorter.Strength.IDENTICAL, uca.getStrength()); } { - final FieldOrder fieldOrder = query.getRanking().getSorting() - .fieldOrders().get(1); + FieldOrder fieldOrder = query.getRanking().getSorting().fieldOrders().get(1); assertEquals("something", fieldOrder.getFieldName()); assertEquals(Order.ASCENDING, fieldOrder.getSortOrder()); - final AttributeSorter sorter = fieldOrder.getSorter(); + AttributeSorter sorter = fieldOrder.getSorter(); assertEquals(LowerCaseSorter.class, sorter.getClass()); } assertEquals("select foo from bar where title contains \"madonna\" order by [{\"function\": \"uca\", \"locale\": \"en_US\", \"strength\": \"IDENTICAL\"}]other desc, [{\"function\": 
\"lowercase\"}]something limit 300 timeout 3;", @@ -300,7 +318,7 @@ public class MinimalQueryInserterTestCase { } @Test - public final void testStringReprBasicSanity() { + public void testStringReprBasicSanity() { String yql = "select%20ignoredfield%20from%20ignoredsource%20where%20title%20contains%20%22madonna%22%20order%20by%20something%2C%20shoesize%20desc%20limit%20300%20timeout%203%3B"; Query query = new Query("search/?yql=" + yql); execution.search(query); @@ -316,4 +334,14 @@ public class MinimalQueryInserterTestCase { } assertEquals(expected, actual.toString()); } + + private String encode(String s) { + try { + return URLEncoder.encode(s, "utf-8"); + } + catch (UnsupportedEncodingException e) { + throw new RuntimeException("Will never happen"); + } + } + } diff --git a/container-search/src/test/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcherTestCase.java b/container-search/src/test/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcherTestCase.java index 1931dd2179e..e3797b1f63e 100644 --- a/container-search/src/test/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcherTestCase.java +++ b/container-search/src/test/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcherTestCase.java @@ -175,7 +175,7 @@ public class VdsStreamingSearcherTestCase { for (int i=0; i<result.hits().size(); ++i) { Hit hit = result.hits().get(i); if (idPrefix != null) { - assertEquals("VdsStreamingSearcher", hit.getSource()); + assertEquals("clusterName", hit.getSource()); assertEquals(idPrefix + i, hit.getId().toString()); } else { assertNull(hit.getSource()); diff --git a/docker-api/pom.xml b/docker-api/pom.xml index e11f9b5ba01..8bccf7eec10 100644 --- a/docker-api/pom.xml +++ b/docker-api/pom.xml @@ -108,12 +108,6 @@ <scope>compile</scope> </dependency> <dependency> - <groupId>com.yahoo.vespa</groupId> - <artifactId>application-model</artifactId> - <version>${project.version}</version> - <scope>compile</scope> - </dependency> - <dependency> 
<groupId>junit</groupId> <artifactId>junit</artifactId> <scope>test</scope> diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/Container.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/Container.java index 1dc54311277..c23a11f18eb 100644 --- a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/Container.java +++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/Container.java @@ -1,21 +1,19 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.dockerapi; -import com.yahoo.vespa.applicationmodel.HostName; - import java.util.Objects; /** * @author stiankri */ public class Container { - public final HostName hostname; + public final String hostname; public final DockerImage image; public final ContainerName name; public final boolean isRunning; public Container( - final HostName hostname, + final String hostname, final DockerImage image, final ContainerName containerName, final boolean isRunning) { diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/ContainerStatsImpl.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/ContainerStatsImpl.java new file mode 100644 index 00000000000..b8838489068 --- /dev/null +++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/ContainerStatsImpl.java @@ -0,0 +1,40 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.hosted.dockerapi; + +import java.util.Map; + +/** + * Wrapper class for {@link com.github.dockerjava.api.model.Statistics} to prevent leaking from docker-java library. 
+ * + * @author valerijf + */ +public class ContainerStatsImpl implements Docker.ContainerStats { + private final Map<String, Object> networks; + private final Map<String, Object> cpuStats; + private final Map<String, Object> memoryStats; + private final Map<String, Object> blkioStats; + + public ContainerStatsImpl(Map<String, Object> networks, Map<String, Object> cpuStats, + Map<String, Object> memoryStats, Map<String, Object> blkioStats) { + this.networks = networks; + this.cpuStats = cpuStats; + this.memoryStats = memoryStats; + this.blkioStats = blkioStats; + } + + public Map<String, Object> getNetworks() { + return networks; + } + + public Map<String, Object> getCpuStats() { + return cpuStats; + } + + public Map<String, Object> getMemoryStats() { + return memoryStats; + } + + public Map<String, Object> getBlkioStats() { + return blkioStats; + } +} diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/CreateContainerCommandImpl.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/CreateContainerCommandImpl.java index 691d979df73..883c94cc5b7 100644 --- a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/CreateContainerCommandImpl.java +++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/CreateContainerCommandImpl.java @@ -5,7 +5,6 @@ import com.github.dockerjava.api.DockerClient; import com.github.dockerjava.api.command.CreateContainerCmd; import com.github.dockerjava.api.exception.DockerException; import com.github.dockerjava.api.model.Bind; -import com.yahoo.vespa.applicationmodel.HostName; import java.net.Inet6Address; import java.net.InetAddress; @@ -22,7 +21,7 @@ class CreateContainerCommandImpl implements Docker.CreateContainerCommand { private final DockerClient docker; private final DockerImage dockerImage; private final ContainerName containerName; - private final HostName hostName; + private final String hostName; private final Map<String, String> labels = new HashMap<>(); private final List<String> 
environmentAssignments = new ArrayList<>(); private final List<String> volumeBindSpecs = new ArrayList<>(); @@ -35,7 +34,7 @@ class CreateContainerCommandImpl implements Docker.CreateContainerCommand { CreateContainerCommandImpl(DockerClient docker, DockerImage dockerImage, ContainerName containerName, - HostName hostName) { + String hostName) { this.docker = docker; this.dockerImage = dockerImage; this.containerName = containerName; @@ -100,7 +99,7 @@ class CreateContainerCommandImpl implements Docker.CreateContainerCommand { CreateContainerCmd containerCmd = docker .createContainerCmd(dockerImage.asString()) .withName(containerName.asString()) - .withHostName(hostName.s()) + .withHostName(hostName) .withMacAddress(generateRandomMACAddress()) .withLabels(labels) .withEnv(environmentAssignments) diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/Docker.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/Docker.java index 895d5b6ac4e..2a17b139d2b 100644 --- a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/Docker.java +++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/Docker.java @@ -1,10 +1,9 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.dockerapi; -import com.yahoo.vespa.applicationmodel.HostName; - import java.net.InetAddress; import java.util.List; +import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.CompletableFuture; @@ -27,7 +26,8 @@ public interface Docker { CreateContainerCommand createContainerCommand( DockerImage dockerImage, ContainerName containerName, - HostName hostName); + String hostName); + interface ContainerInfo { /** returns Optional.empty() if not running. 
*/ @@ -36,6 +36,16 @@ public interface Docker { ContainerInfo inspectContainer(ContainerName containerName); + + interface ContainerStats { + Map<String, Object> getNetworks(); + Map<String, Object> getCpuStats(); + Map<String, Object> getMemoryStats(); + Map<String, Object> getBlkioStats(); + } + + ContainerStats getContainerStats(ContainerName containerName); + void startContainer(ContainerName containerName); void stopContainer(ContainerName containerName); @@ -44,9 +54,11 @@ public interface Docker { void connectContainerToNetwork(ContainerName containerName, String networkName); + void copyArchiveToContainer(String sourcePath, ContainerName destinationContainer, String destinationPath); + List<Container> getAllManagedContainers(); - Optional<Container> getContainer(HostName hostname); + Optional<Container> getContainer(String hostname); CompletableFuture<DockerImage> pullImageAsync(DockerImage image); diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImpl.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImpl.java index 683eef0cb19..3d2b7c590e7 100644 --- a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImpl.java +++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImpl.java @@ -10,18 +10,24 @@ import com.github.dockerjava.api.exception.DockerClientException; import com.github.dockerjava.api.exception.DockerException; import com.github.dockerjava.api.model.Image; import com.github.dockerjava.api.model.Network; +import com.github.dockerjava.api.model.Statistics; import com.github.dockerjava.core.DefaultDockerClientConfig; import com.github.dockerjava.core.DockerClientImpl; import com.github.dockerjava.core.RemoteApiVersion; +import com.github.dockerjava.core.async.ResultCallbackTemplate; import com.github.dockerjava.core.command.ExecStartResultCallback; import com.github.dockerjava.core.command.PullImageResultCallback; import com.github.dockerjava.jaxrs.JerseyDockerCmdExecFactory; 
import com.google.inject.Inject; import com.yahoo.collections.Pair; import com.yahoo.log.LogLevel; +import com.yahoo.net.HostName; import com.yahoo.system.ProcessExecuter; -import com.yahoo.vespa.applicationmodel.HostName; import com.yahoo.vespa.defaults.Defaults; +import com.yahoo.vespa.hosted.dockerapi.metrics.CounterWrapper; +import com.yahoo.vespa.hosted.dockerapi.metrics.Dimensions; +import com.yahoo.vespa.hosted.dockerapi.metrics.GaugeWrapper; +import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper; import javax.annotation.concurrent.GuardedBy; import java.io.ByteArrayOutputStream; @@ -67,12 +73,15 @@ public class DockerImpl implements Docker { final DockerClient dockerClient; + private GaugeWrapper numberOfRunningContainersGauge; + private CounterWrapper numberOfDockerDaemonFails; + DockerImpl(final DockerClient dockerClient) { this.dockerClient = dockerClient; } @Inject - public DockerImpl(final DockerConfig config) { + public DockerImpl(final DockerConfig config, MetricReceiverWrapper metricReceiver) { JerseyDockerCmdExecFactory dockerFactory = new JerseyDockerCmdExecFactory() .withMaxPerRouteConnections(DOCKER_MAX_PER_ROUTE_CONNECTIONS) .withMaxTotalConnections(DOCKER_MAX_TOTAL_CONNECTIONS) @@ -81,9 +90,9 @@ public class DockerImpl implements Docker { RemoteApiVersion remoteApiVersion; try { - remoteApiVersion = RemoteApiVersion.parseConfig(DockerClientImpl.getInstance( - buildDockerClientConfig(config).build()) - .withDockerCmdExecFactory(dockerFactory).versionCmd().exec().getApiVersion()); + remoteApiVersion = RemoteApiVersion.parseConfig(DockerClientImpl.getInstance( + buildDockerClientConfig(config).build()) + .withDockerCmdExecFactory(dockerFactory).versionCmd().exec().getApiVersion()); logger.info("Found version of remote docker API: "+ remoteApiVersion); // From version 1.24 a field was removed which causes trouble with the current docker java code. // When this is fixed, we can remove this and do not specify version. 
@@ -98,8 +107,8 @@ public class DockerImpl implements Docker { this.dockerClient = DockerClientImpl.getInstance( buildDockerClientConfig(config) - .withApiVersion(remoteApiVersion) - .build()) + .withApiVersion(remoteApiVersion) + .build()) .withDockerCmdExecFactory(dockerFactory); try { @@ -107,6 +116,16 @@ public class DockerImpl implements Docker { } catch (Exception e) { throw new RuntimeException("Could not setup docker network", e); } + + Dimensions dimensions = new Dimensions.Builder() + .add("host", HostName.getLocalhost()) + .add("role", "docker").build(); + + numberOfRunningContainersGauge = metricReceiver.declareGauge(dimensions, "containers.running"); + numberOfDockerDaemonFails = metricReceiver.declareCounter(dimensions, "daemon.api_fails"); + + // Some containers could already be running, count them and intialize to that value + numberOfRunningContainersGauge.sample(getAllManagedContainers().size()); } static DefaultDockerClientConfig.Builder buildDockerClientConfig(DockerConfig config) { @@ -152,6 +171,12 @@ public class DockerImpl implements Docker { .exec(); } + @Override + public void copyArchiveToContainer(String sourcePath, ContainerName destinationContainer, String destinationPath) { + dockerClient.copyArchiveToContainerCmd(destinationContainer.asString()) + .withHostResource(sourcePath).withRemotePath(destinationPath).exec(); + } + @Override public CompletableFuture<DockerImage> pullImageAsync(final DockerImage image) { @@ -209,21 +234,27 @@ public class DockerImpl implements Docker { flatMap(image -> Arrays.stream(image.getRepoTags())). 
anyMatch(tag -> tag.equals(dockerImage.asString())); } catch (DockerException e) { + numberOfDockerDaemonFails.add(); throw new RuntimeException("Failed to list image name: '" + dockerImage + "'", e); } } @Override - public CreateContainerCommand createContainerCommand(DockerImage image, ContainerName name, HostName hostName) { + public CreateContainerCommand createContainerCommand(DockerImage image, ContainerName name, String hostName) { return new CreateContainerCommandImpl(dockerClient, image, name, hostName) .withLabel(LABEL_NAME_MANAGEDBY, LABEL_VALUE_MANAGEDBY); } @Override public void connectContainerToNetwork(ContainerName containerName, String networkName) { - dockerClient.connectToNetworkCmd() - .withContainerId(containerName.asString()) - .withNetworkId(networkName).exec(); + try { + dockerClient.connectToNetworkCmd() + .withContainerId(containerName.asString()) + .withNetworkId(networkName).exec(); + } catch (DockerException e) { + numberOfDockerDaemonFails.add(); + throw new RuntimeException("Failed to connect container to network", e); + } } @Override @@ -248,6 +279,7 @@ public class DockerImpl implements Docker { return new ProcessResult(exitCode, new String(output.toByteArray()), new String(errors.toByteArray())); } catch (DockerException | InterruptedException e) { + numberOfDockerDaemonFails.add(); throw new RuntimeException("Container " + containerName.asString() + " failed to execute " + Arrays.toString(args), e); } @@ -255,8 +287,30 @@ public class DockerImpl implements Docker { @Override public ContainerInfo inspectContainer(ContainerName containerName) { - InspectContainerResponse containerInfo = dockerClient.inspectContainerCmd(containerName.asString()).exec(); - return new ContainerInfoImpl(containerName, containerInfo); + try { + InspectContainerResponse containerInfo = dockerClient.inspectContainerCmd(containerName.asString()).exec(); + return new ContainerInfoImpl(containerName, containerInfo); + } catch (DockerException e) { + 
numberOfDockerDaemonFails.add(); + throw new RuntimeException("Failed to get container info", e); + } + } + + public ContainerStats getContainerStats(ContainerName containerName) { + try { + // TODO: Uncomment this to get container stats through docker-java when the jersey issues are resolved + // DockerStatsCallback statsCallback = dockerClient.statsCmd(containerName.asString()).exec(new DockerStatsCallback()); + // statsCallback.awaitCompletion(5, TimeUnit.SECONDS); + + Statistics stats = DockerStatsCmd.getContainerStatistics(containerName); + return new ContainerStatsImpl(stats.getNetworks(), stats.getCpuStats(), + stats.getMemoryStats(), stats.getBlkioStats()); + } catch (DockerException e) { + numberOfDockerDaemonFails.add(); + throw new RuntimeException("Failed to get container stats", e); + } catch (IOException e) { + throw new RuntimeException("Failed to get container stats", e); + } } @Override @@ -265,13 +319,14 @@ public class DockerImpl implements Docker { if (dockerContainer.isPresent()) { try { dockerClient.startContainerCmd(dockerContainer.get().getId()).exec(); + numberOfRunningContainersGauge.sample(getAllManagedContainers().size()); } catch (DockerException e) { + numberOfDockerDaemonFails.add(); throw new RuntimeException("Failed to start container", e); } } } - @Override public void stopContainer(final ContainerName containerName) { Optional<com.github.dockerjava.api.model.Container> dockerContainer = getContainerFromName(containerName, true); @@ -279,6 +334,7 @@ public class DockerImpl implements Docker { try { dockerClient.stopContainerCmd(dockerContainer.get().getId()).withTimeout(SECONDS_TO_WAIT_BEFORE_KILLING).exec(); } catch (DockerException e) { + numberOfDockerDaemonFails.add(); throw new RuntimeException("Failed to stop container", e); } } @@ -290,7 +346,9 @@ public class DockerImpl implements Docker { if (dockerContainer.isPresent()) { try { dockerClient.removeContainerCmd(dockerContainer.get().getId()).exec(); + 
numberOfRunningContainersGauge.sample(getAllManagedContainers().size()); } catch (DockerException e) { + numberOfDockerDaemonFails.add(); throw new RuntimeException("Failed to delete container", e); } } @@ -304,12 +362,13 @@ public class DockerImpl implements Docker { .flatMap(this::asContainer) .collect(Collectors.toList()); } catch (DockerException e) { + numberOfDockerDaemonFails.add(); throw new RuntimeException("Could not retrieve all container", e); } } @Override - public Optional<Container> getContainer(HostName hostname) { + public Optional<Container> getContainer(String hostname) { // TODO Don't rely on getAllManagedContainers return getAllManagedContainers().stream() .filter(c -> Objects.equals(hostname, c.hostname)) @@ -320,11 +379,12 @@ public class DockerImpl implements Docker { try { final InspectContainerResponse response = dockerClient.inspectContainerCmd(dockerClientContainer.getId()).exec(); return Stream.of(new Container( - new HostName(response.getConfig().getHostName()), + response.getConfig().getHostName(), new DockerImage(dockerClientContainer.getImage()), new ContainerName(decode(response.getName())), response.getState().getRunning())); } catch (DockerException e) { + numberOfDockerDaemonFails.add(); //TODO: do proper exception handling throw new RuntimeException("Failed talking to docker daemon", e); } @@ -362,7 +422,12 @@ public class DockerImpl implements Docker { @Override public void deleteImage(final DockerImage dockerImage) { - dockerClient.removeImageCmd(dockerImage.asString()).exec(); + try { + dockerClient.removeImageCmd(dockerImage.asString()).exec(); + } catch (DockerException e) { + numberOfDockerDaemonFails.add(); + throw new RuntimeException("Failed to delete docker image " + dockerImage.asString(), e); + } } private Map<String, Image> filterOutImagesUsedByContainers( @@ -444,11 +509,7 @@ public class DockerImpl implements Docker { @Override public void deleteUnusedDockerImages(Set<DockerImage> except) { - try { - 
getUnusedDockerImages(except).stream().forEach(this::deleteImage); - } catch (DockerException e) { - throw new RuntimeException("Unexpected exception", e); - } + getUnusedDockerImages(except).stream().forEach(this::deleteImage); } private class ImagePullCallback extends PullImageResultCallback { @@ -474,4 +535,16 @@ public class DockerImpl implements Docker { } } } + + private class DockerStatsCallback extends ResultCallbackTemplate<DockerStatsCallback, Statistics> { + private Statistics stats; + + @Override + public void onNext(Statistics stats) { + if (stats != null) { + this.stats = stats; + onComplete(); + } + } + } } diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerStatsCmd.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerStatsCmd.java new file mode 100644 index 00000000000..a0ee9bedaf2 --- /dev/null +++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerStatsCmd.java @@ -0,0 +1,25 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.hosted.dockerapi; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.github.dockerjava.api.model.Statistics; + +import java.io.IOException; +import java.net.URL; + + +/** + * Class that makes HTTP request to get docker container stats as docker-java's + * {@link com.github.dockerjava.api.DockerClient#statsCmd(String)} fails because of jersey version conflict. 
+ * + * @author valerijf + */ +public class DockerStatsCmd { + private static final ObjectMapper objectMapper = new ObjectMapper(); + + public static Statistics getContainerStatistics(ContainerName containerName) throws IOException { + URL url = new URL("http://localhost:2376/containers/" + containerName.asString() + "/stats"); + + return objectMapper.readValue(url, Statistics.class); + } +} diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/CounterWrapper.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/CounterWrapper.java new file mode 100644 index 00000000000..f6b398e83a2 --- /dev/null +++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/CounterWrapper.java @@ -0,0 +1,39 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.hosted.dockerapi.metrics; + +import com.yahoo.metrics.simple.Counter; + +/** + * Forwards sample to {@link com.yahoo.metrics.simple.Counter} to be displayed in /state/v1/metrics, + * while also saving the value so it can be accessed programatically later. + * + * @author valerijf + */ +public class CounterWrapper implements MetricValue { + private final Object lock = new Object(); + + private final Counter counter; + private long value = 0; + + CounterWrapper(Counter counter) { + this.counter = counter; + } + + public void add() { + add(1L); + } + + public void add(long n) { + synchronized (lock) { + counter.add(n); + value += n; + } + } + + @Override + public Number getValue() { + synchronized (lock) { + return value; + } + } +}
\ No newline at end of file diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/Dimensions.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/Dimensions.java new file mode 100644 index 00000000000..cb5185e440c --- /dev/null +++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/Dimensions.java @@ -0,0 +1,51 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.hosted.dockerapi.metrics; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +/** + * Each metric reported to secret agent has dimmensions. + * + * @author valerijf + */ +public class Dimensions { + public final Map<String, Object> dimensionsMap; + + private Dimensions(Map<String, Object> dimensionsMap) { + this.dimensionsMap = dimensionsMap; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Dimensions that = (Dimensions) o; + return dimensionsMap.equals(that.dimensionsMap); + } + + @Override + public int hashCode() { + return dimensionsMap.hashCode(); + } + + @Override + public String toString() { + return dimensionsMap.toString(); + } + + public static class Builder { + private final Map<String, String> dimensionsMap = new HashMap<>(); + + public Dimensions.Builder add(String dimensionName, String dimensionValue) { + dimensionsMap.put(dimensionName, dimensionValue); + return this; + } + + public Dimensions build() { + return new Dimensions(Collections.unmodifiableMap(new HashMap<>(dimensionsMap))); + } + } +} diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/GaugeWrapper.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/GaugeWrapper.java new file mode 100644 index 00000000000..db0670c2f87 --- /dev/null +++ 
b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/GaugeWrapper.java @@ -0,0 +1,35 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.hosted.dockerapi.metrics; + +import com.yahoo.metrics.simple.Gauge; + +/** + * Forwards sample to {@link com.yahoo.metrics.simple.Gauge} to be displayed in /state/v1/metrics, + * while also saving the value so it can be accessed programatically later. + * + * @author valerijf + */ +public class GaugeWrapper implements MetricValue { + private final Object lock = new Object(); + + private final Gauge gauge; + private double value; + + GaugeWrapper(Gauge gauge) { + this.gauge = gauge; + } + + public void sample(double x) { + synchronized (lock) { + gauge.sample(x); + this.value = x; + } + } + + @Override + public Number getValue() { + synchronized (lock) { + return value; + } + } +} diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricReceiverWrapper.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricReceiverWrapper.java new file mode 100644 index 00000000000..821e0db0688 --- /dev/null +++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricReceiverWrapper.java @@ -0,0 +1,112 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.hosted.dockerapi.metrics; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.inject.Inject; +import com.yahoo.metrics.simple.MetricReceiver; +import com.yahoo.metrics.simple.Point; + +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * Export metrics to both /state/v1/metrics and makes them available programatically. 
+ * + * @author valerijf + */ +public class MetricReceiverWrapper implements Iterable<MetricReceiverWrapper.DimensionMetrics> { + private final static ObjectMapper objectMapper = new ObjectMapper(); + private final Object monitor = new Object(); + private final Map<Dimensions, Map<String, MetricValue>> metricsByDimensions = new HashMap<>(); + private final MetricReceiver metricReceiver; + + @Inject + public MetricReceiverWrapper(MetricReceiver metricReceiver) { + this.metricReceiver = metricReceiver; + } + + /** + * Declaring the same dimensions and name results in the same CounterWrapper instance (idempotent). + */ + public CounterWrapper declareCounter(Dimensions dimensions, String name) { + synchronized (monitor) { + if (!metricsByDimensions.containsKey(dimensions)) metricsByDimensions.put(dimensions, new HashMap<>()); + if (!metricsByDimensions.get(dimensions).containsKey(name)) { + CounterWrapper counter = new CounterWrapper(metricReceiver.declareCounter(name, new Point(dimensions.dimensionsMap))); + metricsByDimensions.get(dimensions).put(name, counter); + } + + return (CounterWrapper) metricsByDimensions.get(dimensions).get(name); + } + } + + /** + * Declaring the same dimensions and name results in the same GaugeWrapper instance (idempotent). 
+ */ + public GaugeWrapper declareGauge(Dimensions dimensions, String name) { + synchronized (monitor) { + if (!metricsByDimensions.containsKey(dimensions)) + metricsByDimensions.put(dimensions, new HashMap<>()); + if (!metricsByDimensions.get(dimensions).containsKey(name)) { + GaugeWrapper gauge = new GaugeWrapper(metricReceiver.declareGauge(name, new Point(dimensions.dimensionsMap))); + metricsByDimensions.get(dimensions).put(name, gauge); + } + + return (GaugeWrapper) metricsByDimensions.get(dimensions).get(name); + } + } + + public void unsetMetricsForContainer(String hostname) { + synchronized (monitor) { + Set<Dimensions> dimensions = metricsByDimensions.keySet(); + for (Dimensions dimension : dimensions) { + if (dimension.dimensionsMap.containsKey("host") && dimension.dimensionsMap.get("host").equals(hostname)) { + metricsByDimensions.remove(dimension); + } + } + } + } + + + @Override + public Iterator<DimensionMetrics> iterator() { + synchronized (monitor) { + return metricsByDimensions.entrySet().stream().map(entry -> + new DimensionMetrics(entry.getKey(), entry.getValue())).iterator(); + } + } + + // For testing + Map<String, Number> getMetricsForDimension(Dimensions dimensions) { + synchronized (monitor) { + return metricsByDimensions.get(dimensions).entrySet().stream().collect( + Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().getValue())); + } + } + + public class DimensionMetrics { + private final Dimensions dimensions; + private final Map<String, Object> metrics; + + DimensionMetrics(Dimensions dimensions, Map<String, MetricValue> metricValues) { + this.dimensions = dimensions; + this.metrics = metricValues.entrySet().stream().collect( + Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().getValue())); + } + + public String toSecretAgentReport() throws JsonProcessingException { + Map<String, Object> report = new LinkedHashMap<>(); + report.put("application", "docker"); + report.put("timestamp", System.currentTimeMillis() / 
1000); + report.put("dimensions", dimensions.dimensionsMap); + report.put("metrics", metrics); + + return objectMapper.writeValueAsString(report); + } + } +} diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricValue.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricValue.java new file mode 100644 index 00000000000..f9e04694cb5 --- /dev/null +++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricValue.java @@ -0,0 +1,9 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.hosted.dockerapi.metrics; + +/** + * @author valerijf + */ +public interface MetricValue { + Number getValue(); +} diff --git a/staging_vespalib/src/vespa/vespalib/stllike/smallvector.cpp b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/package-info.java index 59d71217c41..d7818e3b8ee 100644 --- a/staging_vespalib/src/vespa/vespalib/stllike/smallvector.cpp +++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/package-info.java @@ -1,2 +1,5 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-#include "smallvector.h" +@ExportPackage +package com.yahoo.vespa.hosted.dockerapi.metrics; + +import com.yahoo.osgi.annotation.ExportPackage; diff --git a/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/DockerTest.java b/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/DockerTest.java index c34411b1d61..6c5d6b1f3bb 100644 --- a/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/DockerTest.java +++ b/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/DockerTest.java @@ -3,7 +3,8 @@ package com.yahoo.vespa.hosted.dockerapi; import com.github.dockerjava.api.model.Network; import com.github.dockerjava.core.command.BuildImageResultCallback; -import com.yahoo.vespa.applicationmodel.HostName; +import com.yahoo.metrics.simple.MetricReceiver; +import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper; import org.junit.After; import org.junit.Before; import org.junit.Ignore; @@ -43,7 +44,7 @@ public class DockerTest { .clientKeyPath("") .uri("unix:///var/run/docker.sock")); - private static final DockerImpl docker = new DockerImpl(dockerConfig); + private static final DockerImpl docker = new DockerImpl(dockerConfig, new MetricReceiverWrapper(MetricReceiver.nullImplementation)); private static final DockerImage dockerImage = new DockerImage("simple-ipv6-server:Dockerfile"); @@ -67,8 +68,8 @@ public class DockerTest { @Ignore @Test public void testDockerNetworking() throws InterruptedException, ExecutionException, IOException { - HostName hostName1 = new HostName("docker10.test.yahoo.com"); - HostName hostName2 = new HostName("docker11.test.yahoo.com"); + String hostName1 = "docker10.test.yahoo.com"; + String hostName2 = "docker11.test.yahoo.com"; ContainerName containerName1 = new ContainerName("test-container-1"); ContainerName containerName2 = new ContainerName("test-container-2"); InetAddress inetAddress1 = Inet6Address.getByName("fe80::10"); diff --git 
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.dockerapi.metrics;

import com.yahoo.metrics.simple.MetricReceiver;
import org.junit.Test;

import java.util.Map;

import static org.junit.Assert.assertEquals;

/**
 * Tests for {@link MetricReceiverWrapper}: default values, counter accumulation, gauge
 * last-value semantics, and idempotent re-declaration keyed on (dimensions, name).
 *
 * All assertEquals calls use JUnit's (expected, actual) argument order so failure
 * messages read correctly.
 *
 * @author valerijf
 */
public class MetricReceiverWrapperTest {
    private static final Dimensions hostDimension = new Dimensions.Builder().add("host", "abc.yahoo.com").build();

    @Test
    public void testDefaultValue() {
        MetricReceiverWrapper metricReceiver = new MetricReceiverWrapper(MetricReceiver.nullImplementation);
        metricReceiver.declareCounter(hostDimension, "some.name");

        // A freshly declared counter reports 0.
        assertEquals(0L, metricReceiver.getMetricsForDimension(hostDimension).get("some.name"));
    }

    @Test
    public void testSimpleIncrementMetric() {
        MetricReceiverWrapper metricReceiver = new MetricReceiverWrapper(MetricReceiver.nullImplementation);
        CounterWrapper counter = metricReceiver.declareCounter(hostDimension, "a_counter.value");

        counter.add(5);
        counter.add(8);

        Map<String, Number> latestMetrics = metricReceiver.getMetricsForDimension(hostDimension);
        assertEquals("Expected only 1 metric value to be set", 1, latestMetrics.size());
        assertEquals(13L, latestMetrics.get("a_counter.value")); // 5 + 8
    }

    @Test
    public void testSimpleGauge() {
        MetricReceiverWrapper metricReceiver = new MetricReceiverWrapper(MetricReceiver.nullImplementation);
        GaugeWrapper gauge = metricReceiver.declareGauge(hostDimension, "test.gauge");

        gauge.sample(42);
        gauge.sample(-342.23);

        // A gauge keeps only the most recent sample.
        Map<String, Number> latestMetrics = metricReceiver.getMetricsForDimension(hostDimension);
        assertEquals("Expected only 1 metric value to be set", 1, latestMetrics.size());
        assertEquals(-342.23, latestMetrics.get("test.gauge"));
    }

    @Test
    public void testRedeclaringSameGauge() {
        MetricReceiverWrapper metricReceiver = new MetricReceiverWrapper(MetricReceiver.nullImplementation);
        GaugeWrapper gauge = metricReceiver.declareGauge(hostDimension, "test.gauge");
        gauge.sample(42);

        // Same as hostDimension, but new instance — declaration must be idempotent on equality.
        Dimensions newDimension = new Dimensions.Builder().add("host", "abc.yahoo.com").build();
        GaugeWrapper newGauge = metricReceiver.declareGauge(newDimension, "test.gauge");
        newGauge.sample(56);

        assertEquals(56., metricReceiver.getMetricsForDimension(hostDimension).get("test.gauge"));
    }

    @Test
    public void testSameMetricNameButDifferentDimensions() {
        MetricReceiverWrapper metricReceiver = new MetricReceiverWrapper(MetricReceiver.nullImplementation);
        GaugeWrapper gauge = metricReceiver.declareGauge(hostDimension, "test.gauge");
        gauge.sample(42);

        // Not the same as hostDimension — must get its own independent gauge.
        Dimensions newDimension = new Dimensions.Builder().add("host", "abcd.yahoo.com").build();
        GaugeWrapper newGauge = metricReceiver.declareGauge(newDimension, "test.gauge");
        newGauge.sample(56);

        assertEquals(42., metricReceiver.getMetricsForDimension(hostDimension).get("test.gauge"));
        assertEquals(56., metricReceiver.getMetricsForDimension(newDimension).get("test.gauge"));
    }
}
package com.yahoo.document.json;

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
import com.yahoo.document.DataType;
import com.yahoo.document.Document;
import com.yahoo.document.DocumentUpdate;
import com.yahoo.document.annotation.AnnotationReference;
import com.yahoo.document.datatypes.Array;
import com.yahoo.document.datatypes.ByteFieldValue;
import com.yahoo.document.datatypes.CollectionFieldValue;
import com.yahoo.document.datatypes.DoubleFieldValue;
import com.yahoo.document.datatypes.FieldValue;
import com.yahoo.document.datatypes.FloatFieldValue;
import com.yahoo.document.datatypes.IntegerFieldValue;
import com.yahoo.document.datatypes.LongFieldValue;
import com.yahoo.document.datatypes.MapFieldValue;
import com.yahoo.document.datatypes.PredicateFieldValue;
import com.yahoo.document.datatypes.Raw;
import com.yahoo.document.datatypes.StringFieldValue;
import com.yahoo.document.datatypes.Struct;
import com.yahoo.document.datatypes.StructuredFieldValue;
import com.yahoo.document.datatypes.TensorFieldValue;
import com.yahoo.document.datatypes.WeightedSet;
import com.yahoo.document.serialization.DocumentUpdateWriter;
import com.yahoo.document.serialization.FieldWriter;
import com.yahoo.document.update.AddValueUpdate;
import com.yahoo.document.update.ArithmeticValueUpdate;
import com.yahoo.document.update.AssignValueUpdate;
import com.yahoo.document.update.ClearValueUpdate;
import com.yahoo.document.update.FieldUpdate;
import com.yahoo.document.update.MapValueUpdate;
import com.yahoo.document.update.RemoveValueUpdate;
import com.yahoo.document.update.ValueUpdate;
import com.yahoo.vespa.objects.FieldBase;
import com.yahoo.vespa.objects.Serializer;

import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.ArrayList;

import static com.yahoo.document.json.JsonSerializationHelper.*;

/**
 * The DocumentUpdateJsonSerializer utility class is used to serialize a DocumentUpdate instance using the JSON format described in
 * <a href="https://git.corp.yahoo.com/pages/vespa/documentation/documentation/reference/document-json-format.html#update">Document JSON Format: The Update Structure</a>
 *
 * The top-level JSON shape is {"update": &lt;document id&gt;, "condition"?: &lt;selection&gt;, "fields": {...}}.
 * Not thread-safe: each instance wraps a single JsonGenerator.
 *
 * @see #serialize(com.yahoo.document.DocumentUpdate)
 * @author Vegard Sjonfjell
 */
public class DocumentUpdateJsonSerializer
{
    private final JsonFactory jsonFactory = new JsonFactory();
    private final JsonDocumentUpdateWriter writer = new JsonDocumentUpdateWriter();
    private JsonGenerator generator;

    /**
     * Instantiate a DocumentUpdateJsonSerializer that outputs JSON to an OutputStream
     */
    public DocumentUpdateJsonSerializer(OutputStream outputStream) {
        // wrapIOException converts checked IOException into JsonSerializationException (see JsonSerializationHelper)
        wrapIOException(() -> generator = jsonFactory.createGenerator(outputStream));
    }

    /**
     * Instantiate a DocumentUpdateJsonSerializer that writes JSON using existing JsonGenerator
     */
    public DocumentUpdateJsonSerializer(JsonGenerator generator) {
        this.generator = generator;
    }

    /**
     * Serialize a DocumentUpdate tree to JSON
     */
    public void serialize(DocumentUpdate update) {
        writer.write(update);
    }

    /**
     * Visitor that walks the DocumentUpdate tree and emits JSON through {@code generator}.
     * Implements both DocumentUpdateWriter (update-level operations) and FieldWriter
     * (field-value serialization, mostly delegated to JsonSerializationHelper).
     */
    private class JsonDocumentUpdateWriter implements DocumentUpdateWriter, FieldWriter {
        @Override
        public void write(DocumentUpdate update) {
            wrapIOException(() -> {
                generator.writeStartObject();
                generator.writeStringField("update", update.getId().toString());

                // "condition" is only emitted when a test-and-set selection is present.
                if (update.getCondition().isPresent()) {
                    generator.writeStringField("condition", update.getCondition().getSelection());
                }

                generator.writeObjectFieldStart("fields");
                for (FieldUpdate up : update.getFieldUpdates()) {
                    up.serialize(this);
                }
                generator.writeEndObject();

                generator.writeEndObject();
                generator.flush();
            });
        }

        @Override
        public void write(FieldUpdate fieldUpdate) {
            wrapIOException(() -> {
                generator.writeObjectFieldStart(fieldUpdate.getField().getName());

                // remove/add value updates are grouped into JSON arrays ("remove": [...], "add": [...]);
                // all other value-update kinds serialize themselves inline.
                ArrayList<ValueUpdate> removeValueUpdates = new ArrayList<>();
                ArrayList<ValueUpdate> addValueUpdates = new ArrayList<>();

                final DataType dataType = fieldUpdate.getField().getDataType();
                for (ValueUpdate valueUpdate : fieldUpdate.getValueUpdates()) {
                    if (valueUpdate instanceof RemoveValueUpdate) {
                        removeValueUpdates.add(valueUpdate);
                    } else if (valueUpdate instanceof AddValueUpdate) {
                        addValueUpdates.add(valueUpdate);
                    } else {
                        valueUpdate.serialize(this, dataType);
                    }
                }

                writeAddOrRemoveValueUpdates("remove", removeValueUpdates, dataType);
                writeAddOrRemoveValueUpdates("add", addValueUpdates, dataType);

                generator.writeEndObject();
            });
        }

        /** Writes the collected remove/add updates as a JSON array field; no-op when empty. */
        private void writeAddOrRemoveValueUpdates(String arrayFieldName, ArrayList<ValueUpdate> valueUpdates, DataType dataType) throws IOException {
            if (!valueUpdates.isEmpty()) {
                generator.writeArrayFieldStart(arrayFieldName);
                for (ValueUpdate valueUpdate : valueUpdates) {
                    valueUpdate.serialize(this, dataType);
                }
                generator.writeEndArray();
            }
        }

        @Override
        public void write(AddValueUpdate update, DataType superType) {
            // Only the value is written here; the enclosing "add" array is produced by the caller.
            update.getValue().serialize(this);
        }

        /* This is the 'match' operation */
        @Override
        public void write(MapValueUpdate update, DataType superType) {
            wrapIOException(() -> {
                generator.writeObjectFieldStart("match");
                generator.writeFieldName("element");
                update.getValue().serialize(null, this);
                update.getUpdate().serialize(this, superType);
                generator.writeEndObject();
            });
        }

        @Override
        public void write(ArithmeticValueUpdate update) {
            // Maps the arithmetic operator to its JSON operation key, then writes the operand.
            final ArithmeticValueUpdate.Operator operator = update.getOperator();
            final String operationKey;

            switch (operator) {
                case ADD:
                    operationKey = "increment";
                    break;
                case DIV:
                    operationKey = "divide";
                    break;
                case MUL:
                    operationKey = "multiply";
                    break;
                case SUB:
                    operationKey = "decrement";
                    break;
                default:
                    // NOTE(review): operator.name appears to be a public field on the project's
                    // Operator type (not Enum.name()) — confirm against ArithmeticValueUpdate.
                    throw new RuntimeException(String.format("Unrecognized arithmetic operator '%s'", operator.name));
            }

            wrapIOException(() -> generator.writeFieldName(operationKey));
            update.getValue().serialize(this);
        }

        @Override
        public void write(AssignValueUpdate update, DataType superType) {
            wrapIOException(() -> generator.writeFieldName("assign"));
            update.getValue().serialize(null, this);
        }

        @Override
        public void write(RemoveValueUpdate update, DataType superType) {
            update.getValue().serialize(null, this);
        }

        @Override
        public void write(ClearValueUpdate clearValueUpdate, DataType superType) {
            // Clear is expressed as assigning null.
            wrapIOException(() -> generator.writeNullField("assign"));
        }

        @Override
        public void write(FieldBase field, FieldValue value) {
            // Fallback for field-value types without a dedicated overload below.
            throw new JsonSerializationException(String.format("Serialization of field values of type %s is not supported", value.getClass().getName()));
        }

        @Override
        public void write(FieldBase field, Document value) {
            throw new JsonSerializationException("Serialization of 'Document fields' is not supported");
        }

        // The overloads below delegate to the static helpers in JsonSerializationHelper
        // (imported via static wildcard), passing `this` where nested values must recurse.

        @Override
        public <T extends FieldValue> void write(FieldBase field, Array<T> array) {
            serializeArrayField(this, generator, field, array);
        }

        @Override
        public <K extends FieldValue, V extends FieldValue> void write(FieldBase field, MapFieldValue<K, V> map) {
            serializeMapField(this, generator, field, map);
        }

        @Override
        public void write(FieldBase field, ByteFieldValue value) {
            serializeByteField(generator, field, value);
        }

        @Override
        public <T extends FieldValue> void write(FieldBase field, CollectionFieldValue<T> value) {
            serializeCollectionField(this, generator, field, value);
        }

        @Override
        public void write(FieldBase field, DoubleFieldValue value) {
            serializeDoubleField(generator, field, value);
        }

        @Override
        public void write(FieldBase field, FloatFieldValue value) {
            serializeFloatField(generator, field, value);
        }

        @Override
        public void write(FieldBase field, IntegerFieldValue value) {
            serializeIntField(generator, field, value);
        }

        @Override
        public void write(FieldBase field, LongFieldValue value) {
            serializeLongField(generator, field, value);
        }

        @Override
        public void write(FieldBase field, Raw value) {
            serializeRawField(generator, field, value);
        }

        @Override
        public void write(FieldBase field, PredicateFieldValue value) {
            serializePredicateField(generator, field, value);
        }

        @Override
        public void write(FieldBase field, StringFieldValue value) {
            serializeStringField(generator, field, value);
        }

        @Override
        public void write(FieldBase field, TensorFieldValue value) {
            serializeTensorField(generator, field, value);
        }

        @Override
        public void write(FieldBase field, Struct value) {
            serializeStructField(this, generator, field, value);
        }

        @Override
        public void write(FieldBase field, StructuredFieldValue value) {
            serializeStructuredField(this, generator, field, value);
        }

        @Override
        public <T extends FieldValue> void write(FieldBase field, WeightedSet<T> weightedSet) {
            serializeWeightedSet(generator, field, weightedSet);
        }

        @Override
        public void write(FieldBase field, AnnotationReference value) {
            // Serialization of annotations are not implemented
        }

        // Serializer primitive hooks: each writes one value (with the field name when non-null)
        // and returns `this` for chaining, as the Serializer interface requires.

        @Override
        public Serializer putByte(FieldBase field, byte value) {
            serializeByte(generator, field, value);
            return this;
        }

        @Override
        public Serializer putShort(FieldBase field, short value) {
            serializeShort(generator, field, value);
            return this;
        }

        @Override
        public Serializer putInt(FieldBase field, int value) {
            serializeInt(generator, field, value);
            return this;
        }

        @Override
        public Serializer putLong(FieldBase field, long value) {
            serializeLong(generator, field, value);
            return this;
        }

        @Override
        public Serializer putFloat(FieldBase field, float value) {
            serializeFloat(generator, field, value);
            return this;
        }

        @Override
        public Serializer putDouble(FieldBase field, double value) {
            serializeDouble(generator, field, value);
            return this;
        }

        @Override
        public Serializer put(FieldBase field, byte[] value) {
            serializeByteArray(generator, field, value);
            return this;
        }

        @Override
        public Serializer put(FieldBase field, ByteBuffer value) {
            serializeByteBuffer(generator, field, value);
            return this;
        }

        @Override
        public Serializer put(FieldBase field, String value) {
            serializeString(generator, field, value);
            return this;
        }
    }
}
\ No newline at end of file diff --git a/document/src/main/java/com/yahoo/document/json/JsonSerializationHelper.java b/document/src/main/java/com/yahoo/document/json/JsonSerializationHelper.java new file mode 100644 index 00000000000..639c6ad5300 --- /dev/null +++ b/document/src/main/java/com/yahoo/document/json/JsonSerializationHelper.java @@ -0,0 +1,297 @@ +package com.yahoo.document.json; + +import com.fasterxml.jackson.core.JsonGenerator; +import com.yahoo.document.Field; +import com.yahoo.document.PositionDataType; +import com.yahoo.document.datatypes.Array; +import com.yahoo.document.datatypes.ByteFieldValue; +import com.yahoo.document.datatypes.CollectionFieldValue; +import com.yahoo.document.datatypes.DoubleFieldValue; +import com.yahoo.document.datatypes.FieldValue; +import com.yahoo.document.datatypes.FloatFieldValue; +import com.yahoo.document.datatypes.IntegerFieldValue; +import com.yahoo.document.datatypes.LongFieldValue; +import com.yahoo.document.datatypes.MapFieldValue; +import com.yahoo.document.datatypes.PredicateFieldValue; +import com.yahoo.document.datatypes.Raw; +import com.yahoo.document.datatypes.StringFieldValue; +import com.yahoo.document.datatypes.Struct; +import com.yahoo.document.datatypes.StructuredFieldValue; +import com.yahoo.document.datatypes.TensorFieldValue; +import com.yahoo.document.datatypes.WeightedSet; +import com.yahoo.document.serialization.FieldWriter; +import com.yahoo.tensor.Tensor; +import com.yahoo.tensor.TensorAddress; +import com.yahoo.vespa.objects.FieldBase; +import org.apache.commons.codec.binary.Base64; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; + +/** + * @author Steinar Knutsen + * @author Vegard Sjonfjell + */ +public class JsonSerializationHelper { + private final static Base64 base64Encoder = new Base64(); + + static class JsonSerializationException extends RuntimeException { + public 
JsonSerializationException(Exception base) { + super(base); + } + + public JsonSerializationException(String message) { + super(message); + } + } + + @FunctionalInterface + static interface SubroutineThrowingIOException { + void invoke() throws IOException; + } + + static void wrapIOException(SubroutineThrowingIOException lambda) { + try { + lambda.invoke(); + } catch (IOException e) { + throw new JsonSerializationException(e); + } + } + + public static void serializeTensorField(JsonGenerator generator, FieldBase field, TensorFieldValue value) { + wrapIOException(() -> { + fieldNameIfNotNull(generator, field); + generator.writeStartObject(); + + if (value.getTensor().isPresent()) { + Tensor tensor = value.getTensor().get(); + serializeTensorDimensions(generator, tensor.dimensions()); + serializeTensorCells(generator, tensor.cells()); + } + generator.writeEndObject(); + }); + } + + private static void serializeTensorDimensions(JsonGenerator generator, Set<String> dimensions) throws IOException { + generator.writeArrayFieldStart(JsonReader.TENSOR_DIMENSIONS); + for (String dimension : dimensions) { + generator.writeString(dimension); + } + + generator.writeEndArray(); + } + + private static void serializeTensorCells(JsonGenerator generator, Map<TensorAddress, Double> cells) throws IOException { + generator.writeArrayFieldStart(JsonReader.TENSOR_CELLS); + for (Map.Entry<TensorAddress, Double> cell : cells.entrySet()) { + generator.writeStartObject(); + serializeTensorAddress(generator, cell.getKey()); + generator.writeNumberField(JsonReader.TENSOR_VALUE, cell.getValue()); + generator.writeEndObject(); + } + + generator.writeEndArray(); + } + + private static void serializeTensorAddress(JsonGenerator generator, TensorAddress address) throws IOException { + generator.writeObjectFieldStart(JsonReader.TENSOR_ADDRESS); + for (TensorAddress.Element element : address.elements()) { + generator.writeStringField(element.dimension(), element.label()); + } + + 
generator.writeEndObject(); + } + + + public static void serializeStringField(JsonGenerator generator, FieldBase field, StringFieldValue value) { + // Hide empty strings + if (value.getString().length() == 0) { + return; + } + + serializeString(generator, field, value.getString()); + } + + public static void serializeStructuredField(FieldWriter fieldWriter, JsonGenerator generator, FieldBase field, StructuredFieldValue value) { + fieldNameIfNotNull(generator, field); + + wrapIOException(() -> { + generator.writeStartObject(); + Iterator<Map.Entry<Field, FieldValue>> i = value.iterator(); + + while (i.hasNext()) { + Map.Entry<Field, FieldValue> entry = i.next(); + entry.getValue().serialize(entry.getKey(), fieldWriter); + } + + generator.writeEndObject(); + }); + } + + public static void serializeStructField(FieldWriter fieldWriter, JsonGenerator generator, FieldBase field, Struct value) { + if (value.getDataType() == PositionDataType.INSTANCE) { + serializeString(generator, field, PositionDataType.renderAsString(value)); + return; + } + + serializeStructuredField(fieldWriter, generator, field, value); + } + + public static <T extends FieldValue> void serializeWeightedSet(JsonGenerator generator, FieldBase field, WeightedSet<T> value) { + fieldNameIfNotNull(generator, field); + + wrapIOException(() -> { + generator.writeStartObject(); + + for (T key : value.keySet()) { + Integer weight = value.get(key); + // key.toString() is according to spec + generator.writeNumberField(key.toString(), weight); + } + + generator.writeEndObject(); + }); + } + + public static <T extends FieldValue> void serializeCollectionField(FieldWriter fieldWriter, JsonGenerator generator, FieldBase field, CollectionFieldValue<T> value) { + fieldNameIfNotNull(generator, field); + + wrapIOException(() -> { + generator.writeStartArray(); + Iterator<T> i = value.iterator(); + + while (i.hasNext()) { + i.next().serialize(null, fieldWriter); + } + + generator.writeEndArray(); + }); + } + + + public 
static <K extends FieldValue, V extends FieldValue> void serializeMapField(FieldWriter fieldWriter, JsonGenerator generator, FieldBase field, MapFieldValue<K, V> map) { + fieldNameIfNotNull(generator, field); + wrapIOException(() -> { + generator.writeStartArray(); + + for (Map.Entry<K, V> entry : map.entrySet()) { + generator.writeStartObject(); + generator.writeFieldName(JsonReader.MAP_KEY); + entry.getKey().serialize(null, fieldWriter); + generator.writeFieldName(JsonReader.MAP_VALUE); + entry.getValue().serialize(null, fieldWriter); + generator.writeEndObject(); + } + + generator.writeEndArray(); + }); + } + + public static <T extends FieldValue> void serializeArrayField(FieldWriter fieldWriter, JsonGenerator generator, FieldBase field, Array<T> value) { + wrapIOException(() -> { + fieldNameIfNotNull(generator, field); + generator.writeStartArray(); + + for (T elem : value) { + elem.serialize(null, fieldWriter); + } + + generator.writeEndArray(); + }); + } + + public static void serializeDoubleField(JsonGenerator generator, FieldBase field, DoubleFieldValue value) { + serializeDouble(generator, field, value.getDouble()); + } + + public static void serializeFloatField(JsonGenerator generator, FieldBase field, FloatFieldValue value) { + serializeFloat(generator, field, value.getFloat()); + } + + public static void serializeIntField(JsonGenerator generator, FieldBase field, IntegerFieldValue value) { + serializeInt(generator, field, value.getInteger()); + } + + public static void serializeLongField(JsonGenerator generator, FieldBase field, LongFieldValue value) { + serializeLong(generator, field, value.getLong()); + } + + public static void serializeByteField(JsonGenerator generator, FieldBase field, ByteFieldValue value) { + serializeByte(generator, field, value.getByte()); + } + + public static void serializePredicateField(JsonGenerator generator, FieldBase field, PredicateFieldValue value){ + serializeString(generator, field, value.toString()); + } + + public 
static void serializeRawField(JsonGenerator generator, FieldBase field, Raw raw) { + serializeByteBuffer(generator, field, raw.getByteBuffer()); + } + + public static void serializeString(JsonGenerator generator, FieldBase field, String value) { + if (value.length() == 0) { + return; + } + + fieldNameIfNotNull(generator, field); + wrapIOException(() -> generator.writeString(value)); + } + + public static void serializeByte(JsonGenerator generator, FieldBase field, byte value) { + fieldNameIfNotNull(generator, field); + wrapIOException(() -> generator.writeNumber(value)); + } + + public static void serializeShort(JsonGenerator generator, FieldBase field, short value) { + fieldNameIfNotNull(generator, field); + wrapIOException(() -> generator.writeNumber(value)); + } + + public static void serializeInt(JsonGenerator generator, FieldBase field, int value) { + fieldNameIfNotNull(generator, field); + wrapIOException(() -> generator.writeNumber(value)); + } + + public static void serializeLong(JsonGenerator generator, FieldBase field, long value) { + fieldNameIfNotNull(generator, field); + wrapIOException(() -> generator.writeNumber(value)); + } + + public static void serializeFloat(JsonGenerator generator, FieldBase field, float value) { + fieldNameIfNotNull(generator, field); + wrapIOException(() -> generator.writeNumber(value)); + } + + public static void serializeDouble(JsonGenerator generator, FieldBase field, double value) { + fieldNameIfNotNull(generator, field); + wrapIOException(() -> generator.writeNumber(value)); + } + + public static void serializeByteBuffer(JsonGenerator generator, FieldBase field, ByteBuffer raw) { + fieldNameIfNotNull(generator, field); + + final byte[] data = new byte[raw.remaining()]; + final int origPosition = raw.position(); + + // base64encoder has no encode methods with offset and + // limit, so no use trying to get at the backing array if + // available anyway + raw.get(data); + raw.position(origPosition); + + wrapIOException(() -> 
generator.writeString(base64Encoder.encodeToString(data))); + } + + public static void serializeByteArray(JsonGenerator generator, FieldBase field, byte[] value) { + serializeByteBuffer(generator, field, ByteBuffer.wrap(value)); + } + + public static void fieldNameIfNotNull(JsonGenerator generator, FieldBase field) { + if (field != null) { + wrapIOException(() -> generator.writeFieldName(field.getName())); + } + } +} diff --git a/document/src/main/java/com/yahoo/document/json/JsonWriter.java b/document/src/main/java/com/yahoo/document/json/JsonWriter.java index 79a1c040cbc..420a6bb6669 100644 --- a/document/src/main/java/com/yahoo/document/json/JsonWriter.java +++ b/document/src/main/java/com/yahoo/document/json/JsonWriter.java @@ -1,34 +1,43 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.document.json; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.OutputStream; -import java.nio.ByteBuffer; -import java.util.Iterator; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; - -import com.yahoo.document.datatypes.*; -import com.yahoo.tensor.Tensor; -import com.yahoo.tensor.TensorAddress; -import org.apache.commons.codec.binary.Base64; - import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonGenerator; import com.yahoo.document.Document; import com.yahoo.document.DocumentId; import com.yahoo.document.DocumentType; import com.yahoo.document.Field; -import com.yahoo.document.PositionDataType; import com.yahoo.document.annotation.AnnotationReference; +import com.yahoo.document.datatypes.Array; +import com.yahoo.document.datatypes.ByteFieldValue; +import com.yahoo.document.datatypes.CollectionFieldValue; +import com.yahoo.document.datatypes.DoubleFieldValue; +import com.yahoo.document.datatypes.FieldValue; +import com.yahoo.document.datatypes.FloatFieldValue; +import 
com.yahoo.document.datatypes.IntegerFieldValue; +import com.yahoo.document.datatypes.LongFieldValue; +import com.yahoo.document.datatypes.MapFieldValue; +import com.yahoo.document.datatypes.PredicateFieldValue; +import com.yahoo.document.datatypes.Raw; +import com.yahoo.document.datatypes.StringFieldValue; +import com.yahoo.document.datatypes.Struct; +import com.yahoo.document.datatypes.StructuredFieldValue; +import com.yahoo.document.datatypes.TensorFieldValue; +import com.yahoo.document.datatypes.WeightedSet; import com.yahoo.document.serialization.DocumentWriter; import com.yahoo.vespa.objects.FieldBase; import com.yahoo.vespa.objects.Serializer; - import edu.umd.cs.findbugs.annotations.NonNull; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.util.Iterator; +import java.util.Map; + +import static com.yahoo.document.json.JsonSerializationHelper.*; + /** * Serialize Document and other FieldValue instances as JSON. * @@ -38,7 +47,6 @@ public class JsonWriter implements DocumentWriter { private static final JsonFactory jsonFactory = new JsonFactory(); private final JsonGenerator generator; - private final Base64 base64Encoder = new Base64(); // I really hate exception unsafe constructors, but the alternative // requires generator to not be a final @@ -85,24 +93,26 @@ public class JsonWriter implements DocumentWriter { */ @Override public void write(FieldBase field, FieldValue value) { - throw new UnsupportedOperationException("Serializing " - + value.getClass().getName() + " is not supported."); + throw new UnsupportedOperationException("Serializing " + value.getClass().getName() + " is not supported."); } @Override public void write(FieldBase field, Document value) { try { - fieldNameIfNotNull(field); + fieldNameIfNotNull(generator, field); generator.writeStartObject(); + // this makes it impossible to refeed directly, not sure what's correct // perhaps just change to "put"? 
generator.writeStringField("id", value.getId().toString()); generator.writeObjectFieldStart(JsonReader.FIELDS); - for (Iterator<Entry<Field, FieldValue>> i = value.iterator(); i - .hasNext();) { - Entry<Field, FieldValue> entry = i.next(); + + Iterator<Map.Entry<Field, FieldValue>> i = value.iterator(); + while (i.hasNext()) { + Map.Entry<Field, FieldValue> entry = i.next(); entry.getValue().serialize(entry.getKey(), this); } + generator.writeEndObject(); generator.writeEndObject(); generator.flush(); @@ -113,199 +123,77 @@ public class JsonWriter implements DocumentWriter { @Override public <T extends FieldValue> void write(FieldBase field, Array<T> value) { - try { - fieldNameIfNotNull(field); - generator.writeStartArray(); - for (Iterator<T> i = value.iterator(); i.hasNext();) { - i.next().serialize(null, this); - } - generator.writeEndArray(); - } catch (IOException e) { - throw new RuntimeException(e); - } - - } - - private void fieldNameIfNotNull(FieldBase field) { - if (field != null) { - try { - generator.writeFieldName(field.getName()); - } catch (IOException e) { - throw new RuntimeException(e); - } - } + serializeArrayField(this, generator, field, value); } @Override - public <K extends FieldValue, V extends FieldValue> void write( - FieldBase field, MapFieldValue<K, V> map) { - fieldNameIfNotNull(field); - try { - generator.writeStartArray(); - for (Map.Entry<K, V> entry : map.entrySet()) { - generator.writeStartObject(); - generator.writeFieldName(JsonReader.MAP_KEY); - entry.getKey().serialize(null, this); - generator.writeFieldName(JsonReader.MAP_VALUE); - entry.getValue().serialize(null, this); - generator.writeEndObject(); - } - generator.writeEndArray(); - } catch (IOException e) { - throw new RuntimeException(e); - } + public <K extends FieldValue, V extends FieldValue> void write(FieldBase field, MapFieldValue<K, V> map) { + serializeMapField(this, generator, field, map); } @Override public void write(FieldBase field, ByteFieldValue value) { - 
putByte(field, value.getByte()); + serializeByteField(generator, field, value); } @Override - public <T extends FieldValue> void write(FieldBase field, - CollectionFieldValue<T> value) { - fieldNameIfNotNull(field); - try { - generator.writeStartArray(); - for (Iterator<T> i = value.iterator(); i.hasNext();) { - i.next().serialize(null, this); - } - generator.writeEndArray(); - } catch (IOException e) { - throw new RuntimeException(e); - } + public <T extends FieldValue> void write(FieldBase field, CollectionFieldValue<T> value) { + serializeCollectionField(this, generator, field, value); } @Override public void write(FieldBase field, DoubleFieldValue value) { - putDouble(field, value.getDouble()); + serializeDoubleField(generator, field, value); } @Override public void write(FieldBase field, FloatFieldValue value) { - putFloat(field, value.getFloat()); + serializeFloatField(generator, field, value); } @Override public void write(FieldBase field, IntegerFieldValue value) { - putInt(field, value.getInteger()); + serializeIntField(generator, field, value); } @Override public void write(FieldBase field, LongFieldValue value) { - putLong(field, value.getLong()); + serializeLongField(generator, field, value); } @Override public void write(FieldBase field, Raw value) { - put(field, value.getByteBuffer()); + serializeRawField(generator, field, value); } @Override public void write(FieldBase field, PredicateFieldValue value) { - put(field, value.toString()); + serializePredicateField(generator, field, value); } @Override public void write(FieldBase field, StringFieldValue value) { - put(field, value.getString()); + serializeStringField(generator, field, value); } @Override public void write(FieldBase field, TensorFieldValue value) { - try { - fieldNameIfNotNull(field); - generator.writeStartObject(); - if (value.getTensor().isPresent()) { - Tensor tensor = value.getTensor().get(); - writeTensorDimensions(tensor.dimensions()); - writeTensorCells(tensor.cells()); - } - 
generator.writeEndObject(); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - private void writeTensorDimensions(Set<String> dimensions) throws IOException { - generator.writeArrayFieldStart(JsonReader.TENSOR_DIMENSIONS); - for (String dimension : dimensions) { - generator.writeString(dimension); - } - generator.writeEndArray(); - } - - private void writeTensorCells(Map<TensorAddress, Double> cells) throws IOException { - generator.writeArrayFieldStart(JsonReader.TENSOR_CELLS); - for (Map.Entry<TensorAddress, Double> cell : cells.entrySet()) { - generator.writeStartObject(); - writeTensorAddress(cell.getKey()); - generator.writeNumberField(JsonReader.TENSOR_VALUE, cell.getValue()); - generator.writeEndObject(); - } - generator.writeEndArray(); - } - - private void writeTensorAddress(TensorAddress address) throws IOException { - generator.writeObjectFieldStart(JsonReader.TENSOR_ADDRESS); - for (TensorAddress.Element element : address.elements()) { - generator.writeStringField(element.dimension(), element.label()); - } - generator.writeEndObject(); + serializeTensorField(generator, field, value); } @Override public void write(FieldBase field, Struct value) { - if (value.getDataType() == PositionDataType.INSTANCE) { - put(field, PositionDataType.renderAsString(value)); - return; - } - fieldNameIfNotNull(field); - try { - generator.writeStartObject(); - for (Iterator<Entry<Field, FieldValue>> i = value.iterator(); i - .hasNext();) { - Entry<Field, FieldValue> entry = i.next(); - entry.getValue().serialize(entry.getKey(), this); - } - generator.writeEndObject(); - } catch (IOException e) { - throw new RuntimeException(e); - } + serializeStructField(this, generator, field, value); } @Override public void write(FieldBase field, StructuredFieldValue value) { - fieldNameIfNotNull(field); - try { - generator.writeStartObject(); - for (Iterator<Entry<Field, FieldValue>> i = value.iterator(); i - .hasNext();) { - Entry<Field, FieldValue> entry = 
i.next(); - entry.getValue().serialize(entry.getKey(), this); - } - generator.writeEndObject(); - } catch (IOException e) { - throw new RuntimeException(e); - } + serializeStructuredField(this, generator, field, value); } @Override - public <T extends FieldValue> void write(FieldBase field, - WeightedSet<T> value) { - fieldNameIfNotNull(field); - try { - generator.writeStartObject(); - // entrySet() is deprecated and there is no entry iterator - for (T key : value.keySet()) { - Integer weight = value.get(key); - // key.toString() is according to spec - generator.writeNumberField(key.toString(), weight); - } - generator.writeEndObject(); - } catch (IOException e) { - throw new RuntimeException(e); - } + public <T extends FieldValue> void write(FieldBase field, WeightedSet<T> value) { + serializeWeightedSet(generator, field, value); } @Override @@ -315,110 +203,6 @@ public class JsonWriter implements DocumentWriter { } @Override - public Serializer putByte(FieldBase field, byte value) { - fieldNameIfNotNull(field); - try { - generator.writeNumber(value); - } catch (IOException e) { - throw new RuntimeException(e); - } - return this; - } - - @Override - public Serializer putShort(FieldBase field, short value) { - fieldNameIfNotNull(field); - try { - generator.writeNumber(value); - } catch (IOException e) { - throw new RuntimeException(e); - } - return this; - } - - @Override - public Serializer putInt(FieldBase field, int value) { - fieldNameIfNotNull(field); - try { - generator.writeNumber(value); - } catch (IOException e) { - throw new RuntimeException(e); - } - return this; - } - - @Override - public Serializer putLong(FieldBase field, long value) { - fieldNameIfNotNull(field); - try { - generator.writeNumber(value); - } catch (IOException e) { - throw new RuntimeException(e); - } - return this; - } - - @Override - public Serializer putFloat(FieldBase field, float value) { - fieldNameIfNotNull(field); - try { - generator.writeNumber(value); - } catch (IOException 
e) { - throw new RuntimeException(e); - } - return this; - } - - @Override - public Serializer putDouble(FieldBase field, double value) { - fieldNameIfNotNull(field); - try { - generator.writeNumber(value); - } catch (IOException e) { - throw new RuntimeException(e); - } - return this; - } - - @Override - public Serializer put(FieldBase field, byte[] value) { - return put(field, ByteBuffer.wrap(value)); - } - - @Override - public Serializer put(FieldBase field, ByteBuffer raw) { - final byte[] data = new byte[raw.remaining()]; - final int origPosition = raw.position(); - - fieldNameIfNotNull(field); - // base64encoder has no encode methods with offset and - // limit, so no use trying to get at the backing array if - // available anyway - raw.get(data); - raw.position(origPosition); - try { - generator.writeString(base64Encoder.encodeToString(data)); - } catch (IOException e) { - throw new RuntimeException(e); - } - return this; - } - - @Override - public Serializer put(FieldBase field, String value) { - if (value.length() == 0) { - return this; - } - fieldNameIfNotNull(field); - try { - generator.writeString(value); - } catch (IOException e) { - throw new RuntimeException(e); - } - return this; - } - - @Override public void write(Document document) { write(null, document); } @@ -470,4 +254,58 @@ public class JsonWriter implements DocumentWriter { } return out.toByteArray(); } + + @Override + public Serializer putByte(FieldBase field, byte value) { + serializeByte(generator, field, value); + return this; + } + + @Override + public Serializer putShort(FieldBase field, short value) { + serializeShort(generator, field, value); + return this; + } + + @Override + public Serializer putInt(FieldBase field, int value) { + serializeInt(generator, field, value); + return this; + } + + @Override + public Serializer putLong(FieldBase field, long value) { + serializeLong(generator, field, value); + return this; + } + + @Override + public Serializer putFloat(FieldBase field, 
float value) { + serializeFloat(generator, field, value); + return this; + } + + @Override + public Serializer putDouble(FieldBase field, double value) { + serializeDouble(generator, field, value); + return this; + } + + @Override + public Serializer put(FieldBase field, byte[] value) { + serializeByteArray(generator, field, value); + return this; + } + + @Override + public Serializer put(FieldBase field, ByteBuffer value) { + serializeByteBuffer(generator, field, value); + return this; + } + + @Override + public Serializer put(FieldBase field, String value) { + serializeString(generator, field, value); + return this; + } } diff --git a/document/src/main/java/com/yahoo/document/serialization/FieldWriter.java b/document/src/main/java/com/yahoo/document/serialization/FieldWriter.java index 4a269a704d2..244aa0d5a0d 100644 --- a/document/src/main/java/com/yahoo/document/serialization/FieldWriter.java +++ b/document/src/main/java/com/yahoo/document/serialization/FieldWriter.java @@ -3,7 +3,22 @@ package com.yahoo.document.serialization; import com.yahoo.document.Document; import com.yahoo.document.annotation.AnnotationReference; -import com.yahoo.document.datatypes.*; +import com.yahoo.document.datatypes.Array; +import com.yahoo.document.datatypes.ByteFieldValue; +import com.yahoo.document.datatypes.CollectionFieldValue; +import com.yahoo.document.datatypes.DoubleFieldValue; +import com.yahoo.document.datatypes.FieldValue; +import com.yahoo.document.datatypes.FloatFieldValue; +import com.yahoo.document.datatypes.IntegerFieldValue; +import com.yahoo.document.datatypes.LongFieldValue; +import com.yahoo.document.datatypes.MapFieldValue; +import com.yahoo.document.datatypes.PredicateFieldValue; +import com.yahoo.document.datatypes.Raw; +import com.yahoo.document.datatypes.StringFieldValue; +import com.yahoo.document.datatypes.Struct; +import com.yahoo.document.datatypes.StructuredFieldValue; +import com.yahoo.document.datatypes.TensorFieldValue; +import 
com.yahoo.document.datatypes.WeightedSet; import com.yahoo.vespa.objects.FieldBase; import com.yahoo.vespa.objects.Serializer; @@ -189,5 +204,4 @@ public interface FieldWriter extends Serializer { * field value */ void write(FieldBase field, AnnotationReference value); - } diff --git a/document/src/main/java/com/yahoo/document/serialization/VespaDocumentDeserializer42.java b/document/src/main/java/com/yahoo/document/serialization/VespaDocumentDeserializer42.java index 49e61f64e3d..9e764aae798 100644 --- a/document/src/main/java/com/yahoo/document/serialization/VespaDocumentDeserializer42.java +++ b/document/src/main/java/com/yahoo/document/serialization/VespaDocumentDeserializer42.java @@ -78,8 +78,8 @@ import static com.yahoo.text.Utf8.calculateStringPositions; * @deprecated Please use {@link com.yahoo.document.serialization.VespaDocumentDeserializerHead} instead for new code. * @author baldersheim */ -@Deprecated // OK: Don't remove on Vespa 6: Mail may have documents on this format still -// When removing: Move content of this class into VespaDocumentDeserializerHead (and subclass VespaDocumentSerializerHead in that) +@Deprecated // Remove on Vespa 7 +// When removing: Move content of this class into VespaDocumentDeserializerHead public class VespaDocumentDeserializer42 extends VespaDocumentSerializer42 implements DocumentDeserializer { private final Compressor compressor = new Compressor(); @@ -597,8 +597,8 @@ public class VespaDocumentDeserializer42 extends VespaDocumentSerializer42 imple DocumentType docType = manager.getDocumentType(new DataTypeName(docTypeName)); if (docType == null) { - throw new DeserializationException( - "No known document type with name " + new Utf8String(docTypeName).toString()); + throw new DeserializationException("No known document type with name " + + new Utf8String(docTypeName).toString()); } return docType; } diff --git a/document/src/main/java/com/yahoo/document/serialization/XmlDocumentWriter.java 
b/document/src/main/java/com/yahoo/document/serialization/XmlDocumentWriter.java index ffe6073cd6c..dc605fbe3d5 100644 --- a/document/src/main/java/com/yahoo/document/serialization/XmlDocumentWriter.java +++ b/document/src/main/java/com/yahoo/document/serialization/XmlDocumentWriter.java @@ -1,21 +1,36 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.document.serialization; -import java.nio.ByteBuffer; -import java.util.ArrayDeque; -import java.util.Deque; -import java.util.Iterator; -import java.util.Map; - import com.yahoo.document.Document; import com.yahoo.document.DocumentId; import com.yahoo.document.DocumentType; import com.yahoo.document.Field; import com.yahoo.document.annotation.AnnotationReference; -import com.yahoo.document.datatypes.*; +import com.yahoo.document.datatypes.Array; +import com.yahoo.document.datatypes.ByteFieldValue; +import com.yahoo.document.datatypes.CollectionFieldValue; +import com.yahoo.document.datatypes.DoubleFieldValue; +import com.yahoo.document.datatypes.FieldValue; +import com.yahoo.document.datatypes.FloatFieldValue; +import com.yahoo.document.datatypes.IntegerFieldValue; +import com.yahoo.document.datatypes.LongFieldValue; +import com.yahoo.document.datatypes.MapFieldValue; +import com.yahoo.document.datatypes.PredicateFieldValue; +import com.yahoo.document.datatypes.Raw; +import com.yahoo.document.datatypes.StringFieldValue; +import com.yahoo.document.datatypes.Struct; +import com.yahoo.document.datatypes.StructuredFieldValue; +import com.yahoo.document.datatypes.TensorFieldValue; +import com.yahoo.document.datatypes.WeightedSet; import com.yahoo.vespa.objects.FieldBase; import com.yahoo.vespa.objects.Serializer; +import java.nio.ByteBuffer; +import java.util.ArrayDeque; +import java.util.Deque; +import java.util.Iterator; +import java.util.Map; + // TODO: Just inline all use of XmlSerializationHelper when the toXml methods in FieldValue 
subclasses are to be removed // TODO: More cleanup, the put() methods generate a lot of superfluous objects (write should call put, not the other way around) // TODO: remove pingpong between XmlSerializationHelper and FieldValue, this will go away when the toXml methods go away diff --git a/document/src/test/java/com/yahoo/document/json/DocumentUpdateJsonSerializerTest.java b/document/src/test/java/com/yahoo/document/json/DocumentUpdateJsonSerializerTest.java new file mode 100644 index 00000000000..de483186d6c --- /dev/null +++ b/document/src/test/java/com/yahoo/document/json/DocumentUpdateJsonSerializerTest.java @@ -0,0 +1,343 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.document.json; + +import com.fasterxml.jackson.core.JsonFactory; +import com.yahoo.document.ArrayDataType; +import com.yahoo.document.DataType; +import com.yahoo.document.DocumentType; +import com.yahoo.document.DocumentTypeManager; +import com.yahoo.document.DocumentUpdate; +import com.yahoo.document.Field; +import com.yahoo.document.MapDataType; +import com.yahoo.document.PositionDataType; +import com.yahoo.document.WeightedSetDataType; +import com.yahoo.text.Utf8; +import org.junit.Test; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.InputStream; +import java.io.UnsupportedEncodingException; + +import static com.yahoo.test.json.JsonTestHelper.assertJsonEquals; +import static com.yahoo.test.json.JsonTestHelper.inputJson; + +/** + * @author Vegard Sjonfjell + */ +public class DocumentUpdateJsonSerializerTest { + + final static DocumentTypeManager types = new DocumentTypeManager(); + final static JsonFactory parserFactory = new JsonFactory(); + final static DocumentType docType = new DocumentType("doctype"); + + final static String DEFAULT_DOCUMENT_ID = "id:test:doctype::1"; + + static { + docType.addField(new Field("string_field", DataType.STRING)); + 
docType.addField(new Field("int_field", DataType.INT)); + docType.addField(new Field("float_field", DataType.FLOAT)); + docType.addField(new Field("double_field", DataType.DOUBLE)); + docType.addField(new Field("byte_field", DataType.BYTE)); + docType.addField(new Field("tensor_field", DataType.TENSOR)); + docType.addField(new Field("predicate_field", DataType.PREDICATE)); + docType.addField(new Field("raw_field", DataType.RAW)); + docType.addField(new Field("int_array", new ArrayDataType(DataType.INT))); + docType.addField(new Field("string_array", new ArrayDataType(DataType.STRING))); + docType.addField(new Field("int_set", new WeightedSetDataType(DataType.INT, true, true))); + docType.addField(new Field("string_set", new WeightedSetDataType(DataType.STRING, true, true))); + docType.addField(new Field("string_map", new MapDataType(DataType.STRING, DataType.STRING))); + docType.addField(new Field("singlepos_field", PositionDataType.INSTANCE)); + docType.addField(new Field("multipos_field", new ArrayDataType(PositionDataType.INSTANCE))); + types.registerDocumentType(docType); + } + + private static DocumentUpdate deSerializeDocumentUpdate(String jsonDoc, String docId) { + final InputStream rawDoc = new ByteArrayInputStream(Utf8.toBytes(jsonDoc)); + JsonReader reader = new JsonReader(types, rawDoc, parserFactory); + return (DocumentUpdate) reader.readSingleDocument(JsonReader.SupportedOperation.UPDATE, docId); + } + + private static String serializeDocumentUpdate(DocumentUpdate update) { + ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); + DocumentUpdateJsonSerializer serializer = new DocumentUpdateJsonSerializer(outputStream); + serializer.serialize(update); + + try { + return new String(outputStream.toByteArray(), "UTF-8"); + } catch (UnsupportedEncodingException e) { + throw new RuntimeException(e); + } + } + + private static void deSerializeAndSerializeJsonAndMatch(String jsonDoc) { + jsonDoc = jsonDoc.replaceFirst("DOCUMENT_ID", 
DEFAULT_DOCUMENT_ID); + DocumentUpdate update = deSerializeDocumentUpdate(jsonDoc, DEFAULT_DOCUMENT_ID); + assertJsonEquals(serializeDocumentUpdate(update), jsonDoc); + } + + @Test + public void testArithmeticUpdate() { + deSerializeAndSerializeJsonAndMatch(inputJson( + "{", + " 'update': 'DOCUMENT_ID',", + " 'fields': {", + " 'int_field': {", + " 'increment': 3.0", + " },", + " 'float_field': {", + " 'decrement': 1.5", + " },", + " 'double_field': {", + " 'divide': 3.2", + " },", + " 'byte_field': {", + " 'multiply': 2.0", + " }", + " }", + "}" + )); + } + + @Test + public void testAssignSimpleTypes() { + deSerializeAndSerializeJsonAndMatch(inputJson( + "{", + " 'update': 'DOCUMENT_ID',", + " 'fields': {", + " 'int_field': {", + " 'assign': 42", + " },", + " 'float_field': {", + " 'assign': 32.45", + " },", + " 'double_field': {", + " 'assign': 45.93", + " },", + " 'string_field': {", + " 'assign': \"My favorite string\"", + " },", + " 'byte_field': {", + " 'assign': 127", + " }", + " }", + "}" + )); + } + + @Test + public void testAssignWeightedSet() { + deSerializeAndSerializeJsonAndMatch(inputJson( + "{", + " 'update': 'DOCUMENT_ID',", + " 'fields': {", + " 'int_set': {", + " 'assign': {", + " '123': 456,", + " '789': 101112", + " }", + " },", + " 'string_set': {", + " 'assign': {", + " 'meow': 218478,", + " 'slurp': 2123", + " }", + " }", + " }", + "}" + )); + } + + @Test + public void testAddUpdate() { + deSerializeAndSerializeJsonAndMatch(inputJson( + "{", + " 'update': 'DOCUMENT_ID',", + " 'fields': {", + " 'int_array': {", + " 'add': [", + " 123,", + " 456,", + " 789", + " ]", + " },", + " 'string_array': {", + " 'add': [", + " 'bjarne',", + " 'andrei',", + " 'rich'", + " ]", + " }", + " }", + "}" + )); + } + + @Test + public void testRemoveUpdate() { + deSerializeAndSerializeJsonAndMatch(inputJson( + "{", + " 'update': 'DOCUMENT_ID',", + " 'fields': {", + " 'int_array': {", + " 'remove': [", + " 123,", + " 789", + " ]", + " },", + " 'string_array': {", + 
" 'remove': [", + " 'bjarne',", + " 'rich'", + " ]", + " }", + " }", + "}" + )); + } + + @Test + public void testMatchUpdateArithmetic() { + deSerializeAndSerializeJsonAndMatch(inputJson( + "{", + " 'update': 'DOCUMENT_ID',", + " 'fields': {", + " 'int_array': {", + " 'match': {", + " 'element': 456,", + " 'multiply': 8.0", + " }", + " }", + " }", + "}" + )); + } + + @Test + public void testMatchUpdateAssign() { + deSerializeAndSerializeJsonAndMatch(inputJson( + "{", + " 'update': 'DOCUMENT_ID',", + " 'fields': {", + " 'string_array': {", + " 'match': {", + " 'element': 3,", + " 'assign': 'kjeks'", + " }", + " }", + " }", + "}" + )); + } + + @Test + public void testAssignTensor() { + deSerializeAndSerializeJsonAndMatch(inputJson( + "{", + " 'update': 'DOCUMENT_ID',", + " 'fields': {", + " 'tensor_field': {", + " 'assign': {", + " 'dimensions': ['x','y','z'], ", + " 'cells': [", + " { 'address': { 'x': 'a', 'y': 'b' }, 'value': 2.0 },", + " { 'address': { 'x': 'c' }, 'value': 3.0 }", + " ]", + " }", + " }", + " }", + "}" + )); + } + + @Test + public void testAssignPredicate() { + deSerializeAndSerializeJsonAndMatch(inputJson( + "{", + " 'update': 'DOCUMENT_ID',", + " 'fields': {", + " 'predicate_field': {", + " 'assign': 'foo in [bar]'", + " }", + " }", + "}" + )); + } + + @Test + public void testAssignRaw() { + deSerializeAndSerializeJsonAndMatch(inputJson( + "{", + " 'update': 'DOCUMENT_ID',", + " 'fields': {", + " 'raw_field': {", + " 'assign': 'RG9uJ3QgYmVsaWV2ZSBoaXMgbGllcw==\\r\\n'", + " }", + " }", + "}" + )); + } + + @Test + public void testAssignMap() { + deSerializeAndSerializeJsonAndMatch(inputJson( + "{", + " 'update': 'DOCUMENT_ID',", + " 'fields': {", + " 'string_map': {", + " 'assign': [", + " { 'key': 'conversion gel', 'value': 'deadly'},", + " { 'key': 'repulsion gel', 'value': 'safe'},", + " { 'key': 'propulsion gel', 'value': 'insufficient data'}", + " ]", + " }", + " }", + "}" + )); + } + + @Test + public void testAssignSinglePos() { + 
deSerializeAndSerializeJsonAndMatch(inputJson( + "{", + " 'update': 'DOCUMENT_ID',", + " 'fields': {", + " 'singlepos_field': {", + " 'assign': 'N60.222333;E10.12'", + " }", + " }", + "}" + )); + } + + @Test + public void testAssignMultiPos() { + deSerializeAndSerializeJsonAndMatch(inputJson( + "{", + " 'update': 'DOCUMENT_ID',", + " 'fields': {", + " 'multipos_field': {", + " 'assign': [ 'N0.0;E0.0', 'S1.1;W1.1', 'N10.2;W122.2' ]", + " }", + " }", + "}" + )); + } + + @Test + public void testClearField() { + deSerializeAndSerializeJsonAndMatch(inputJson( + "{", + " 'update': 'DOCUMENT_ID',", + " 'fields': {", + " 'int_field': {", + " 'assign': null", + " },", + " 'string_field': {", + " 'assign': null", + " }", + " }", + "}" + )); + } +} diff --git a/document/src/test/java/com/yahoo/document/json/JsonReaderTestCase.java b/document/src/test/java/com/yahoo/document/json/JsonReaderTestCase.java index 8236a32e94c..206ab8e30f0 100644 --- a/document/src/test/java/com/yahoo/document/json/JsonReaderTestCase.java +++ b/document/src/test/java/com/yahoo/document/json/JsonReaderTestCase.java @@ -29,8 +29,14 @@ import com.yahoo.document.datatypes.StringFieldValue; import com.yahoo.document.datatypes.Struct; import com.yahoo.document.datatypes.TensorFieldValue; import com.yahoo.document.datatypes.WeightedSet; -import com.yahoo.document.update.*; +import com.yahoo.document.update.AddValueUpdate; +import com.yahoo.document.update.ArithmeticValueUpdate; import com.yahoo.document.update.ArithmeticValueUpdate.Operator; +import com.yahoo.document.update.AssignValueUpdate; +import com.yahoo.document.update.ClearValueUpdate; +import com.yahoo.document.update.FieldUpdate; +import com.yahoo.document.update.MapValueUpdate; +import com.yahoo.document.update.ValueUpdate; import com.yahoo.tensor.MapTensor; import com.yahoo.text.Utf8; import org.apache.commons.codec.binary.Base64; @@ -53,6 +59,7 @@ import java.util.Map; import java.util.Random; import java.util.Set; +import static 
com.yahoo.test.json.JsonTestHelper.inputJson; import static org.hamcrest.CoreMatchers.is; import static org.junit.Assert.*; @@ -60,7 +67,6 @@ import static org.junit.Assert.*; * Basic test of JSON streams to Vespa document instances. * * @author <a href="mailto:steinar@yahoo-inc.com">Steinar Knutsen</a> - * @author vegard */ public class JsonReaderTestCase { DocumentTypeManager types; @@ -1011,15 +1017,6 @@ public class JsonReaderTestCase { return new ByteArrayInputStream(Utf8.toBytes(json)); } - /** - * Convenience method to input JSON without escaping double quotes and newlines - * Each parameter represents a line of JSON encoded data - * The lines are joined with newline and single quotes are replaced with double quotes - */ - static String inputJson(String... lines) { - return Joiner.on("\n").join(lines).replaceAll("'", "\""); - } - @Test public void testParsingWithoutTensorField() { Document doc = createPutWithoutTensor().getDocument(); diff --git a/document/src/test/java/com/yahoo/document/json/JsonWriterTestCase.java b/document/src/test/java/com/yahoo/document/json/JsonWriterTestCase.java index 2e8354e3c6a..a587499a3c6 100644 --- a/document/src/test/java/com/yahoo/document/json/JsonWriterTestCase.java +++ b/document/src/test/java/com/yahoo/document/json/JsonWriterTestCase.java @@ -1,16 +1,11 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.document.json; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertSame; - -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonParseException; +import com.fasterxml.jackson.core.io.JsonStringEncoder; +import com.fasterxml.jackson.databind.JsonMappingException; +import com.fasterxml.jackson.databind.ObjectMapper; import com.yahoo.document.ArrayDataType; import com.yahoo.document.DataType; import com.yahoo.document.Document; @@ -26,17 +21,21 @@ import com.yahoo.document.PositionDataType; import com.yahoo.document.StructDataType; import com.yahoo.document.WeightedSetDataType; import com.yahoo.document.datatypes.TensorFieldValue; +import com.yahoo.text.Utf8; import org.apache.commons.codec.binary.Base64; import org.junit.After; import org.junit.Before; import org.junit.Test; -import com.fasterxml.jackson.core.JsonFactory; -import com.fasterxml.jackson.core.JsonParseException; -import com.fasterxml.jackson.core.io.JsonStringEncoder; -import com.fasterxml.jackson.databind.JsonMappingException; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.yahoo.text.Utf8; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; /** * Functional tests for com.yahoo.document.json.JsonWriter. 
diff --git a/document/src/tests/documenttestcase.cpp b/document/src/tests/documenttestcase.cpp index 1fc1b27ca4b..352c22bd3fd 100644 --- a/document/src/tests/documenttestcase.cpp +++ b/document/src/tests/documenttestcase.cpp @@ -81,7 +81,7 @@ void DocumentTest::testSizeOf() CPPUNIT_ASSERT_EQUAL(120ul, sizeof(Document)); CPPUNIT_ASSERT_EQUAL(64ul, sizeof(StructFieldValue)); CPPUNIT_ASSERT_EQUAL(16ul, sizeof(StructuredFieldValue)); - CPPUNIT_ASSERT_EQUAL(112ul, sizeof(SerializableArray)); + CPPUNIT_ASSERT_EQUAL(120ul, sizeof(SerializableArray)); } void DocumentTest::testFieldPath() diff --git a/document/src/vespa/document/bucket/bucketid.h b/document/src/vespa/document/bucket/bucketid.h index f4ec15ff7c1..15d02958746 100644 --- a/document/src/vespa/document/bucket/bucketid.h +++ b/document/src/vespa/document/bucket/bucketid.h @@ -50,7 +50,7 @@ public: * typedef when needed we can alter this later with less code changes. */ typedef uint64_t Type; - typedef vespalib::Array<BucketId, vespalib::DefaultAlloc> List; + typedef vespalib::Array<BucketId> List; /** Create an initially unset bucket id. */ BucketId() : _id(0) {} /** Create a bucket id with the given raw unchecked content. 
*/ diff --git a/document/src/vespa/document/fieldvalue/document.cpp b/document/src/vespa/document/fieldvalue/document.cpp index 79278a2e1ba..6681601ac52 100644 --- a/document/src/vespa/document/fieldvalue/document.cpp +++ b/document/src/vespa/document/fieldvalue/document.cpp @@ -4,7 +4,6 @@ #include <vespa/document/fieldvalue/document.h> #include <memory> -#include <boost/assign.hpp> #include <vespa/vespalib/util/crc.h> #include <vespa/document/base/documentid.h> #include <vespa/document/base/field.h> @@ -18,27 +17,30 @@ #include <vespa/vespalib/objects/nbostream.h> using vespalib::nbostream; +using vespalib::make_string; +using vespalib::IllegalArgumentException; +using vespalib::IllegalStateException; LOG_SETUP(".document.fieldvalue.document"); namespace document { namespace { -std::set<uint16_t> getAllowedVersions() { - std::set<uint16_t> allowed; - using namespace boost::assign; - allowed += 6, 7, 8; - return allowed; + +bool isLegalVersion(uint16_t version) { + return (6 <= version) && (version <= 8); } -const std::set<uint16_t> ALLOWED_VERSIONS(getAllowedVersions()); +void documentTypeError(const vespalib::stringref & name) __attribute__((noinline)); +void throwTypeMismatch(vespalib::stringref type, vespalib::stringref docidType) __attribute__((noinline)); -void documentTypeError(const vespalib::stringref & name) - __attribute__((noinline)); void documentTypeError(const vespalib::stringref & name) { - throw vespalib::IllegalArgumentException( - vespalib::make_string( - "Cannot generate a document with non-document type %s.", - name.c_str()), VESPA_STRLOC); + throw IllegalArgumentException(make_string("Cannot generate a document with non-document type %s.", name.c_str()), VESPA_STRLOC); +} + +void throwTypeMismatch(vespalib::stringref type, vespalib::stringref docidType) { + throw IllegalArgumentException(make_string("Trying to create a document with type %s that don't match the id (type %s).", + type.c_str(), docidType.c_str()), + VESPA_STRLOC); } const 
DataType &verifyDocumentType(const DataType *type) { @@ -78,12 +80,7 @@ Document::Document(const DataType &type, const DocumentId& documentId) { _fields.setDocumentType(getType()); if (documentId.hasDocType() && documentId.getDocType() != type.getName()) { - throw vespalib::IllegalArgumentException( - vespalib::make_string( - "Trying to create a document with type %s that " - "don't match the id (type %s).", - type.getName().c_str(), - documentId.getDocType().c_str())); + throwTypeMismatch(type.getName(), documentId.getDocType()); } } @@ -95,23 +92,14 @@ Document::Document(const DataType &type, DocumentId& documentId, bool iWillAllow { (void) iWillAllowSwap; _fields.setDocumentType(getType()); - if (documentId.hasDocType() && documentId.getDocType() != type.getName()) { - throw vespalib::IllegalArgumentException( - vespalib::make_string( - "Trying to create a document with type %s that " - "don't match the id (type %s).", - type.getName().c_str(), - documentId.getDocType().c_str())); + if (documentId.hasDocType() && (documentId.getDocType() != type.getName())) { + throwTypeMismatch(type.getName(), documentId.getDocType()); } _id.swap(documentId); } -Document::Document(const DocumentTypeRepo& repo, - ByteBuffer& buffer, - const DataType *anticipatedType) - : StructuredFieldValue(anticipatedType ? - verifyDocumentType(anticipatedType) : - *DataType::DOCUMENT), +Document::Document(const DocumentTypeRepo& repo, ByteBuffer& buffer, const DataType *anticipatedType) + : StructuredFieldValue(anticipatedType ? verifyDocumentType(anticipatedType) : *DataType::DOCUMENT), _id(), _fields(static_cast<const DocumentType &>(getType()).getFieldsType()), _lastModified(0) @@ -124,12 +112,8 @@ void Document::setRepo(const DocumentTypeRepo& repo) _fields.setRepo(repo); } -Document::Document(const DocumentTypeRepo& repo, - vespalib::nbostream & is, - const DataType *anticipatedType) - : StructuredFieldValue(anticipatedType ? 
- verifyDocumentType(anticipatedType) : - *DataType::DOCUMENT), +Document::Document(const DocumentTypeRepo& repo, vespalib::nbostream & is, const DataType *anticipatedType) + : StructuredFieldValue(anticipatedType ? verifyDocumentType(anticipatedType) : *DataType::DOCUMENT), _id(), _fields(static_cast<const DocumentType &>(getType()).getFieldsType()), _lastModified(0) @@ -137,21 +121,14 @@ Document::Document(const DocumentTypeRepo& repo, deserialize(repo, is); } -Document::Document(const DocumentTypeRepo& repo, - ByteBuffer& buffer, - bool includeContent, - const DataType *anticipatedType) - : StructuredFieldValue(anticipatedType ? - verifyDocumentType(anticipatedType) : - *DataType::DOCUMENT), +Document::Document(const DocumentTypeRepo& repo, ByteBuffer& buffer, bool includeContent, const DataType *anticipatedType) + : StructuredFieldValue(anticipatedType ? verifyDocumentType(anticipatedType) : *DataType::DOCUMENT), _id(), _fields(static_cast<const DocumentType &>(getType()).getFieldsType()), _lastModified(0) { if (!includeContent) { - const DocumentType *newDocType = deserializeDocHeaderAndType( - repo, buffer, _id, - static_cast<const DocumentType*>(anticipatedType)); + const DocumentType *newDocType = deserializeDocHeaderAndType(repo, buffer, _id, static_cast<const DocumentType*>(anticipatedType)); if (newDocType) { setType(*newDocType); } @@ -161,13 +138,8 @@ Document::Document(const DocumentTypeRepo& repo, } -Document::Document(const DocumentTypeRepo& repo, - ByteBuffer& header, - ByteBuffer& body, - const DataType *anticipatedType) - : StructuredFieldValue(anticipatedType ? - verifyDocumentType(anticipatedType) : - *DataType::DOCUMENT), +Document::Document(const DocumentTypeRepo& repo, ByteBuffer& header, ByteBuffer& body, const DataType *anticipatedType) + : StructuredFieldValue(anticipatedType ? 
verifyDocumentType(anticipatedType) : *DataType::DOCUMENT), _id(), _fields(static_cast<const DocumentType &>(getType()).getFieldsType()), _lastModified(0) @@ -225,14 +197,12 @@ Document::getIdFromSerialized(ByteBuffer& buf) } const DocumentType * -Document::getDocTypeFromSerialized(const DocumentTypeRepo& repo, - ByteBuffer& buf) +Document::getDocTypeFromSerialized(const DocumentTypeRepo& repo, ByteBuffer& buf) { int position = buf.getPos(); DocumentId retVal; - const DocumentType *docType(deserializeDocHeaderAndType( - repo, buf, retVal, NULL)); + const DocumentType *docType(deserializeDocHeaderAndType(repo, buf, retVal, NULL)); buf.setPos(position); return docType; @@ -348,23 +318,17 @@ void mainDocumentError(int64_t len) __attribute__((noinline)); void notEnoughDocumentError(int32_t len, int64_t remaining) __attribute__((noinline)); void versionError(uint16_t version) { - throw DeserializeException(vespalib::make_string( - "Unrecognized serialization version %d", version), - VESPA_STRLOC); + throw DeserializeException(make_string( "Unrecognized serialization version %d", version), VESPA_STRLOC); } void mainDocumentError(int64_t len) { - throw DeserializeException(vespalib::make_string( - "Document lengths past %i is not supported. Corrupt data " - "said length is %" PRId64 " bytes", + throw DeserializeException(make_string( + "Document lengths past %i is not supported. 
Corrupt data said length is %" PRId64 " bytes", std::numeric_limits<int>::max(), len), VESPA_STRLOC); } void notEnoughDocumentError(int32_t len, int64_t remaining) { - throw DeserializeException(vespalib::make_string( - "Buffer said document length is %i bytes, but only %li " - "bytes remain in buffer", - len, remaining)); + throw DeserializeException(make_string( "Buffer said document length is %i bytes, but only %li bytes remain in buffer", len, remaining)); } } @@ -375,7 +339,7 @@ Document::deserializeDocHeader(ByteBuffer& buffer, DocumentId& id) { int32_t len; buffer.getShortNetwork(version); - if (ALLOWED_VERSIONS.find(version) == ALLOWED_VERSIONS.end()) { + if ( ! isLegalVersion(version) ) { versionError(version); } else if (version < 7) { int64_t tmpLen = 0; @@ -394,8 +358,7 @@ Document::deserializeDocHeader(ByteBuffer& buffer, DocumentId& id) { if (len > (long)buffer.getRemaining()) { notEnoughDocumentError(len, buffer.getRemaining()); } else { - nbostream stream(buffer.getBufferAtPos(), buffer.getRemaining(), - false); + nbostream stream(buffer.getBufferAtPos(), buffer.getRemaining(), false); id = DocumentId(stream); buffer.incPos(stream.rp()); unsigned char contentByte; @@ -425,14 +388,12 @@ void Document::serializeBody(nbostream& stream) const { serializer.write(_fields, BodyFields()); } -void Document::deserialize(const DocumentTypeRepo& repo, - vespalib::nbostream & os) { +void Document::deserialize(const DocumentTypeRepo& repo, vespalib::nbostream & os) { VespaDocumentDeserializer deserializer(repo, os, 0); try { deserializer.read(*this); - } catch (const vespalib::IllegalStateException &e) { - throw DeserializeException( - std::string("Buffer out of bounds: ") + e.what()); + } catch (const IllegalStateException &e) { + throw DeserializeException(vespalib::string("Buffer out of bounds: ") + e.what()); } } diff --git a/document/src/vespa/document/select/compare.cpp b/document/src/vespa/document/select/compare.cpp index 2df33e00d22..d4cddd47be3 100644 
--- a/document/src/vespa/document/select/compare.cpp +++ b/document/src/vespa/document/select/compare.cpp @@ -2,7 +2,6 @@ #include <vespa/fastos/fastos.h> #include "compare.h" -#include <boost/scoped_array.hpp> #include <iomanip> #include <vespa/log/log.h> #include <sstream> diff --git a/document/src/vespa/document/select/value.h b/document/src/vespa/document/select/value.h index b4e35d1528d..dc9e8289ea9 100644 --- a/document/src/vespa/document/select/value.h +++ b/document/src/vespa/document/select/value.h @@ -15,7 +15,6 @@ #pragma once -#include <boost/operators.hpp> #include <memory> #include <map> #include <string> diff --git a/document/src/vespa/document/update/fieldpathupdate.h b/document/src/vespa/document/update/fieldpathupdate.h index e6958082493..9703edf5d6c 100644 --- a/document/src/vespa/document/update/fieldpathupdate.h +++ b/document/src/vespa/document/update/fieldpathupdate.h @@ -1,11 +1,7 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. #pragma once -#include <boost/operators.hpp> - -#include <boost/operators.hpp> #include <vespa/vespalib/objects/cloneable.h> - #include <vespa/document/datatype/datatype.h> #include <vespa/document/util/serializable.h> #include <vespa/document/util/xmlserializable.h> @@ -25,7 +21,6 @@ class BucketIdFactory; class FieldPathUpdate : public vespalib::Cloneable, public Printable, - public boost::equality_comparable<FieldPathUpdate>, public vespalib::Identifiable { protected: @@ -57,6 +52,9 @@ public: virtual FieldPathUpdate* clone() const = 0; virtual bool operator==(const FieldPathUpdate& other) const; + bool operator!=(const FieldPathUpdate& other) const { + return ! 
(*this == other); + } const FieldPath& getFieldPath() const { return *_fieldPath; } const select::Node& getWhereClause() const { return *_whereClause; } diff --git a/document/src/vespa/document/util/bytebuffer.cpp b/document/src/vespa/document/util/bytebuffer.cpp index 070d52ef8d9..4c8116482d5 100644 --- a/document/src/vespa/document/util/bytebuffer.cpp +++ b/document/src/vespa/document/util/bytebuffer.cpp @@ -21,6 +21,7 @@ #define LOG_DEBUG3(a,b,c) LOG_DEBUG1(vespalib::make_string(a,b,c)); #define LOG_DEBUG4(a,b,c,d) LOG_DEBUG1(vespalib::make_string(a,b,c,d)); +using vespalib::alloc::Alloc; using vespalib::DefaultAlloc; namespace document { @@ -59,7 +60,7 @@ ByteBuffer::ByteBuffer() : } ByteBuffer::ByteBuffer(size_t len) : - ByteBuffer(DefaultAlloc(len), len) + ByteBuffer(DefaultAlloc::create(len), len) { } @@ -74,7 +75,7 @@ ByteBuffer::ByteBuffer(const char* buffer, size_t len) : set(buffer, len); } -ByteBuffer::ByteBuffer(DefaultAlloc buffer, size_t len) : +ByteBuffer::ByteBuffer(Alloc buffer, size_t len) : _buffer(static_cast<char *>(buffer.get())), _len(len), _pos(0), @@ -114,7 +115,7 @@ ByteBuffer& ByteBuffer::operator=(const ByteBuffer & org) if (this != & org) { cleanUp(); if (org._len > 0 && org._buffer) { - DefaultAlloc(org._len + 1).swap(_ownedBuffer); + DefaultAlloc::create(org._len + 1).swap(_ownedBuffer); _buffer = static_cast<char *>(_ownedBuffer.get()); memcpy(_buffer,org._buffer,org._len); _buffer[org._len] = 0; @@ -189,7 +190,7 @@ ByteBuffer::sliceFrom(const ByteBuffer& buf, size_t from, size_t to) // throw (B // Slicing from someone that doesn't own their buffer, must make own copy. 
if (( buf._ownedBuffer.get() == NULL ) && (buf._bufHolder == NULL)) { cleanUp(); - DefaultAlloc(to-from + 1).swap(_ownedBuffer); + DefaultAlloc::create(to-from + 1).swap(_ownedBuffer); _buffer = static_cast<char *>(_ownedBuffer.get()); memcpy(_buffer, buf._buffer + from, to-from); _buffer[to-from] = 0; @@ -200,7 +201,7 @@ ByteBuffer::sliceFrom(const ByteBuffer& buf, size_t from, size_t to) // throw (B // Slicing from someone that owns, but hasn't made a reference counter yet. if (!buf._bufHolder) { - buf._bufHolder=new BufferHolder(std::move(const_cast<DefaultAlloc &>(buf._ownedBuffer))); + buf._bufHolder=new BufferHolder(std::move(const_cast<Alloc &>(buf._ownedBuffer))); } // Slicing from refcounter. @@ -218,7 +219,7 @@ ByteBuffer::sliceFrom(const ByteBuffer& buf, size_t from, size_t to) // throw (B ByteBuffer* ByteBuffer::copyBuffer(const char* buffer, size_t len) { if (buffer && len) { - DefaultAlloc newBuf(len + 1); + Alloc newBuf = DefaultAlloc::create(len + 1); memcpy(newBuf.get(), buffer, len); static_cast<char *>(newBuf.get())[len] = 0; return new ByteBuffer(std::move(newBuf), len); @@ -265,7 +266,7 @@ void ByteBuffer::flip() } -ByteBuffer::BufferHolder::BufferHolder(DefaultAlloc buffer) +ByteBuffer::BufferHolder::BufferHolder(Alloc buffer) : _buffer(std::move(buffer)) { } @@ -753,7 +754,7 @@ void ByteBuffer::cleanUp() { _bufHolder->subRef(); _bufHolder = NULL; } else { - DefaultAlloc().swap(_ownedBuffer); + Alloc().swap(_ownedBuffer); } _buffer = NULL; } diff --git a/document/src/vespa/document/util/bytebuffer.h b/document/src/vespa/document/util/bytebuffer.h index cfb72e092a5..9207d47350d 100644 --- a/document/src/vespa/document/util/bytebuffer.h +++ b/document/src/vespa/document/util/bytebuffer.h @@ -73,7 +73,7 @@ public: * @param buffer The buffer to represent. 
* @param len The length of the buffer */ - ByteBuffer(vespalib::DefaultAlloc buffer, size_t len); + ByteBuffer(vespalib::alloc::Alloc buffer, size_t len); /** * Sets the buffer pointed to by this buffer. Allows for multiple @@ -393,10 +393,10 @@ public: BufferHolder& operator=(const BufferHolder &); public: - BufferHolder(vespalib::DefaultAlloc buffer); + BufferHolder(vespalib::alloc::Alloc buffer); virtual ~BufferHolder(); - vespalib::DefaultAlloc _buffer; + vespalib::alloc::Alloc _buffer; }; ByteBuffer(BufferHolder* buf, size_t pos, size_t len, size_t limit); @@ -409,7 +409,7 @@ private: size_t _pos; size_t _limit; mutable BufferHolder * _bufHolder; - vespalib::DefaultAlloc _ownedBuffer; + vespalib::alloc::Alloc _ownedBuffer; public: std::string toString(); diff --git a/document/src/vespa/document/util/compressor.cpp b/document/src/vespa/document/util/compressor.cpp index f11283c7c40..9f0c6913325 100644 --- a/document/src/vespa/document/util/compressor.cpp +++ b/document/src/vespa/document/util/compressor.cpp @@ -11,6 +11,12 @@ LOG_SETUP(".document.compressor"); #include <lz4.h> #include <lz4hc.h> +using vespalib::alloc::Alloc; +using vespalib::DefaultAlloc; +using vespalib::ConstBufferRef; +using vespalib::DataBuffer; +using vespalib::make_string; + namespace document { @@ -24,10 +30,10 @@ LZ4Compressor::process(const CompressionConfig& config, const void * inputV, siz char * output(static_cast<char *>(outputV)); int sz(-1); if (config.compressionLevel > 6) { - vespalib::DefaultAlloc state(LZ4_sizeofStateHC()); + Alloc state = DefaultAlloc::create(LZ4_sizeofStateHC()); sz = LZ4_compressHC2_withStateHC(state.get(), input, output, inputLen, config.compressionLevel); } else { - vespalib::DefaultAlloc state(LZ4_sizeofState()); + Alloc state = DefaultAlloc::create(LZ4_sizeofState()); sz = LZ4_compress_withState(state.get(), input, output, inputLen); } if (sz != 0) { @@ -52,7 +58,7 @@ LZ4Compressor::unprocess(const void * inputV, size_t inputLen, void * outputV, s } 
CompressionConfig::Type -compress(ICompressor & compressor, const CompressionConfig & compression, const vespalib::ConstBufferRef & org, vespalib::DataBuffer & dest) +compress(ICompressor & compressor, const CompressionConfig & compression, const ConstBufferRef & org, DataBuffer & dest) { CompressionConfig::Type type(CompressionConfig::NONE); dest.ensureFree(compressor.adjustProcessLen(0, org.size())); @@ -67,7 +73,7 @@ compress(ICompressor & compressor, const CompressionConfig & compression, const } CompressionConfig::Type -docompress(const CompressionConfig & compression, const vespalib::ConstBufferRef & org, vespalib::DataBuffer & dest) +docompress(const CompressionConfig & compression, const ConstBufferRef & org, DataBuffer & dest) { CompressionConfig::Type type(CompressionConfig::NONE); switch (compression.type) { @@ -85,7 +91,7 @@ docompress(const CompressionConfig & compression, const vespalib::ConstBufferRef } CompressionConfig::Type -compress(const CompressionConfig & compression, const vespalib::ConstBufferRef & org, vespalib::DataBuffer & dest, bool allowSwap) +compress(const CompressionConfig & compression, const ConstBufferRef & org, DataBuffer & dest, bool allowSwap) { CompressionConfig::Type type(CompressionConfig::NONE); if (org.size() >= compression.minSize) { @@ -93,7 +99,7 @@ compress(const CompressionConfig & compression, const vespalib::ConstBufferRef & } if (type == CompressionConfig::NONE) { if (allowSwap) { - vespalib::DataBuffer tmp(const_cast<char *>(org.c_str()), org.size()); + DataBuffer tmp(const_cast<char *>(org.c_str()), org.size()); tmp.moveFreeToData(org.size()); dest.swap(tmp); } else { @@ -105,27 +111,22 @@ compress(const CompressionConfig & compression, const vespalib::ConstBufferRef & void -decompress(ICompressor & decompressor, size_t uncompressedLen, const vespalib::ConstBufferRef & org, vespalib::DataBuffer & dest, bool allowSwap) +decompress(ICompressor & decompressor, size_t uncompressedLen, const ConstBufferRef & org, 
DataBuffer & dest, bool allowSwap) { dest.ensureFree(uncompressedLen); size_t realUncompressedLen(dest.getFreeLen()); if ( ! decompressor.unprocess(org.c_str(), org.size(), dest.getFree(), realUncompressedLen) ) { if ( uncompressedLen < realUncompressedLen) { if (allowSwap) { - vespalib::DataBuffer tmp(const_cast<char *>(org.c_str()), org.size()); + DataBuffer tmp(const_cast<char *>(org.c_str()), org.size()); tmp.moveFreeToData(org.size()); dest.swap(tmp); } else { dest.writeBytes(org.c_str(), org.size()); } } else { - throw std::runtime_error( - vespalib::make_string("unprocess failed had %" PRIu64 - ", wanted %" PRId64 - ", got %" PRIu64, - org.size(), - uncompressedLen, - realUncompressedLen)); + throw std::runtime_error(make_string("unprocess failed had %" PRIu64 ", wanted %" PRId64 ", got %" PRIu64, + org.size(), uncompressedLen, realUncompressedLen)); } } else { dest.moveFreeToData(realUncompressedLen); @@ -133,7 +134,7 @@ decompress(ICompressor & decompressor, size_t uncompressedLen, const vespalib::C } void -decompress(const CompressionConfig::Type & type, size_t uncompressedLen, const vespalib::ConstBufferRef & org, vespalib::DataBuffer & dest, bool allowSwap) +decompress(const CompressionConfig::Type & type, size_t uncompressedLen, const ConstBufferRef & org, DataBuffer & dest, bool allowSwap) { switch (type) { case CompressionConfig::LZ4: @@ -145,7 +146,7 @@ decompress(const CompressionConfig::Type & type, size_t uncompressedLen, const v case CompressionConfig::NONE: case CompressionConfig::UNCOMPRESSABLE: if (allowSwap) { - vespalib::DataBuffer tmp(const_cast<char *>(org.c_str()), org.size()); + DataBuffer tmp(const_cast<char *>(org.c_str()), org.size()); tmp.moveFreeToData(org.size()); dest.swap(tmp); } else { @@ -153,7 +154,7 @@ decompress(const CompressionConfig::Type & type, size_t uncompressedLen, const v } break; default: - throw std::runtime_error(vespalib::make_string("Unable to handle decompression of type '%d'", type)); + throw 
std::runtime_error(make_string("Unable to handle decompression of type '%d'", type)); break; } } diff --git a/documentapi/src/main/java/com/yahoo/documentapi/DocumentAccess.java b/documentapi/src/main/java/com/yahoo/documentapi/DocumentAccess.java index 0d781e4ca95..bad692f0a0d 100644 --- a/documentapi/src/main/java/com/yahoo/documentapi/DocumentAccess.java +++ b/documentapi/src/main/java/com/yahoo/documentapi/DocumentAccess.java @@ -41,13 +41,13 @@ import com.yahoo.config.subscription.ConfigSubscriber; * <p>Access to this class is thread-safe.</p> * * @author bratseth - * @author <a href="mailto:einarmr@yahoo-inc.com">Einar Rosenvinge</a> - * @author <a href="mailto:simon@yahoo-inc.com">Simon Thoresen</a> + * @author Einar Rosenvinge + * @author Simon Thoresen */ public abstract class DocumentAccess { - protected DocumentTypeManager documentMgr; - protected ConfigSubscriber documentTypeManagerConfig; + private final DocumentTypeManager documentTypeManager; + private final ConfigSubscriber documentTypeConfigSubscriber; /** * <p>This is a convenience method to return a document access object with @@ -69,8 +69,14 @@ public abstract class DocumentAccess { */ protected DocumentAccess(DocumentAccessParams params) { super(); - documentMgr = new DocumentTypeManager(); - documentTypeManagerConfig = DocumentTypeManagerConfigurer.configure(documentMgr, params.getDocumentManagerConfigId()); + if (params.documentmanagerConfig().isPresent()) { // our config has been injected into the creator + documentTypeManager = new DocumentTypeManager(params.documentmanagerConfig().get()); + documentTypeConfigSubscriber = null; + } + else { // fallback to old style subscription + documentTypeManager = new DocumentTypeManager(); + documentTypeConfigSubscriber = DocumentTypeManagerConfigurer.configure(documentTypeManager, params.getDocumentManagerConfigId()); + } } /** @@ -154,11 +160,15 @@ public abstract class DocumentAccess { public abstract SubscriptionSession 
openSubscription(SubscriptionParameters parameters); /** - * <p>Shuts down the underlying sessions used by this DocumentAccess; + * Shuts down the underlying sessions used by this DocumentAccess; * subsequent use of this DocumentAccess will throw unspecified exceptions, - * depending on implementation.</p> + * depending on implementation. + * Classes overriding this must call super.shutdown(). */ - public abstract void shutdown(); + public void shutdown() { + if (documentTypeConfigSubscriber != null) + documentTypeConfigSubscriber.close(); + } /** * <p>Returns the {@link DocumentTypeManager} used by this @@ -167,6 +177,6 @@ public abstract class DocumentAccess { * @return The document type manager. */ public DocumentTypeManager getDocumentTypeManager() { - return documentMgr; + return documentTypeManager; } } diff --git a/documentapi/src/main/java/com/yahoo/documentapi/DocumentAccessParams.java b/documentapi/src/main/java/com/yahoo/documentapi/DocumentAccessParams.java index 57cfdbd32e1..701fafbab06 100755 --- a/documentapi/src/main/java/com/yahoo/documentapi/DocumentAccessParams.java +++ b/documentapi/src/main/java/com/yahoo/documentapi/DocumentAccessParams.java @@ -1,6 +1,10 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.documentapi;
+import com.yahoo.document.config.DocumentmanagerConfig;
+
+import java.util.Optional;
+
/**
* Superclass of the classes which contains the parameters for creating or opening a document access.
*
@@ -8,26 +12,27 @@ package com.yahoo.documentapi; */
public class DocumentAccessParams {
- // The id to resolve to document manager config.
+ /** The id to resolve to document manager config. Not needed if the config is passed here */
private String documentManagerConfigId = "client";
- /**
- * Returns the config id that the document manager should subscribe to.
- *
- * @return The config id.
- */
- public String getDocumentManagerConfigId() {
- return documentManagerConfigId;
- }
+ /** The document manager config, or empty if not provided (in which case a subscription must be created) */
+ private Optional<DocumentmanagerConfig> documentmanagerConfig = Optional.empty();
+
+ /** Returns the config id that the document manager should subscribe to. */
+ public String getDocumentManagerConfigId() { return documentManagerConfigId; }
+
+ /** Returns the document manager config to use, or empty if it it necessary to subscribe to get it */
+ public Optional<DocumentmanagerConfig> documentmanagerConfig() { return documentmanagerConfig; }
- /**
- * Sets the config id that the document manager should subscribe to.
- *
- * @param configId The config id.
- * @return This, to allow chaining.
- */
+ /** Sets the config id that the document manager should subscribe to. */
public DocumentAccessParams setDocumentManagerConfigId(String configId) {
documentManagerConfigId = configId;
return this;
}
+
+ public DocumentAccessParams setDocumentmanagerConfig(DocumentmanagerConfig documentmanagerConfig) {
+ this.documentmanagerConfig = Optional.of(documentmanagerConfig);
+ return this;
+ }
+
}
\ No newline at end of file diff --git a/documentapi/src/main/java/com/yahoo/documentapi/local/LocalDocumentAccess.java b/documentapi/src/main/java/com/yahoo/documentapi/local/LocalDocumentAccess.java index edcefe9447d..ab1b5e7cdd6 100644 --- a/documentapi/src/main/java/com/yahoo/documentapi/local/LocalDocumentAccess.java +++ b/documentapi/src/main/java/com/yahoo/documentapi/local/LocalDocumentAccess.java @@ -22,13 +22,6 @@ public class LocalDocumentAccess extends DocumentAccess { } @Override - public void shutdown() { - if (documentTypeManagerConfig != null) { - documentTypeManagerConfig.close(); - } - } - - @Override public SyncSession createSyncSession(SyncParameters parameters) { return new LocalSyncSession(this); } @@ -57,4 +50,5 @@ public class LocalDocumentAccess extends DocumentAccess { public SubscriptionSession openSubscription(SubscriptionParameters parameters) { throw new UnsupportedOperationException("Not supported yet"); } + } diff --git a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/MessageBusDocumentAccess.java b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/MessageBusDocumentAccess.java index 818bc204784..0a57a700276 100644 --- a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/MessageBusDocumentAccess.java +++ b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/MessageBusDocumentAccess.java @@ -7,8 +7,11 @@ import com.yahoo.document.select.parser.ParseException; import com.yahoo.documentapi.*; import com.yahoo.documentapi.messagebus.protocol.DocumentProtocol; import com.yahoo.messagebus.MessageBus; +import com.yahoo.messagebus.NetworkMessageBus; import com.yahoo.messagebus.RPCMessageBus; import com.yahoo.messagebus.network.Network; +import com.yahoo.messagebus.network.local.LocalNetwork; +import com.yahoo.messagebus.network.local.LocalWire; import com.yahoo.messagebus.routing.RoutingTable; import java.util.concurrent.Executors; @@ -17,16 +20,18 @@ import java.util.concurrent.ScheduledExecutorService; 
/** * This class implements the {@link DocumentAccess} interface using message bus for communication. * - * @author <a href="mailto:einarmr@yahoo-inc.com">Einar Rosenvinge</a> + * @author Einar Rosenvinge * @author bratseth */ public class MessageBusDocumentAccess extends DocumentAccess { - private final RPCMessageBus bus; + private final NetworkMessageBus bus; + private final MessageBusParams params; // TODO: make pool size configurable? ScheduledExecutorService is not dynamic - private final ScheduledExecutorService scheduledExecutorService = Executors.newScheduledThreadPool( - Runtime.getRuntime().availableProcessors(), ThreadFactoryFactory.getDaemonThreadFactory("mbus.access.scheduler")); + private final ScheduledExecutorService scheduledExecutorService = + Executors.newScheduledThreadPool(Runtime.getRuntime().availableProcessors(), + ThreadFactoryFactory.getDaemonThreadFactory("mbus.access.scheduler")); /** * Creates a new document access using default values for all parameters. @@ -46,32 +51,38 @@ public class MessageBusDocumentAccess extends DocumentAccess { try { com.yahoo.messagebus.MessageBusParams mbusParams = new com.yahoo.messagebus.MessageBusParams(params.getMessageBusParams()); mbusParams.addProtocol(new DocumentProtocol(getDocumentTypeManager(), params.getProtocolConfigId(), params.getLoadTypes())); - bus = new RPCMessageBus(mbusParams, - params.getRPCNetworkParams(), - params.getRoutingConfigId()); + if (System.getProperty("vespa.local", "false").equals("true")) { // set by Application when running locally + LocalNetwork network = new LocalNetwork(); + bus = new NetworkMessageBus(network, new MessageBus(network, mbusParams)); + } + else { + bus = new RPCMessageBus(mbusParams, params.getRPCNetworkParams(), params.getRoutingConfigId()); + } } catch (Exception e) { throw new DocumentAccessException(e); } } + + private MessageBus messageBus() { + return bus.getMessageBus(); + } @Override public void shutdown() { + super.shutdown(); bus.destroy(); - if 
(documentTypeManagerConfig != null) { - documentTypeManagerConfig.close(); - } scheduledExecutorService.shutdownNow(); } @Override public MessageBusSyncSession createSyncSession(SyncParameters parameters) { - return new MessageBusSyncSession(parameters, bus.getMessageBus(), this.params); + return new MessageBusSyncSession(parameters, messageBus(), this.params); } @Override public MessageBusAsyncSession createAsyncSession(AsyncParameters parameters) { - return new MessageBusAsyncSession(parameters, bus.getMessageBus(), this.params); + return new MessageBusAsyncSession(parameters, messageBus(), this.params); } @Override @@ -101,34 +112,20 @@ public class MessageBusDocumentAccess extends DocumentAccess { throw new UnsupportedOperationException("Subscriptions not supported."); } - /** - * Returns the internal message bus object so that clients can use it directly. - * - * @return The internal message bus. - */ - public MessageBus getMessageBus() { - return bus.getMessageBus(); - } + /** Returns the internal message bus object so that clients can use it directly. */ + public MessageBus getMessageBus() { return messageBus(); } /** * Returns the network layer of the internal message bus object so that clients can use it directly. This may seem * abit arbitrary, but the fact is that the RPCNetwork actually implements the IMirror API as well as exposing the * SystemState object. - * - * @return The network layer. */ - public Network getNetwork() { - return bus.getRPCNetwork(); - } + public Network getNetwork() { return bus.getNetwork(); } /** * Returns the parameter object that controls the underlying message bus. Changes to these parameters do not affect * previously created sessions. - * - * @return The parameter object. 
*/ - public MessageBusParams getParams() { - return params; - } + public MessageBusParams getParams() { return params; } } diff --git a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/loadtypes/LoadTypeSet.java b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/loadtypes/LoadTypeSet.java index cb453559ab1..b9129bf3b85 100644 --- a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/loadtypes/LoadTypeSet.java +++ b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/loadtypes/LoadTypeSet.java @@ -23,6 +23,7 @@ import java.util.TreeMap; * load types yourself with addType(). */ public class LoadTypeSet { + class DualMap { Map<String, LoadType> nameMap = new TreeMap<String, LoadType>(); Map<Integer, LoadType> idMap = new HashMap<Integer, LoadType>(); @@ -49,6 +50,10 @@ public class LoadTypeSet { configure(new ConfigGetter<>(LoadTypeConfig.class).getConfig(configId)); } + public LoadTypeSet(LoadTypeConfig loadTypeConfig) { + configure(loadTypeConfig); + } + public Map<String, LoadType> getNameMap() { return map.nameMap; } diff --git a/fastlib/src/vespa/fastlib/io/bufferedfile.cpp b/fastlib/src/vespa/fastlib/io/bufferedfile.cpp index 84eb8534cda..adf201a6e1e 100644 --- a/fastlib/src/vespa/fastlib/io/bufferedfile.cpp +++ b/fastlib/src/vespa/fastlib/io/bufferedfile.cpp @@ -392,7 +392,7 @@ size_t computeBufLen(size_t buflen) Fast_BufferedFile::Fast_BufferedFile(FastOS_FileInterface *file, size_t bufferSize) : FastOS_FileInterface(), _fileleft(static_cast<uint64_t>(-1)), - _buf(computeBufLen(bufferSize)), + _buf(vespalib::alloc::MMapAllocFactory::create(computeBufLen(bufferSize))), _bufi(NULL), _bufe(NULL), _filepos(0), diff --git a/fastlib/src/vespa/fastlib/io/bufferedfile.h b/fastlib/src/vespa/fastlib/io/bufferedfile.h index 275e066a849..7443b325dd4 100644 --- a/fastlib/src/vespa/fastlib/io/bufferedfile.h +++ b/fastlib/src/vespa/fastlib/io/bufferedfile.h @@ -23,7 +23,7 @@ class Fast_BufferedFile : public FastOS_FileInterface, public 
vespalib::noncopyable { private: - typedef vespalib::MMapAlloc Alloc; + using Alloc = vespalib::alloc::Alloc; /** The number of bytes left in the file. */ int64_t _fileleft; /** Pointer to the start of the buffer. Correctly aligned for direct IO */ diff --git a/fastlib/src/vespa/fastlib/io/bufferedinputstream.cpp b/fastlib/src/vespa/fastlib/io/bufferedinputstream.cpp index 9b761d2e272..7b1da7b3293 100644 --- a/fastlib/src/vespa/fastlib/io/bufferedinputstream.cpp +++ b/fastlib/src/vespa/fastlib/io/bufferedinputstream.cpp @@ -1,24 +1,8 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -//************************************************************************ -/** - * Implmentation of Fast_BufferedInputStream - * - * @author Markus Bjartveit Kr�ger - * @version $Id$ - */ - /* - * Creation date : 2001-10-29 - * Copyright (c) : 1997-2002 Fast Search & Transfer ASA - * ALL RIGHTS RESERVED - *************************************************************************/ #include <vespa/fastos/fastos.h> #include "bufferedinputstream.h" - - - -Fast_BufferedInputStream::Fast_BufferedInputStream(Fast_InputStream &in, - size_t bufferSize) +Fast_BufferedInputStream::Fast_BufferedInputStream(Fast_InputStream &in, size_t bufferSize) : Fast_FilterInputStream(in), _buffer(new char[bufferSize]), _bufferSize((_buffer != NULL) ? 
bufferSize : 0), @@ -28,89 +12,70 @@ Fast_BufferedInputStream::Fast_BufferedInputStream(Fast_InputStream &in, { } - - -Fast_BufferedInputStream::~Fast_BufferedInputStream(void) +Fast_BufferedInputStream::~Fast_BufferedInputStream() { delete [] _buffer; -}; - - +} -ssize_t Fast_BufferedInputStream::Available(void) +ssize_t +Fast_BufferedInputStream::Available() { return _in->Available() + _bufferUsed - _bufferRead; } - - -bool Fast_BufferedInputStream::Close(void) +bool +Fast_BufferedInputStream::Close() { return _in->Close(); } - - -ssize_t Fast_BufferedInputStream::Skip(size_t skipNBytes) +ssize_t +Fast_BufferedInputStream::Skip(size_t skipNBytes) { ssize_t numBytesSkipped = 0; - if (_nextWillFail) - { + if (_nextWillFail) { _nextWillFail = false; return -1; } - if (skipNBytes > _bufferUsed - _bufferRead) - { + if (skipNBytes > _bufferUsed - _bufferRead) { // First, skip all bytes in buffer numBytesSkipped = _bufferUsed - _bufferRead; _bufferUsed = _bufferRead = 0; // Skip rest of bytes in slave stream ssize_t slaveSkipped = _in->Skip(skipNBytes - numBytesSkipped); - if (slaveSkipped < 0) - { - if (numBytesSkipped > 0) - { + if (slaveSkipped < 0) { + if (numBytesSkipped > 0) { _nextWillFail = true; - } - else - { + } else { numBytesSkipped = slaveSkipped; } - } - else - { + } else { numBytesSkipped += slaveSkipped; } - } - else - { + } else { // Skip all skipNBytes in buffer _bufferRead += skipNBytes; - if (_bufferRead == _bufferUsed) - { + if (_bufferRead == _bufferUsed) { _bufferUsed = _bufferRead = 0; } numBytesSkipped = skipNBytes; } - return numBytesSkipped; } - - -ssize_t Fast_BufferedInputStream::Read(void *targetBuffer, size_t length) +ssize_t +Fast_BufferedInputStream::Read(void *targetBuffer, size_t length) { // This function will under no circumstance read more than once from // its slave stream, in order to prevent blocking on input. 
- if (_nextWillFail) - { + if (_nextWillFail) { _nextWillFail = false; return -1; } @@ -119,22 +84,17 @@ ssize_t Fast_BufferedInputStream::Read(void *targetBuffer, size_t length) char* to = static_cast<char*>(targetBuffer); size_t bufferRemain = _bufferUsed - _bufferRead; - if (length <= bufferRemain) - { + if (length <= bufferRemain) { memcpy(to, &_buffer[_bufferRead], length); numBytesRead += length; _bufferRead += length; - if (_bufferRead == _bufferUsed) - { + if (_bufferRead == _bufferUsed) { _bufferRead = _bufferUsed = 0; } - } - else - { + } else { // Use the data currently in the buffer, then read from slave stream. - if (bufferRemain > 0) - { + if (bufferRemain > 0) { memcpy(to, &_buffer[_bufferRead], bufferRemain); numBytesRead += bufferRemain; length -= bufferRemain; @@ -146,54 +106,37 @@ ssize_t Fast_BufferedInputStream::Read(void *targetBuffer, size_t length) // If remaining data to be read can fit in the buffer, put it // there, otherwise read directly to receiver and empty the buffer. - if (length < _bufferSize) - { + if (length < _bufferSize) { slaveRead = Fast_FilterInputStream::Read(_buffer, _bufferSize); - } - else - { + } else { slaveRead = Fast_FilterInputStream::Read(to, length); } - if (slaveRead > 0) - { - if (length < _bufferSize) - { + if (slaveRead > 0) { + if (length < _bufferSize) { // We read to buffer, so copy from buffer to receiver. - if (length < static_cast<size_t>(slaveRead)) - { + if (length < static_cast<size_t>(slaveRead)) { memcpy(to, _buffer, length); numBytesRead += length; _bufferUsed = slaveRead; _bufferRead = length; - } - else - { + } else { memcpy(to, _buffer, slaveRead); numBytesRead += slaveRead; } - } - else - { + } else { // We read directly to receiver, no need to copy. numBytesRead += slaveRead; } - } - else if (slaveRead == 0) - { + } else if (slaveRead == 0) { // Do nothing - } - else - { + } else { // slaveRead < 0, so an error occurred while reading from the // slave. 
If there was data in the buffer, report success and // fail on next operation instead. - if (numBytesRead > 0) - { + if (numBytesRead > 0) { _nextWillFail = true; - } - else - { + } else { numBytesRead = slaveRead; } } @@ -203,10 +146,8 @@ ssize_t Fast_BufferedInputStream::Read(void *targetBuffer, size_t length) return numBytesRead; } - -ssize_t Fast_BufferedInputStream::ReadBufferFullUntil(void *targetBuffer, - size_t maxlength, - char stopChar) +ssize_t +Fast_BufferedInputStream::ReadBufferFullUntil(void *targetBuffer, size_t maxlength, char stopChar) { if (maxlength > _bufferSize) @@ -215,8 +156,7 @@ ssize_t Fast_BufferedInputStream::ReadBufferFullUntil(void *targetBuffer, // This function will under no circumstance read more than once from // its slave stream, in order to prevent blocking on input. - if (_nextWillFail) - { + if (_nextWillFail) { _nextWillFail = false; return -1; } @@ -239,22 +179,17 @@ ssize_t Fast_BufferedInputStream::ReadBufferFullUntil(void *targetBuffer, } } - if (maxlength <= bufferRemain) - { + if (maxlength <= bufferRemain) { memcpy(to, &_buffer[_bufferRead], maxlength); numBytesRead += maxlength; _bufferRead += maxlength; - if (_bufferRead == _bufferUsed) - { + if (_bufferRead == _bufferUsed) { _bufferRead = _bufferUsed = 0; } - } - else - { + } else { // Use the data currently in the buffer, then read from slave stream. 
- if (bufferRemain > 0) - { + if (bufferRemain > 0) { memcpy(to, &_buffer[_bufferRead], bufferRemain); numBytesRead += bufferRemain; maxlength -= bufferRemain; @@ -265,8 +200,7 @@ ssize_t Fast_BufferedInputStream::ReadBufferFullUntil(void *targetBuffer, ssize_t slaveRead; slaveRead = Fast_FilterInputStream::Read(_buffer, _bufferSize); - if (slaveRead > 0) - { + if (slaveRead > 0) { for (offset = 0; offset < static_cast<uint32_t>(slaveRead); offset++) { if(_buffer[offset] == stopChar) { break; @@ -276,42 +210,31 @@ ssize_t Fast_BufferedInputStream::ReadBufferFullUntil(void *targetBuffer, if (offset >= maxlength) { // Discard data if character was not present numBytesRead = -1; - } - else { + } else { // Found character in buffer if (offset < static_cast<uint32_t>(slaveRead)) { maxlength = offset + 1; } // We read to buffer, so copy from buffer to receiver. - if (maxlength < static_cast<size_t>(slaveRead)) - { + if (maxlength < static_cast<size_t>(slaveRead)) { memcpy(to, _buffer, maxlength); numBytesRead += maxlength; _bufferUsed = slaveRead; _bufferRead = maxlength; - } - else - { + } else { memcpy(to, _buffer, slaveRead); numBytesRead += slaveRead; } } - } - else if (slaveRead == 0) - { + } else if (slaveRead == 0) { // Do nothing - } - else - { + } else { // slaveRead < 0, so an error occurred while reading from the // slave. If there was data in the buffer, report success and // fail on next operation instead. - if (numBytesRead > 0) - { + if (numBytesRead > 0) { _nextWillFail = true; - } - else - { + } else { numBytesRead = slaveRead; } } diff --git a/fastlib/src/vespa/fastlib/io/bufferedinputstream.h b/fastlib/src/vespa/fastlib/io/bufferedinputstream.h index 183e3494215..b102bd98abc 100644 --- a/fastlib/src/vespa/fastlib/io/bufferedinputstream.h +++ b/fastlib/src/vespa/fastlib/io/bufferedinputstream.h @@ -1,61 +1,37 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-//************************************************************************ -/** - * Class definitions for Fast_BufferedInputStream - * - * @author Markus Bjartveit Kr�ger - * @version $Id$ - */ - /* - * Creation date : 2001-10-29 - * Copyright (c) : 1997-2002 Fast Search & Transfer ASA - * ALL RIGHTS RESERVED - *************************************************************************/ #pragma once #include <vespa/fastlib/io/filterinputstream.h> - - - - class Fast_BufferedInputStream : public Fast_FilterInputStream { - private: - - // Prevent use of: - Fast_BufferedInputStream(const Fast_BufferedInputStream &); - Fast_BufferedInputStream & operator=(const Fast_BufferedInputStream &); - - protected: - // Buffer attributes - char *_buffer; - size_t _bufferSize; - size_t _bufferUsed; // Amount of buffer currently holding data - size_t _bufferRead; // How far buffer has been read - bool _nextWillFail; + char *_buffer; + const size_t _bufferSize; + size_t _bufferUsed; // Amount of buffer currently holding data + size_t _bufferRead; // How far buffer has been read + bool _nextWillFail; public: + Fast_BufferedInputStream(const Fast_BufferedInputStream &) = delete; + Fast_BufferedInputStream & operator = (const Fast_BufferedInputStream &) = delete; // Constructor Fast_BufferedInputStream(Fast_InputStream &in, size_t bufferSize = 1024); // Destructor - virtual ~Fast_BufferedInputStream(void); + virtual ~Fast_BufferedInputStream(); // Subclassed methods - virtual ssize_t Available(void); - virtual bool Close(void); - virtual ssize_t Skip(size_t skipNBytes); - virtual ssize_t Read(void *targetBuffer, size_t length); + ssize_t Available() override; + bool Close() override; + ssize_t Skip(size_t skipNBytes) override; + ssize_t Read(void *targetBuffer, size_t length) override; // Additional methods - ssize_t ReadBufferFullUntil(void *targetBuffer, - size_t maxlength, - char stopChar); + ssize_t ReadBufferFullUntil(void *targetBuffer, size_t maxlength, char stopChar); }; diff 
--git a/fastlib/src/vespa/fastlib/net/httpheaderparser.cpp b/fastlib/src/vespa/fastlib/net/httpheaderparser.cpp index 22174acfec9..aa56372277c 100644 --- a/fastlib/src/vespa/fastlib/net/httpheaderparser.cpp +++ b/fastlib/src/vespa/fastlib/net/httpheaderparser.cpp @@ -3,50 +3,40 @@ #include <vespa/fastlib/io/bufferedinputstream.h> #include <vespa/fastlib/net/httpheaderparser.h> - - Fast_HTTPHeaderParser::Fast_HTTPHeaderParser(Fast_BufferedInputStream &in) : _pushBack(0), _isPushBacked(false), + _bufferSize(16384), + _lineBuffer(new char[_bufferSize]), _input(&in) { } - - Fast_HTTPHeaderParser::~Fast_HTTPHeaderParser(void) { + delete [] _lineBuffer; } - - -bool Fast_HTTPHeaderParser::ReadRequestLine(const char *&method, - const char *&url, - int &versionMajor, - int &versionMinor) +bool +Fast_HTTPHeaderParser::ReadRequestLine(const char *&method, const char *&url, int &versionMajor, int &versionMinor) { // Read a single line from input. Repeat if line is blank, to cope // with buggy HTTP/1.1 clients that print extra empty lines at the // end of requests. 
- do - { - int idx = 0; - size_t readLen = - _input->ReadBufferFullUntil(_lineBuffer, - static_cast<size_t> - (HTTPHEADERPARSER_LINE_BUFFER_SIZE), - '\n'); + do { + size_t idx = 0; + ssize_t readLen = _input->ReadBufferFullUntil(_lineBuffer, _bufferSize, '\n'); if (readLen <= 0) { return false; } idx = readLen-1; - if (idx == 0 || _lineBuffer[idx] != '\n') + if (idx == 0 || _lineBuffer[idx] != '\n') { return false; + } _lineBuffer[idx--] = '\0'; - if (_lineBuffer[idx] == '\r') - { + if (_lineBuffer[idx] == '\r') { _lineBuffer[idx] = '\0'; } } while (_lineBuffer[0] == '\0'); @@ -58,20 +48,17 @@ bool Fast_HTTPHeaderParser::ReadRequestLine(const char *&method, method = p; p = strchr(p, ' '); - if (p != NULL) - { + if (p != NULL) { *p++ = '\0'; url = p; p = strchr(p, ' '); - if (p != NULL) - { + if (p != NULL) { *p++ = '\0'; version = p; } } - if (sscanf(version, "HTTP/%d.%d", &versionMajor, &versionMinor) != 2) - { + if (sscanf(version, "HTTP/%d.%d", &versionMajor, &versionMinor) != 2) { versionMajor = versionMinor = -1; return false; } @@ -79,37 +66,31 @@ bool Fast_HTTPHeaderParser::ReadRequestLine(const char *&method, return true; } -bool Fast_HTTPHeaderParser::ReadHeader(const char *&name, const char *&value) +bool +Fast_HTTPHeaderParser::ReadHeader(const char *&name, const char *&value) { - int idx = 0; + size_t idx = 0; name = NULL; value = NULL; - if (_isPushBacked) - { + if (_isPushBacked) { idx = 0; _lineBuffer[idx] = _pushBack; _isPushBacked = false; idx++; } - while (idx<HTTPHEADERPARSER_LINE_BUFFER_SIZE-1) - { - - size_t readLen = - _input->ReadBufferFullUntil(&_lineBuffer[idx], - static_cast<size_t> - (HTTPHEADERPARSER_LINE_BUFFER_SIZE), - '\n'); + constexpr size_t ROOM_FOR_PUSH_BACK = 1u; + while ((idx + ROOM_FOR_PUSH_BACK) < _bufferSize) { + ssize_t readLen = _input->ReadBufferFullUntil(&_lineBuffer[idx], _bufferSize - idx - ROOM_FOR_PUSH_BACK, '\n'); if (readLen <= 0) { return false; } idx += readLen - 1; // Empty line == end of headers. 
// handle case with \r\n as \n - if (idx == 0 || (_lineBuffer[0] == '\r' && idx == 1)) - { + if (idx == 0 || (_lineBuffer[0] == '\r' && idx == 1)) { idx = 0; break; } @@ -123,52 +104,44 @@ bool Fast_HTTPHeaderParser::ReadHeader(const char *&name, const char *&value) } // Check if header continues on next line. - if (_input->Read(&_pushBack, 1) != 1) + if (_input->Read(&_pushBack, 1) != 1) { break; - if (_pushBack == ' ' || _pushBack == '\t') - { + } + if (_pushBack == ' ' || _pushBack == '\t') { // Header does continue on next line. // Replace newline with horizontal whitespace. _lineBuffer[idx] = _pushBack; idx++; - } - else - { + } else { _isPushBacked = true; // break out of while loop break; } - } - if (idx != 0) - { + if (idx != 0) { _lineBuffer[idx] = '\0'; char *p = _lineBuffer; name = p; // Find end of header name. - while (*p != ':' && *p != '\0') - { + while (*p != ':' && *p != '\0') { p++; } // If end of header name is not end of header, parse header value. - if (*p != '\0') - { + if (*p != '\0') { // Terminate header name. *p++ = '\0'; // Skip leading whitespace before header value. - while (*p == ' ' || *p == '\t') - { + while (*p == ' ' || *p == '\t') { p++; } value = p; // Strip trailing whitespace. p += strlen(p); - while (p > value && (*(p-1) == ' ' || *(p-1) == '\t')) - { + while (p > value && (*(p-1) == ' ' || *(p-1) == '\t')) { p--; } *p = '\0'; diff --git a/fastlib/src/vespa/fastlib/net/httpheaderparser.h b/fastlib/src/vespa/fastlib/net/httpheaderparser.h index 5bb41e787a7..59c5cb01f8a 100644 --- a/fastlib/src/vespa/fastlib/net/httpheaderparser.h +++ b/fastlib/src/vespa/fastlib/net/httpheaderparser.h @@ -1,53 +1,24 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -/** -******************************************************************************* -* -* @author Markus Bjartveit Kr�ger -* @date Creation date: 2000-11-22 -* @version $Id$ -* -* @file -* -* HTTP header parser. 
-* -* Copyright (c) : 2001 Fast Search & Transfer ASA -* ALL RIGHTS RESERVED -* -******************************************************************************/ - #pragma once - - class Fast_BufferedInputStream; - -#define HTTPHEADERPARSER_LINE_BUFFER_SIZE 4096 - - class Fast_HTTPHeaderParser { - private: - // Prevent use of: - Fast_HTTPHeaderParser(const Fast_HTTPHeaderParser &); - Fast_HTTPHeaderParser & operator=(const Fast_HTTPHeaderParser &); - protected: - char _pushBack; - bool _isPushBacked; - char _lineBuffer[HTTPHEADERPARSER_LINE_BUFFER_SIZE]; - Fast_BufferedInputStream *_input; - public: + Fast_HTTPHeaderParser(const Fast_HTTPHeaderParser &) = delete; + Fast_HTTPHeaderParser & operator = (const Fast_HTTPHeaderParser &) = delete; Fast_HTTPHeaderParser(Fast_BufferedInputStream &in); - virtual ~Fast_HTTPHeaderParser(void); - + ~Fast_HTTPHeaderParser(); // Methods - bool ReadRequestLine(const char *&method, const char *&url, - int &versionMajor, int &versionMinor); + bool ReadRequestLine(const char *&method, const char *&url, int &versionMajor, int &versionMinor); bool ReadHeader(const char *&name, const char *&value); + private: + char _pushBack; + bool _isPushBacked; + const size_t _bufferSize; + char *_lineBuffer; + Fast_BufferedInputStream *_input; }; - - - diff --git a/fileacquirer/src/vespa/fileacquirer/CMakeLists.txt b/fileacquirer/src/vespa/fileacquirer/CMakeLists.txt index 6afe392dddd..1353636af2d 100644 --- a/fileacquirer/src/vespa/fileacquirer/CMakeLists.txt +++ b/fileacquirer/src/vespa/fileacquirer/CMakeLists.txt @@ -4,4 +4,3 @@ vespa_add_library(fileacquirer STATIC DEPENDS ) vespa_generate_config(fileacquirer filedistributorrpc.def) -install(FILES filedistributorrpc.def DESTINATION var/db/vespa/config_server/serverdb/classes) diff --git a/filedistribution/src/apps/filedistributor/filedistributor.cpp b/filedistribution/src/apps/filedistributor/filedistributor.cpp index 0c89fb08b6b..8625549dc19 100644 --- 
a/filedistribution/src/apps/filedistributor/filedistributor.cpp +++ b/filedistribution/src/apps/filedistributor/filedistributor.cpp @@ -5,8 +5,6 @@ #include <cstdlib> #include <boost/program_options.hpp> -#include <boost/exception/diagnostic_information.hpp> -#include <boost/scope_exit.hpp> #include <vespa/fastos/app.h> #include <vespa/config-zookeepers.h> @@ -57,7 +55,29 @@ class FileDistributor : public config::IFetcherCallback<ZookeepersConfig>, const std::shared_ptr<StateServerImpl> _stateServer; private: - std::thread _downloaderEventLoopThread; + class GuardedThread { + public: + GuardedThread(const GuardedThread &) = delete; + GuardedThread & operator = (const GuardedThread &) = delete; + GuardedThread(const std::shared_ptr<FileDownloader> & downloader) : + _downloader(downloader), + _thread([downloader=_downloader] () { downloader->runEventLoop(); }) + { } + ~GuardedThread() { + _downloader->close(); + if (_thread.joinable()) { + _thread.join(); + } + if ( !_downloader->drained() ) { + LOG(error, "The filedownloader did not drain fully. 
We will just exit quickly and let a restart repair it for us."); + std::quick_exit(67); + } + } + private: + std::shared_ptr<FileDownloader> _downloader; + std::thread _thread; + }; + std::unique_ptr<GuardedThread> _downloaderEventLoopThread; config::ConfigFetcher _configFetcher; template <class T> @@ -74,22 +94,16 @@ class FileDistributor : public config::IFetcherCallback<ZookeepersConfig>, const FiledistributorConfig& fileDistributorConfig, const FiledistributorrpcConfig& rpcConfig) :_zk(track(new ZKFacade(zooKeepersConfig.zookeeperserverlist))), - _model(track(new FileDistributionModelImpl( - fileDistributorConfig.hostname, - fileDistributorConfig.torrentport, - _zk))), + _model(track(new FileDistributionModelImpl(fileDistributorConfig.hostname, fileDistributorConfig.torrentport, _zk))), _tracker(track(new FileDistributorTrackerImpl(_model))), - _downloader(track(new FileDownloader(_tracker, - fileDistributorConfig.hostname, - fileDistributorConfig.torrentport, - boost::filesystem::path(fileDistributorConfig.filedbpath)))), + _downloader(track(new FileDownloader(_tracker, fileDistributorConfig.hostname, fileDistributorConfig.torrentport, Path(fileDistributorConfig.filedbpath)))), _manager(track(new FileDownloaderManager(_downloader, _model))), _rpcHandler(track(new FileDistributorRPC(rpcConfig.connectionspec, _manager))), _stateServer(track(new StateServerImpl(fileDistributorConfig.stateport))), - _downloaderEventLoopThread([downloader=_downloader] () { downloader->runEventLoop(); }), + _downloaderEventLoopThread(), _configFetcher(configUri.getContext()) - { + _downloaderEventLoopThread = std::make_unique<GuardedThread>(_downloader); _manager->start(); _rpcHandler->start(); @@ -109,8 +123,7 @@ class FileDistributor : public config::IFetcherCallback<ZookeepersConfig>, //Do not waste time retrying zookeeper operations when going down. 
_zk->disableRetries(); - _downloader->close(); - _downloaderEventLoopThread.join(); + _downloaderEventLoopThread.reset(); } }; @@ -274,50 +287,29 @@ FileDistributorApplication::Main() { EV_STOPPING(programName, "Clean exit"); return 0; } catch(const FileDoesNotExistException & e) { - std::string s = boost::diagnostic_information(e); - EV_STOPPING(programName, s.c_str()); + EV_STOPPING(programName, e.what()); return 1; } catch(const ZKNodeDoesNotExistsException & e) { - std::string s = boost::diagnostic_information(e); - EV_STOPPING(programName, s.c_str()); + EV_STOPPING(programName, e.what()); return 2; } catch(const ZKSessionExpired & e) { - std::string s = boost::diagnostic_information(e); - EV_STOPPING(programName, s.c_str()); + EV_STOPPING(programName, e.what()); return 3; } catch(const config::ConfigTimeoutException & e) { - std::string s = boost::diagnostic_information(e); - EV_STOPPING(programName, s.c_str()); + EV_STOPPING(programName, e.what()); return 4; - } catch(const FailedListeningException & e) { - std::string s = boost::diagnostic_information(e); - EV_STOPPING(programName, s.c_str()); + } catch(const vespalib::PortListenException & e) { + EV_STOPPING(programName, e.what()); return 5; + } catch(const ZKConnectionLossException & e) { + EV_STOPPING(programName, e.what()); + return 6; + } catch(const ZKFailedConnecting & e) { + EV_STOPPING(programName, e.what()); + return 7; } catch(const ZKGenericException & e) { - std::string s = boost::diagnostic_information(e); - EV_STOPPING(programName, s.c_str()); + EV_STOPPING(programName, e.what()); return 99; - } catch(const boost::unknown_exception & e) { - std::string s = boost::diagnostic_information(e); - LOG(warning, "Caught '%s'", s.c_str()); - EV_STOPPING(programName, s.c_str()); - return 255; -#if 0 - /* - These are kept hanging around for reference as to how it was when we just held our ears - singing "na, na, na, na..." no matter if the sun was shining or if the world imploded. 
- */ - } catch(const boost::exception& e) { - std::string s = boost::diagnostic_information(e); - LOG(error, "Caught '%s'", s.c_str()); - EV_STOPPING(programName, s.c_str()); - return -1; - } catch(const std::string& msg) { - std::string s = "Error: " + msg; - LOG(error, "Caught '%s'", s.c_str()); - EV_STOPPING(programName, s.c_str()); - return -1; -#endif } } diff --git a/filedistribution/src/apps/status/status-filedistribution.cpp b/filedistribution/src/apps/status/status-filedistribution.cpp index d7dc62e29c4..87fa04e503b 100644 --- a/filedistribution/src/apps/status/status-filedistribution.cpp +++ b/filedistribution/src/apps/status/status-filedistribution.cpp @@ -112,7 +112,7 @@ printStatusRetryIfZKProblem(const std::string& zkservers, const std::string& zkL try { return printStatus(zkservers); } catch (ZKNodeDoesNotExistsException& e) { - LOG(debug, "Node does not exists, assuming concurrent update. %s", boost::diagnostic_information(e).c_str()); + LOG(debug, "Node does not exists, assuming concurrent update. 
%s", e.what()); } catch (ZKSessionExpired& e) { LOG(debug, "Session expired."); diff --git a/filedistribution/src/tests/common/testCommon.cpp b/filedistribution/src/tests/common/testCommon.cpp index 699d3628547..1902d4ec7db 100644 --- a/filedistribution/src/tests/common/testCommon.cpp +++ b/filedistribution/src/tests/common/testCommon.cpp @@ -12,13 +12,13 @@ namespace fd = filedistribution; const size_t bufferCapacity = 10; -fd::Move<fd::Buffer> +fd::Buffer getBuffer() { const char* test = "test"; fd::Buffer buffer(test, test + strlen(test)); buffer.reserve(bufferCapacity); buffer.push_back(0); - return fd::move(buffer); + return buffer; } BOOST_AUTO_TEST_CASE(bufferTest) { diff --git a/filedistribution/src/tests/rpc/CMakeLists.txt b/filedistribution/src/tests/rpc/CMakeLists.txt index faf23f1d464..88ed4527449 100644 --- a/filedistribution/src/tests/rpc/CMakeLists.txt +++ b/filedistribution/src/tests/rpc/CMakeLists.txt @@ -4,6 +4,7 @@ vespa_add_executable(filedistribution_rpc_test_app TEST testfileprovider.cpp DEPENDS filedistribution_filedistributorrpc + filedistribution_common ) target_compile_options(filedistribution_rpc_test_app PRIVATE -DTORRENT_DEBUG -DTORRENT_DISABLE_ENCRYPTION -DTORRENT_DISABLE_DHT -DWITH_SHIPPED_GEOIP_H -DBOOST_ASIO_HASH_MAP_BUCKETS=1021 -DBOOST_EXCEPTION_DISABLE -DBOOST_ASIO_ENABLE_CANCELIO -DBOOST_ASIO_DYN_LINK -DTORRENT_LINKING_SHARED) vespa_add_target_system_dependency(filedistribution_rpc_test_app boost boost_system-mt-d) diff --git a/filedistribution/src/tests/rpc/mockfileprovider.h b/filedistribution/src/tests/rpc/mockfileprovider.h index 745acc7196c..230cd0d0382 100644 --- a/filedistribution/src/tests/rpc/mockfileprovider.h +++ b/filedistribution/src/tests/rpc/mockfileprovider.h @@ -14,11 +14,11 @@ public: boost::barrier _queueForeverBarrier; - boost::optional<boost::filesystem::path> getPath(const std::string& fileReference) { + boost::optional<Path> getPath(const std::string& fileReference) { if (fileReference == "dd") { - return 
boost::filesystem::path("direct/result/path"); + return Path("direct/result/path"); } else { - return boost::optional<boost::filesystem::path>(); + return boost::optional<Path>(); } } diff --git a/filedistribution/src/tests/zkfacade/test-zkfacade.cpp b/filedistribution/src/tests/zkfacade/test-zkfacade.cpp index ada601742db..8589cb95d10 100644 --- a/filedistribution/src/tests/zkfacade/test-zkfacade.cpp +++ b/filedistribution/src/tests/zkfacade/test-zkfacade.cpp @@ -36,7 +36,7 @@ struct Watcher : public ZKFacade::NodeChangedWatcher { struct Fixture { ComponentsDeleter _componentsDeleter; std::shared_ptr<ZKFacade> zk; - ZKFacade::Path testNode; + Path testNode; Fixture() { zoo_set_debug_level(ZOO_LOG_LEVEL_WARN); @@ -109,7 +109,7 @@ BOOST_AUTO_TEST_CASE(createSequenceNode) { zk->setData(testNode, "", 0); - ZKFacade::Path prefix = testNode / "prefix"; + Path prefix = testNode / "prefix"; zk->createSequenceNode(prefix, "test", 4); zk->createSequenceNode(prefix, "test", 4); zk->createSequenceNode(prefix, "test", 4); @@ -148,7 +148,7 @@ BOOST_AUTO_TEST_CASE(retainOnly) BOOST_AUTO_TEST_CASE(addEphemeralNode) { - ZKFacade::Path ephemeralNode = "/test-ephemeral-node"; + Path ephemeralNode = "/test-ephemeral-node"; zk->removeIfExists(ephemeralNode); //Checked deleter is ok here since we're not installing any watchers diff --git a/filedistribution/src/tests/zkfiledbmodel/test-zkfiledbmodel.cpp b/filedistribution/src/tests/zkfiledbmodel/test-zkfiledbmodel.cpp index 6a3a87aac96..c102a235603 100644 --- a/filedistribution/src/tests/zkfiledbmodel/test-zkfiledbmodel.cpp +++ b/filedistribution/src/tests/zkfiledbmodel/test-zkfiledbmodel.cpp @@ -16,11 +16,8 @@ using namespace filedistribution; -typedef boost::filesystem::path Path; - namespace { - struct Fixture { ComponentsDeleter _componentsDeleter; std::shared_ptr<ZKFacade> zk; diff --git a/filedistribution/src/vespa/filedistribution/common/buffer.h b/filedistribution/src/vespa/filedistribution/common/buffer.h index 
2325b3e11ee..79927119e1f 100644 --- a/filedistribution/src/vespa/filedistribution/common/buffer.h +++ b/filedistribution/src/vespa/filedistribution/common/buffer.h @@ -5,37 +5,6 @@ namespace filedistribution { -struct USED_FOR_MOVING {}; - -template <class T> -class Move { - mutable T _holder; -public: - Move(T& toMove) - :_holder(USED_FOR_MOVING()) - { - _holder.swap(toMove); - } - - Move(const Move& other) - :_holder(USED_FOR_MOVING()) - { - _holder.swap(other._holder); - } - - void swap(T& t) const { - _holder.swap(t); - } - -private: - Move& operator=(const Move&); -}; - -template <class T> -inline Move<T> move(T& t) { - return Move<T>(t); -} - class Buffer { size_t _capacity; char* _buf; @@ -54,13 +23,15 @@ public: _size(0) {} - Buffer(const Move<Buffer>& buffer); - - explicit Buffer(USED_FOR_MOVING) - :_capacity(0), - _buf(0), - _size(0) - {} + Buffer(Buffer && rhs) : + _capacity(rhs._capacity), + _buf(rhs._buf), + _size(rhs._size) + { + rhs._capacity = 0; + rhs._size = 0; + rhs._buf = nullptr; + } template <typename ITER> Buffer(ITER beginIter, ITER endIter) @@ -75,13 +46,8 @@ public: delete[] _buf; } - size_t capacity() const { - return _capacity; - } - - size_t size() const { - return _size; - } + size_t capacity() const { return _capacity; } + size_t size() const { return _size; } //might expose uninitialized memory void resize(size_t newSize) { @@ -114,39 +80,14 @@ public: _buf[_size++] = c; } - iterator begin() { - return _buf; - } - - iterator end() { - return _buf + _size; - } - - const_iterator begin() const { - return _buf; - } - - const_iterator end() const { - return _buf + _size; - } - - char operator[](size_t i) const { - return _buf[i]; - } - - char& operator[](size_t i) { - return _buf[i]; - } + iterator begin() { return _buf; } + iterator end() { return _buf + _size; } + const_iterator begin() const { return _buf; } + const_iterator end() const { return _buf + _size; } + char operator[](size_t i) const { return _buf[i]; } + char& 
operator[](size_t i) { return _buf[i]; } }; -inline Buffer::Buffer(const Move<Buffer>& buffer) - :_capacity(0), - _buf(0), - _size(0) -{ - buffer.swap(*this); -} - } //namespace filedistribution diff --git a/filedistribution/src/vespa/filedistribution/common/exception.cpp b/filedistribution/src/vespa/filedistribution/common/exception.cpp index 7195a99d702..c9bb5f43ab0 100644 --- a/filedistribution/src/vespa/filedistribution/common/exception.cpp +++ b/filedistribution/src/vespa/filedistribution/common/exception.cpp @@ -2,23 +2,8 @@ #include <vespa/fastos/fastos.h> #include "exception.h" -#include <execinfo.h> +namespace filedistribution { -std::ostream& -filedistribution::operator<<(std::ostream& stream, const Backtrace& backtrace) { - char** strings = backtrace_symbols( - &*backtrace._frames.begin(), backtrace._size); +VESPA_IMPLEMENT_EXCEPTION(FileDoesNotExistException, vespalib::Exception); - stream <<"Backtrace:" <<std::endl; - for (size_t i = 0; i<backtrace._size; ++i) { - stream <<strings[i] <<std::endl; - } - - free(strings); - return stream; } - - -filedistribution::Backtrace::Backtrace() - :_size(backtrace(&*_frames.begin(), _frames.size())) -{} diff --git a/filedistribution/src/vespa/filedistribution/common/exception.h b/filedistribution/src/vespa/filedistribution/common/exception.h index ee1f931ade0..f1d1a6c7052 100644 --- a/filedistribution/src/vespa/filedistribution/common/exception.h +++ b/filedistribution/src/vespa/filedistribution/common/exception.h @@ -1,66 +1,13 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#pragma once -#include <boost/exception/all.hpp> -#include <boost/current_function.hpp> -#include <boost/exception/all.hpp> -#include <boost/array.hpp> -#include <boost/version.hpp> +#include <vespa/vespalib/util/exceptions.h> +#include <boost/filesystem/path.hpp> namespace filedistribution { -class Backtrace { - static const size_t _maxBacktraceSize = 200; - public: - boost::array<void*, _maxBacktraceSize> _frames; - const size_t _size; - - Backtrace(); -}; +using Path = boost::filesystem::path; +VESPA_DEFINE_EXCEPTION(FileDoesNotExistException, vespalib::Exception); -std::ostream& operator<<(std::ostream& stream, const Backtrace& backtrace); - -namespace errorinfo { -typedef boost::error_info<struct tag_Backtrace, Backtrace> Backtrace; -typedef boost::error_info<struct tag_UserMessage, Backtrace> ExplanationForUser; -typedef boost::error_info<struct tag_TorrentMessage, std::string> TorrentMessage; } - -//Exceptions should inherit virtually from boost and std exception, -//see http://www.boost.org/doc/libs/1_39_0/libs/exception/doc/using_virtual_inheritance_in_exception_types.html -struct Exception : virtual boost::exception, virtual std::exception { - Exception() { - *this << errorinfo::Backtrace(Backtrace()); - } -}; - -} //namespace filedistribution - -#if BOOST_VERSION < 103700 -#define BOOST_THROW_EXCEPTION(x)\ - ::boost::throw_exception( ::boost::enable_error_info(x) << \ - ::boost::throw_function(BOOST_CURRENT_FUNCTION) << \ - ::boost::throw_file(__FILE__) << \ - ::boost::throw_line((int)__LINE__) ) - -#endif - - -//********** Begin: Please remove when fixed upstream. -//boost 1.36 & 1.37 bugfix: allow attaching a boost::filesytem::path to a boost::exception -//using the error info mechanism. 
-#include <boost/filesystem/path.hpp> - -namespace boost{ -namespace to_string_detail { -std::basic_ostream<boost::filesystem::path::string_type::value_type, - boost::filesystem::path::string_type::traits_type > & -operator<< -( std::basic_ostream<boost::filesystem::path::string_type::value_type, - boost::filesystem::path::string_type::traits_type >& os, const boost::filesystem::path & ph ); -} -} - -//********** End: Please remove when fixed upstream. - diff --git a/filedistribution/src/vespa/filedistribution/common/vespa_logfwd.cpp b/filedistribution/src/vespa/filedistribution/common/vespa_logfwd.cpp index b944e7ebbb9..c5785f9e24e 100644 --- a/filedistribution/src/vespa/filedistribution/common/vespa_logfwd.cpp +++ b/filedistribution/src/vespa/filedistribution/common/vespa_logfwd.cpp @@ -1,7 +1,6 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. #include <vespa/fastos/fastos.h> #include <stdarg.h> -#include <boost/scoped_array.hpp> #include "logfwd.h" @@ -35,13 +34,14 @@ void filedistribution::logfwd::log_forward(LogLevel level, const char* file, int if (logger.wants(vespaLogLevel)) { const size_t maxSize(0x8000); - boost::scoped_array<char> payload(new char[maxSize]); + std::vector<char> payload(maxSize); + char * buf = &payload[0]; va_list args; va_start(args, fmt); - vsnprintf(payload.get(), maxSize, fmt, args); + vsnprintf(buf, maxSize, fmt, args); va_end(args); - logger.doLog(vespaLogLevel, file, line, "%s", payload.get()); + logger.doLog(vespaLogLevel, file, line, "%s", buf); } } diff --git a/filedistribution/src/vespa/filedistribution/distributor/filedistributortrackerimpl.cpp b/filedistribution/src/vespa/filedistribution/distributor/filedistributortrackerimpl.cpp index 7eb0ab957ff..6420dfd6006 100644 --- a/filedistribution/src/vespa/filedistribution/distributor/filedistributortrackerimpl.cpp +++ b/filedistribution/src/vespa/filedistribution/distributor/filedistributortrackerimpl.cpp @@ 
-5,17 +5,14 @@ #include <libtorrent/tracker_manager.hpp> #include <libtorrent/torrent.hpp> #include <vespa/filedistribution/model/filedistributionmodel.h> +#include <vespa/filedistribution/model/zkfacade.h> #include "filedownloader.h" #include "hostname.h" #include <vespa/log/log.h> LOG_SETUP(".filedistributiontrackerimpl"); -using filedistribution::FileDistributorTrackerImpl; -using filedistribution::FileDownloader; -using filedistribution::FileDistributionModel; -using filedistribution::Scheduler; -using filedistribution::TorrentSP; +using namespace filedistribution; typedef FileDistributionModel::PeerEntries PeerEntries; @@ -162,6 +159,17 @@ FileDistributorTrackerImpl::trackingRequest( } } +void asioWorker(asio::io_service& ioService) +{ + while (!ioService.stopped()) { + try { + ioService.run(); + } catch (const ZKConnectionLossException & e) { + LOG(info, "Connection loss in asioWorker thread, resuming. %s", e.what()); + } + } +} + void FileDistributorTrackerImpl::setDownloader(const std::shared_ptr<FileDownloader>& downloader) { @@ -171,6 +179,6 @@ FileDistributorTrackerImpl::setDownloader(const std::shared_ptr<FileDownloader>& _downloader = downloader; if (downloader) { - _scheduler.reset(new Scheduler([] (asio::io_service& ioService) { ioService.run(); })); + _scheduler.reset(new Scheduler([] (asio::io_service& ioService) { asioWorker(ioService); })); } } diff --git a/filedistribution/src/vespa/filedistribution/distributor/filedistributortrackerimpl.h b/filedistribution/src/vespa/filedistribution/distributor/filedistributortrackerimpl.h index bf72a2b80df..abb7367974a 100644 --- a/filedistribution/src/vespa/filedistribution/distributor/filedistributortrackerimpl.h +++ b/filedistribution/src/vespa/filedistribution/distributor/filedistributortrackerimpl.h @@ -4,9 +4,6 @@ #include <libtorrent/session.hpp> #include <libtorrent/torrent.hpp> -#include <boost/asio/io_service.hpp> -#include <boost/asio/deadline_timer.hpp> - #include 
<vespa/filedistribution/model/filedistributionmodel.h> #include "scheduler.h" #include <mutex> @@ -26,7 +23,7 @@ class FileDistributorTrackerImpl : public FileDistributionTracker { //Use separate worker thread to avoid potential deadlock //between tracker requests and files to download changed requests. - boost::scoped_ptr<Scheduler> _scheduler; + std::unique_ptr<Scheduler> _scheduler; public: FileDistributorTrackerImpl(const std::shared_ptr<FileDistributionModel>& model); diff --git a/filedistribution/src/vespa/filedistribution/distributor/filedownloader.cpp b/filedistribution/src/vespa/filedistribution/distributor/filedownloader.cpp index 7d5d7acceb2..38080e5239c 100644 --- a/filedistribution/src/vespa/filedistribution/distributor/filedownloader.cpp +++ b/filedistribution/src/vespa/filedistribution/distributor/filedownloader.cpp @@ -1,5 +1,6 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. #include <vespa/fastos/fastos.h> +#include <vespa/filedistribution/model/zkfacade.h> #include "filedownloader.h" #include "hostname.h" @@ -19,7 +20,7 @@ #include <vespa/log/log.h> LOG_SETUP(".filedownloader"); -using filedistribution::FileDownloader; +using namespace filedistribution; namespace fs = boost::filesystem; using libtorrent::sha1_hash; @@ -124,6 +125,10 @@ createSessionSettings() { } //anonymous namespace +namespace filedistribution { + VESPA_IMPLEMENT_EXCEPTION(NoSuchTorrentException, vespalib::Exception); +} + struct FileDownloader::EventHandler { FileDownloader& _fileDownloader; @@ -137,7 +142,7 @@ struct FileDownloader::EventHandler } void operator()(const libtorrent::listen_failed_alert& alert) const { - BOOST_THROW_EXCEPTION(FailedListeningException(alert.endpoint.address().to_string(), alert.endpoint.port(), alert.message())); + throw vespalib::PortListenException(alert.endpoint.port(), alert.endpoint.address().to_string(), alert.message(), VESPA_STRLOC); } void operator()(const 
libtorrent::fastresume_rejected_alert& alert) const { LOG(debug, "alert %s: %s", alert.what(), alert.message().c_str()); @@ -214,8 +219,9 @@ FileDownloader::FileDownloader(const std::shared_ptr<FileDistributionTracker>& t _hostName(hostName), _port(port) { - if (!fs::exists(_dbPath)) + if (!fs::exists(_dbPath)) { fs::create_directories(_dbPath); + } addNewDbFiles(_dbPath); _session.set_settings(createSessionSettings()); @@ -226,18 +232,29 @@ FileDownloader::FileDownloader(const std::shared_ptr<FileDistributionTracker>& t } -FileDownloader::~FileDownloader() { +void +FileDownloader::drain() { EventHandler eventHandler(this); size_t cnt = 0; + size_t waitCount = 0; do { - LOG(debug, "destructor waiting for %zu SRD alerts", _outstanding_SRD_requests); + LOG(debug, "destructor waiting for %zu SRD alerts", _outstanding_SRD_requests.load()); while (_session.wait_for_alert(libtorrent::milliseconds(20))) { std::unique_ptr<libtorrent::alert> alert = _session.pop_alert(); eventHandler.handle(std::move(alert)); ++cnt; } - } while (_outstanding_SRD_requests > 0); - LOG(debug, "handled %zu alerts in destructor", cnt); + waitCount++; + } while (!drained() && (waitCount < 1000)); + LOG(debug, "handled %zu alerts during draining.", cnt); + if (!drained()) { + LOG(error, "handled %zu alerts during draining. 
But there are still %zu left.", cnt, _outstanding_SRD_requests.load()); + LOG(error, "We have been waiting for stuff that did not happen."); + } +} + +FileDownloader::~FileDownloader() { + assert(drained()); } void @@ -249,7 +266,7 @@ FileDownloader::listen() { if (!ec && (_session.listen_port() == _port)) { return; } - BOOST_THROW_EXCEPTION(FailedListeningException(_hostName, _port)); + throw vespalib::PortListenException(_port, _hostName, VESPA_STRLOC); } boost::optional< fs::path > @@ -299,6 +316,7 @@ FileDownloader::hasTorrent(const std::string& fileReference) const { void FileDownloader::addTorrent(const std::string& fileReference, const Buffer& buffer) { + if (closed()) { return; } LockGuard guard(_modifyTorrentsDownloadingMutex); boost::optional<ResumeDataBuffer> resumeData = getResumeData(fileReference); @@ -308,8 +326,7 @@ FileDownloader::addTorrent(const std::string& fileReference, const Buffer& buffe libtorrent::lazy_entry entry; #pragma GCC diagnostic ignored "-Wdeprecated-declarations" - libtorrent::lazy_bdecode(&*buffer.begin(), &*buffer.end(), - entry); //out + libtorrent::lazy_bdecode(&*buffer.begin(), &*buffer.end(), entry); //out #pragma GCC diagnostic pop libtorrent::add_torrent_params torrentParams; @@ -319,8 +336,9 @@ FileDownloader::addTorrent(const std::string& fileReference, const Buffer& buffe torrentParams.auto_managed = false; torrentParams.paused = false; - if (resumeData) + if (resumeData) { torrentParams.resume_data = *resumeData; //vector will be swapped + } libtorrent::torrent_handle torrentHandle = _session.add_torrent(torrentParams); @@ -348,6 +366,7 @@ FileDownloader::deleteTorrentData(const libtorrent::torrent_handle& torrent, Loc void FileDownloader::removeAllTorrentsBut(const std::set<std::string> & filesToRetain) { + if (closed()) { return; } LockGuard guard(_modifyTorrentsDownloadingMutex); std::set<std::string> currentFiles; @@ -373,11 +392,19 @@ FileDownloader::removeAllTorrentsBut(const std::set<std::string> & 
filesToRetain void FileDownloader::runEventLoop() { EventHandler eventHandler(this); while ( ! closed() ) { - if (_session.wait_for_alert(libtorrent::milliseconds(100))) { - std::unique_ptr<libtorrent::alert> alert = _session.pop_alert(); - eventHandler.handle(std::move(alert)); + try { + if (_session.wait_for_alert(libtorrent::milliseconds(100))) { + std::unique_ptr<libtorrent::alert> alert = _session.pop_alert(); + eventHandler.handle(std::move(alert)); + } + } catch (const ZKConnectionLossException & e) { + LOG(info, "Connection loss in downloader event loop, resuming. %s", e.what()); + } catch (const vespalib::PortListenException & e) { + LOG(error, "Failed listening to torrent port : %s", e.what()); + std::quick_exit(21); } } + drain(); } bool diff --git a/filedistribution/src/vespa/filedistribution/distributor/filedownloader.h b/filedistribution/src/vespa/filedistribution/distributor/filedownloader.h index 38de8ac4357..248a906ccb5 100644 --- a/filedistribution/src/vespa/filedistribution/distributor/filedownloader.h +++ b/filedistribution/src/vespa/filedistribution/distributor/filedownloader.h @@ -3,12 +3,7 @@ #include <vector> #include <mutex> -#include <boost/filesystem/path.hpp> #include <boost/optional.hpp> -#include <boost/multi_index_container.hpp> -#include <boost/multi_index/indexed_by.hpp> -#include <boost/multi_index/member.hpp> -#include <boost/multi_index/ordered_index.hpp> #include <libtorrent/session.hpp> @@ -20,16 +15,7 @@ namespace filedistribution { -struct NoSuchTorrentException : public Exception {}; - -struct FailedListeningException : public Exception { - FailedListeningException(const std::string& hostName, int port, const std::string & message) { - *this << errorinfo::HostName(hostName) << errorinfo::Port(port) << errorinfo::TorrentMessage(message); - } - FailedListeningException(const std::string& hostName, int port) { - *this <<errorinfo::HostName(hostName) << errorinfo::Port(port); - } -}; 
+VESPA_DEFINE_EXCEPTION(NoSuchTorrentException, vespalib::Exception); class FileDownloader { @@ -38,7 +24,7 @@ class FileDownloader ~LogSessionDeconstructed(); }; - size_t _outstanding_SRD_requests; + std::atomic<size_t> _outstanding_SRD_requests; std::shared_ptr<FileDistributionTracker> _tracker; std::mutex _modifyTorrentsDownloadingMutex; @@ -49,7 +35,7 @@ class FileDownloader libtorrent::session _session; std::atomic<bool> _closed; - const boost::filesystem::path _dbPath; + const Path _dbPath; typedef std::vector<char> ResumeDataBuffer; boost::optional<ResumeDataBuffer> getResumeData(const std::string& fileReference); @@ -57,6 +43,8 @@ class FileDownloader void deleteTorrentData(const libtorrent::torrent_handle& torrent, LockGuard&); void listen(); + bool closed() const; + void drain(); public: // accounting of save-resume-data requests: void didRequestSRD() { ++_outstanding_SRD_requests; } @@ -67,14 +55,14 @@ public: FileDownloader(const std::shared_ptr<FileDistributionTracker>& tracker, const std::string& hostName, int port, - const boost::filesystem::path& dbPath); + const Path& dbPath); ~FileDownloader(); DirectoryGuard::UP getGuard() { return std::make_unique<DirectoryGuard>(_dbPath); } void runEventLoop(); void addTorrent(const std::string& fileReference, const Buffer& buffer); bool hasTorrent(const std::string& fileReference) const; - boost::optional<boost::filesystem::path> pathToCompletedFile(const std::string& fileReference) const; + boost::optional<Path> pathToCompletedFile(const std::string& fileReference) const; void removeAllTorrentsBut(const std::set<std::string> & filesToRetain); void signalIfFinishedDownloading(const std::string& fileReference); @@ -83,7 +71,7 @@ public: void setMaxDownloadSpeed(double MBPerSec); void setMaxUploadSpeed(double MBPerSec); void close(); - bool closed() const; + bool drained() const { return _outstanding_SRD_requests == 0; } const std::string _hostName; const int _port; diff --git 
a/filedistribution/src/vespa/filedistribution/distributor/filedownloadermanager.cpp b/filedistribution/src/vespa/filedistribution/distributor/filedownloadermanager.cpp index bf17b1bc8d1..5b6ad4f83ff 100644 --- a/filedistribution/src/vespa/filedistribution/distributor/filedownloadermanager.cpp +++ b/filedistribution/src/vespa/filedistribution/distributor/filedownloadermanager.cpp @@ -12,6 +12,7 @@ LOG_SETUP(".filedownloadermanager"); using namespace std::literals; using filedistribution::FileDownloaderManager; +using filedistribution::Path; namespace { void logStartDownload(const std::set<std::string> & filesToDownload) { @@ -50,7 +51,7 @@ FileDownloaderManager::start() FileDistributionModel::FilesToDownloadChangedSignal::slot_type(std::ref(_startDownloads)).track_foreign(shared_from_this())); } -boost::optional< boost::filesystem::path > +boost::optional< Path > FileDownloaderManager::getPath(const std::string& fileReference) { return _fileDownloader->pathToCompletedFile(fileReference); } @@ -116,14 +117,14 @@ FileDownloaderManager::SetFinishedDownloadingStatus::SetFinishedDownloadingStatu void FileDownloaderManager::SetFinishedDownloadingStatus::operator()( - const std::string& fileReference, const boost::filesystem::path&) { + const std::string& fileReference, const Path&) { //Prevent concurrent modifications to peer node in zk. LockGuard updateFilesToDownloadGuard(_parent._updateFilesToDownloadMutex); try { _parent._fileDistributionModel->peerFinished(fileReference); - } catch(const FileDistributionModel::NotPeer&) { //Probably a concurrent removal of the torrent. + } catch (const NotPeer &) { //Probably a concurrent removal of the torrent. //improve chance of libtorrent session being updated. 
std::this_thread::sleep_for(100ms); diff --git a/filedistribution/src/vespa/filedistribution/distributor/filedownloadermanager.h b/filedistribution/src/vespa/filedistribution/distributor/filedownloadermanager.h index 1294f7d7f77..64a2a8a744e 100644 --- a/filedistribution/src/vespa/filedistribution/distributor/filedownloadermanager.h +++ b/filedistribution/src/vespa/filedistribution/distributor/filedownloadermanager.h @@ -23,7 +23,7 @@ class FileDownloaderManager : public FileProvider, class SetFinishedDownloadingStatus { FileDownloaderManager& _parent; public: - void operator()(const std::string& fileReference, const boost::filesystem::path&); + void operator()(const std::string& fileReference, const Path&); SetFinishedDownloadingStatus(FileDownloaderManager*); }; @@ -49,7 +49,7 @@ public: ~FileDownloaderManager(); void start(); - boost::optional<boost::filesystem::path> getPath(const std::string& fileReference); + boost::optional<Path> getPath(const std::string& fileReference); void downloadFile(const std::string& fileReference); //FileProvider overrides diff --git a/filedistribution/src/vespa/filedistribution/distributor/hostname.cpp b/filedistribution/src/vespa/filedistribution/distributor/hostname.cpp index acd5c982957..7b33632df81 100644 --- a/filedistribution/src/vespa/filedistribution/distributor/hostname.cpp +++ b/filedistribution/src/vespa/filedistribution/distributor/hostname.cpp @@ -1,22 +1,24 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include "hostname.h" -#include <boost/asio.hpp> - +#include <vespa/vespalib/net/socket_address.h> #include <vespa/log/log.h> LOG_SETUP(".hostname"); -#include <vespa/vespalib/net/socket_address.h> -namespace asio = boost::asio; +namespace filedistribution { std::string -filedistribution::lookupIPAddress(const std::string& hostName) +lookupIPAddress(const std::string& hostName) { auto best_addr = vespalib::SocketAddress::select_remote(0, hostName.c_str()); if (!best_addr.valid()) { - BOOST_THROW_EXCEPTION(filedistribution::FailedResolvingHostName(hostName)); + throw filedistribution::FailedResolvingHostName(hostName, VESPA_STRLOC); } const std::string address = best_addr.ip_address(); LOG(debug, "Resolved hostname'%s' as '%s'", hostName.c_str(), address.c_str()); return address; } + +VESPA_IMPLEMENT_EXCEPTION(FailedResolvingHostName, vespalib::Exception); + +} diff --git a/filedistribution/src/vespa/filedistribution/distributor/hostname.h b/filedistribution/src/vespa/filedistribution/distributor/hostname.h index 12732029c92..385a908ab9f 100644 --- a/filedistribution/src/vespa/filedistribution/distributor/hostname.h +++ b/filedistribution/src/vespa/filedistribution/distributor/hostname.h @@ -6,17 +6,8 @@ namespace filedistribution { -namespace errorinfo { -typedef boost::error_info<struct tag_HostName, std::string> HostName; -typedef boost::error_info<struct tag_Port, int> Port; -}; - - std::string lookupIPAddress(const std::string& hostName); -struct FailedResolvingHostName : public Exception { - FailedResolvingHostName(const std::string& hostName) { - *this <<errorinfo::HostName(hostName); - } -}; +VESPA_DEFINE_EXCEPTION(FailedResolvingHostName, vespalib::Exception); + } diff --git a/filedistribution/src/vespa/filedistribution/manager/createtorrent.cpp b/filedistribution/src/vespa/filedistribution/manager/createtorrent.cpp index fd54b65cdbc..52b05cd78be 100644 --- a/filedistribution/src/vespa/filedistribution/manager/createtorrent.cpp +++ 
b/filedistribution/src/vespa/filedistribution/manager/createtorrent.cpp @@ -61,19 +61,19 @@ fileReferenceToString(const libtorrent::sha1_hash& fileReference) { filedistribution:: CreateTorrent:: -CreateTorrent(const boost::filesystem::path& path) +CreateTorrent(const Path& path) :_path(path), _entry(createEntry(_path)) {} -const filedistribution::Move<filedistribution::Buffer> +filedistribution::Buffer filedistribution:: CreateTorrent:: bencode() const { Buffer buffer(static_cast<int>(targetTorrentSize)); libtorrent::bencode(std::back_inserter(buffer), _entry); - return move(buffer); + return buffer; } const std::string diff --git a/filedistribution/src/vespa/filedistribution/manager/createtorrent.h b/filedistribution/src/vespa/filedistribution/manager/createtorrent.h index 93d56fa9e6f..164f93a5625 100644 --- a/filedistribution/src/vespa/filedistribution/manager/createtorrent.h +++ b/filedistribution/src/vespa/filedistribution/manager/createtorrent.h @@ -2,20 +2,20 @@ #pragma once #include <vector> -#include <boost/filesystem/path.hpp> #include <libtorrent/create_torrent.hpp> #include <vespa/filedistribution/common/buffer.h> +#include <vespa/filedistribution/common/exception.h> namespace filedistribution { class CreateTorrent { - boost::filesystem::path _path; + Path _path; libtorrent::entry _entry; public: - CreateTorrent(const boost::filesystem::path& path); - const Move<Buffer> bencode() const; + CreateTorrent(const Path& path); + Buffer bencode() const; const std::string fileReference() const; }; diff --git a/filedistribution/src/vespa/filedistribution/manager/filedb.h b/filedistribution/src/vespa/filedistribution/manager/filedb.h index d5031fff8f1..9b62e583f6d 100644 --- a/filedistribution/src/vespa/filedistribution/manager/filedb.h +++ b/filedistribution/src/vespa/filedistribution/manager/filedb.h @@ -2,15 +2,14 @@ #pragma once #include <string> -#include <boost/filesystem/path.hpp> #include <vespa/filedistribution/model/filedbmodel.h> namespace 
filedistribution { class FileDB { - boost::filesystem::path _dbPath; + Path _dbPath; public: - FileDB(boost::filesystem::path dbPath); + FileDB(Path dbPath); DirectoryGuard::UP getGuard() { return std::make_unique<DirectoryGuard>(_dbPath); } /** * @@ -19,7 +18,7 @@ public: * @param name The name the file shall have. * @return true if it was added, false if it was already present. */ - bool add(const DirectoryGuard & directoryGuard, boost::filesystem::path original, const std::string& name); + bool add(const DirectoryGuard & directoryGuard, Path original, const std::string& name); }; } //namespace filedistribution diff --git a/filedistribution/src/vespa/filedistribution/manager/filedistributionmanager.cpp b/filedistribution/src/vespa/filedistribution/manager/filedistributionmanager.cpp index 057902327a9..c87360f3f67 100644 --- a/filedistribution/src/vespa/filedistribution/manager/filedistributionmanager.cpp +++ b/filedistribution/src/vespa/filedistribution/manager/filedistributionmanager.cpp @@ -58,7 +58,7 @@ std::unique_ptr<ZKLogging> _G_zkLogging; } catch(const ZKException& e) { \ std::stringstream ss; \ ss << "In" << __FUNCTION__ << ": "; \ - ss << diagnosticUserLevelMessage(e); \ + ss << e.what(); \ throwRuntimeException(ss.str().c_str(), env); \ returnStatement; \ } catch(const std::exception& e) { \ diff --git a/filedistribution/src/vespa/filedistribution/manager/stderr_logfwd.cpp b/filedistribution/src/vespa/filedistribution/manager/stderr_logfwd.cpp index a3f97b28597..a5041554114 100644 --- a/filedistribution/src/vespa/filedistribution/manager/stderr_logfwd.cpp +++ b/filedistribution/src/vespa/filedistribution/manager/stderr_logfwd.cpp @@ -4,7 +4,6 @@ #include <stdarg.h> #include <iostream> -#include <boost/scoped_array.hpp> #include <stdio.h> @@ -15,12 +14,13 @@ void filedistribution::logfwd::log_forward(LogLevel level, const char* file, int return; const size_t maxSize(0x8000); - boost::scoped_array<char> payload(new char[maxSize]); + std::vector<char> 
payload(maxSize); + char * buf = &payload[0]; va_list args; va_start(args, fmt); - vsnprintf(payload.get(), maxSize, fmt, args); + vsnprintf(buf, maxSize, fmt, args); va_end(args); - std::cerr <<"Error: " <<payload.get() <<" File: " <<file <<" Line: " <<line <<std::endl; + std::cerr <<"Error: " << buf <<" File: " <<file <<" Line: " <<line <<std::endl; } diff --git a/filedistribution/src/vespa/filedistribution/model/deployedfilestodownload.cpp b/filedistribution/src/vespa/filedistribution/model/deployedfilestodownload.cpp index 1e80ff375a4..7829744949f 100644 --- a/filedistribution/src/vespa/filedistribution/model/deployedfilestodownload.cpp +++ b/filedistribution/src/vespa/filedistribution/model/deployedfilestodownload.cpp @@ -8,9 +8,9 @@ #include <vespa/filedistribution/common/logfwd.h> using filedistribution::DeployedFilesToDownload; +using filedistribution::Path; typedef std::vector<std::string> StringVector; -typedef boost::filesystem::path Path; namespace filedistribution { @@ -27,7 +27,7 @@ readApplicationId(filedistribution::ZKFacade & zk, const Path & deployNode) } -const DeployedFilesToDownload::Path +const Path DeployedFilesToDownload::addNewDeployNode(Path parentPath, const FileReferences& files) { Path path = parentPath / "deploy_"; diff --git a/filedistribution/src/vespa/filedistribution/model/deployedfilestodownload.h b/filedistribution/src/vespa/filedistribution/model/deployedfilestodownload.h index aeed0922fc8..36c89e89501 100644 --- a/filedistribution/src/vespa/filedistribution/model/deployedfilestodownload.h +++ b/filedistribution/src/vespa/filedistribution/model/deployedfilestodownload.h @@ -1,18 +1,16 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#pragma once -#include <boost/filesystem/path.hpp> #include "zkfacade.h" #include "zkfiledbmodel.h" namespace filedistribution { -const std::string readApplicationId(ZKFacade & zk, const boost::filesystem::path & deployNode); +const std::string readApplicationId(ZKFacade & zk, const Path & deployNode); class DeployedFilesToDownload { //includes the current deploy run; static const size_t numberOfDeploysToKeepFiles = 2; - typedef boost::filesystem::path Path; ZKFacade& _zk; @@ -22,7 +20,7 @@ class DeployedFilesToDownload { //Nothrow template <typename INSERT_ITERATOR> - void readDeployFile(const boost::filesystem::path& path, INSERT_ITERATOR insertionIterator); + void readDeployFile(const Path& path, INSERT_ITERATOR insertionIterator); void addAppIdToDeployNode(const Path & deployNode, const std::string & appId); std::map<std::string, std::vector<std::string> > groupChildrenByAppId(const Path & parentPath, const std::vector<std::string> & children); void deleteExpiredDeployNodes(Path parentPath, std::vector<std::string> children); diff --git a/filedistribution/src/vespa/filedistribution/model/filedbmodel.h b/filedistribution/src/vespa/filedistribution/model/filedbmodel.h index d7803b8afea..1f2d24f2ce0 100644 --- a/filedistribution/src/vespa/filedistribution/model/filedbmodel.h +++ b/filedistribution/src/vespa/filedistribution/model/filedbmodel.h @@ -11,23 +11,17 @@ namespace filedistribution { class DirectoryGuard { public: typedef std::unique_ptr<DirectoryGuard> UP; - DirectoryGuard(boost::filesystem::path path); + DirectoryGuard(Path path); ~DirectoryGuard(); private: int _fd; }; -struct InvalidProgressException : public Exception { - const char* what() const throw() { - return "Invalid progress information reported by one of the filedistributors"; - } -}; - -struct FileDoesNotExistException : public Exception {}; +VESPA_DEFINE_EXCEPTION(InvalidProgressException, vespalib::Exception); +VESPA_DEFINE_EXCEPTION(InvalidHostStatusException, vespalib::Exception); class 
FileDBModel { public: - class InvalidHostStatusException : public Exception {}; struct HostStatus { enum State { finished, inProgress, notStarted }; @@ -43,7 +37,7 @@ public: virtual bool hasFile(const std::string& fileReference) = 0; virtual void addFile(const std::string& fileReference, const Buffer& buffer) = 0; - virtual Move<Buffer> getFile(const std::string& fileReference) = 0; + virtual Buffer getFile(const std::string& fileReference) = 0; virtual void cleanFiles(const std::vector<std::string>& filesToPreserve) = 0; virtual void setDeployedFilesToDownload(const std::string& hostName, diff --git a/filedistribution/src/vespa/filedistribution/model/filedistributionmodel.h b/filedistribution/src/vespa/filedistribution/model/filedistributionmodel.h index 170c5311bf2..32485b60159 100644 --- a/filedistribution/src/vespa/filedistribution/model/filedistributionmodel.h +++ b/filedistribution/src/vespa/filedistribution/model/filedistributionmodel.h @@ -6,7 +6,6 @@ #include <string> #include <set> -#include <boost/filesystem/path.hpp> #include <boost/signals2.hpp> #include <libtorrent/peer.hpp> @@ -16,10 +15,10 @@ namespace filedistribution { +VESPA_DEFINE_EXCEPTION(NotPeer, vespalib::Exception); + class FileDistributionModel { public: - class NotPeer : public Exception {}; - typedef boost::signals2::signal<void ()> FilesToDownloadChangedSignal; typedef std::vector<libtorrent::peer_entry> PeerEntries; diff --git a/filedistribution/src/vespa/filedistribution/model/filedistributionmodelimpl.cpp b/filedistribution/src/vespa/filedistribution/model/filedistributionmodelimpl.cpp index adff69cfc6c..01b08e1f377 100644 --- a/filedistribution/src/vespa/filedistribution/model/filedistributionmodelimpl.cpp +++ b/filedistribution/src/vespa/filedistribution/model/filedistributionmodelimpl.cpp @@ -65,6 +65,12 @@ prunePeers(std::vector<std::string> &peers, size_t maxPeers) { } //anonymous namespace +namespace filedistribution { + +VESPA_IMPLEMENT_EXCEPTION(NotPeer, 
vespalib::Exception); + +} + using filedistribution::FileDistributionModelImpl; struct FileDistributionModelImpl::DeployedFilesChangedCallback : @@ -159,8 +165,8 @@ FileDistributionModelImpl::peerFinished(const std::string& fileReference) { char progress = 100; //percent _zk->setData(path, &progress, sizeof(char), mustExist); - } catch(ZKNodeDoesNotExistsException&) { - BOOST_THROW_EXCEPTION(NotPeer()); + } catch(ZKNodeDoesNotExistsException & e) { + NotPeer(fileReference, e, VESPA_STRLOC); } } @@ -219,12 +225,14 @@ FileDistributionModelImpl::addConfigServersAsPeers( } } - - void FileDistributionModelImpl::configure(std::unique_ptr<FilereferencesConfig> config) { const bool changed = updateActiveFileReferences(config->filereferences); if (changed) { - _filesToDownloadChanged(); + try { + _filesToDownloadChanged(); + } catch (const ZKConnectionLossException & e) { + LOG(info, "Connection loss in reconfigure of file references, resuming. %s", e.what()); + } } } diff --git a/filedistribution/src/vespa/filedistribution/model/filedistributionmodelimpl.h b/filedistribution/src/vespa/filedistribution/model/filedistributionmodelimpl.h index 224009822e1..0c89e8b12ae 100644 --- a/filedistribution/src/vespa/filedistribution/model/filedistributionmodelimpl.h +++ b/filedistribution/src/vespa/filedistribution/model/filedistributionmodelimpl.h @@ -30,7 +30,7 @@ class FileDistributionModelImpl : public FileDistributionModel, bool /*changed*/ updateActiveFileReferences(const std::vector<vespalib::string>& fileReferences); - ZKFacade::Path getPeerEntryPath(const std::string& fileReference); + Path getPeerEntryPath(const std::string& fileReference); public: FileDistributionModelImpl(const std::string& hostName, int port, const std::shared_ptr<ZKFacade>& zk) :_hostName(hostName), diff --git a/filedistribution/src/vespa/filedistribution/model/mockfiledistributionmodel.h b/filedistribution/src/vespa/filedistribution/model/mockfiledistributionmodel.h index 169225deaf2..cdc77692017 100644 
--- a/filedistribution/src/vespa/filedistribution/model/mockfiledistributionmodel.h +++ b/filedistribution/src/vespa/filedistribution/model/mockfiledistributionmodel.h @@ -21,15 +21,14 @@ public: _fileReferences.push_back(fileReference); } - Move<Buffer> getFile(const std::string& fileReference) { + Buffer getFile(const std::string& fileReference) override { (void)fileReference; const char* resultStr = "result"; Buffer result(resultStr, resultStr + strlen(resultStr)); - return move(result); + return result; } - virtual void cleanFiles( - const std::vector<std::string> &) {} + virtual void cleanFiles(const std::vector<std::string> &) {} virtual void setDeployedFilesToDownload(const std::string&, @@ -50,8 +49,7 @@ public: return HostStatus(); } - Progress getProgress(const std::string&, - const std::vector<std::string>&) { + Progress getProgress(const std::string&, const std::vector<std::string>&) { return Progress(); } }; diff --git a/filedistribution/src/vespa/filedistribution/model/zkfacade.cpp b/filedistribution/src/vespa/filedistribution/model/zkfacade.cpp index 7e7caf67ff6..34705729703 100644 --- a/filedistribution/src/vespa/filedistribution/model/zkfacade.cpp +++ b/filedistribution/src/vespa/filedistribution/model/zkfacade.cpp @@ -10,7 +10,6 @@ #include <cstdio> #include <sstream> #include <thread> -#include <boost/throw_exception.hpp> #include <boost/function_output_iterator.hpp> #include <zookeeper/zookeeper.h> @@ -21,15 +20,66 @@ typedef std::unique_lock<std::mutex> UniqueLock; using filedistribution::ZKFacade; -using filedistribution::Move; using filedistribution::Buffer; using filedistribution::ZKGenericException; +using filedistribution::ZKException; using filedistribution::ZKLogging; - -typedef ZKFacade::Path Path; +using filedistribution::Path; namespace { +std::string +toErrorMsg(int zkStatus) { + switch(zkStatus) { + //System errors + case ZRUNTIMEINCONSISTENCY: + return "Zookeeper: A runtime inconsistency was found(ZRUNTIMEINCONSISTENCY)"; + case 
ZDATAINCONSISTENCY: + return "Zookeeper: A data inconsistency was found(ZDATAINCONSISTENCY)"; + case ZCONNECTIONLOSS: + return "Zookeeper: Connection to the server has been lost(ZCONNECTIONLOSS)"; + case ZMARSHALLINGERROR: + return "Zookeeper: Error while marshalling or unmarshalling data(ZMARSHALLINGERROR)"; + case ZUNIMPLEMENTED: + return "Zookeeper: Operation is unimplemented(ZUNIMPLEMENTED)"; + case ZOPERATIONTIMEOUT: + return "Zookeeper: Operation timeout(ZOPERATIONTIMEOUT)"; + case ZBADARGUMENTS: + return "Zookeeper: Invalid arguments(ZBADARGUMENTS)"; + case ZINVALIDSTATE: + return "Zookeeper: The connection with the zookeeper servers timed out(ZINVALIDSTATE)."; + + //API errors + case ZNONODE: + return "Zookeeper: Node does not exist(ZNONODE)"; + case ZNOAUTH: + return "Zookeeper: Not authenticated(ZNOAUTH)"; + case ZBADVERSION: + return "Zookeeper: Version conflict(ZBADVERSION)"; + case ZNOCHILDRENFOREPHEMERALS: + return "Zookeeper: Ephemeral nodes may not have children(ZNOCHILDRENFOREPHEMERALS)"; + case ZNODEEXISTS: + return "Zookeeper: The node already exists(ZNODEEXISTS)"; + case ZNOTEMPTY: + return "Zookeeper: The node has children(ZNOTEMPTY)"; + case ZSESSIONEXPIRED: + return "Zookeeper: The session has been expired by the server(ZSESSIONEXPIRED)"; + case ZINVALIDCALLBACK: + return "Zookeeper: Invalid callback specified(ZINVALIDCALLBACK)"; + case ZINVALIDACL: + return "Zookeeper: Invalid ACL specified(ZINVALIDACL)"; + case ZAUTHFAILED: + return "Zookeeper: Client authentication failed(ZAUTHFAILED)"; + case ZCLOSING: + return "Zookeeper: ZooKeeper is closing(ZCLOSING)"; + case ZNOTHING: + return "Zookeeper: No server responses to process(ZNOTHING)"; + default: + std::cerr <<"In ZKGenericException::what(): Invalid error code " << zkStatus <<std::endl; + return "Zookeeper: Invalid error code."; + } +} + class RetryController { unsigned int _retryCount; ZKFacade& _zkFacade; @@ -70,19 +120,22 @@ public: return true; } - void throwIfError() { + void 
throwIfError(const Path & path) { namespace fd = filedistribution; switch (_lastStatus) { case ZSESSIONEXPIRED: - BOOST_THROW_EXCEPTION(fd::ZKSessionExpired()); + throw fd::ZKSessionExpired(path.string(), VESPA_STRLOC); case ZNONODE: - BOOST_THROW_EXCEPTION(fd::ZKNodeDoesNotExistsException()); + throw fd::ZKNodeDoesNotExistsException(path.string(), VESPA_STRLOC); case ZNODEEXISTS: - BOOST_THROW_EXCEPTION(fd::ZKNodeExistsException()); + throw fd::ZKNodeExistsException(path.string(), VESPA_STRLOC); + case ZCONNECTIONLOSS: + throw fd::ZKConnectionLossException(path.string(), VESPA_STRLOC); default: - if (_lastStatus != ZOK) - BOOST_THROW_EXCEPTION(fd::ZKGenericException(_lastStatus)); + if (_lastStatus != ZOK) { + throw fd::ZKGenericException(_lastStatus, toErrorMsg(_lastStatus) + " : " + path.string(), VESPA_STRLOC); + } } } }; @@ -106,15 +159,11 @@ setDataForNewFile(ZKFacade& zk, const Path& path, const char* buffer, int length const int maxPath = 1024; char createdPath[maxPath]; do { - retryController( - zoo_create(zhandle, path.string().c_str(), - buffer, length, - &ZOO_OPEN_ACL_UNSAFE, createFlags, - createdPath, maxPath)); + retryController( zoo_create(zhandle, path.string().c_str(), buffer, length, &ZOO_OPEN_ACL_UNSAFE, createFlags, createdPath, maxPath)); } while (retryController.shouldRetry()); - - retryController.throwIfError(); - return Path(createdPath); + Path newPath(createdPath); + retryController.throwIfError(newPath); + return newPath; } void @@ -123,16 +172,25 @@ setDataForExistingFile(ZKFacade& zk, const Path& path, const char* buffer, int l const int ignoreVersion = -1; do { - retryController( - zoo_set(zhandle, path.string().c_str(), - buffer, length, ignoreVersion)); + retryController(zoo_set(zhandle, path.string().c_str(), buffer, length, ignoreVersion)); } while (retryController.shouldRetry()); - retryController.throwIfError(); + retryController.throwIfError(path); } } //anonymous namespace +namespace filedistribution { + 
+VESPA_IMPLEMENT_EXCEPTION(ZKNodeDoesNotExistsException, ZKException); +VESPA_IMPLEMENT_EXCEPTION(ZKConnectionLossException, ZKException); +VESPA_IMPLEMENT_EXCEPTION(ZKNodeExistsException, ZKException); +VESPA_IMPLEMENT_EXCEPTION(ZKFailedConnecting, ZKException); +VESPA_IMPLEMENT_EXCEPTION(ZKSessionExpired, ZKException); +VESPA_IMPLEMENT_EXCEPTION_SPINE(ZKGenericException); + +} + /********** Active watchers *******************************************/ struct ZKFacade::ZKWatcher { const std::weak_ptr<ZKFacade> _owner; @@ -179,19 +237,23 @@ struct ZKFacade::ZKWatcher { void ZKFacade::stateWatchingFun(zhandle_t*, int type, int state, const char* path, void* context) { - (void)path; (void)context; //The ZKFacade won't expire before zookeeper_close has finished. - if (type == ZOO_SESSION_EVENT) { - LOGFWD(debug, "Zookeeper session event: %d", state); - if (state == ZOO_EXPIRED_SESSION_STATE) { - throw ZKSessionExpired(); - } else if (state == ZOO_AUTH_FAILED_STATE) { - throw ZKGenericException(ZNOAUTH); + try { + if (type == ZOO_SESSION_EVENT) { + LOGFWD(debug, "Zookeeper session event: %d", state); + if (state == ZOO_EXPIRED_SESSION_STATE) { + throw ZKSessionExpired(path, VESPA_STRLOC); + } else if (state == ZOO_AUTH_FAILED_STATE) { + throw ZKGenericException(ZNOAUTH, path, VESPA_STRLOC); + } + } else { + LOGFWD(info, "State watching function: Unexpected event: '%d' -- '%d' ", type, state); } - } else { - LOGFWD(info, "State watching function: Unexpected event: '%d' -- '%d' ", type, state); + } catch (ZKSessionExpired & e) { + LOGFWD(error, "Received ZKSessionExpired exception that I can not handle. 
Will just exit quietly : %s", e.what()); + std::quick_exit(11); } } @@ -248,7 +310,7 @@ ZKFacade::ZKFacade(const std::string& zkservers) 0)) //flags { if (!_zhandle) { - BOOST_THROW_EXCEPTION(ZKFailedConnecting()); + throw ZKFailedConnecting("No zhandle", VESPA_STRLOC); } } @@ -272,67 +334,46 @@ ZKFacade::getString(const Path& path) { return std::string(buffer.begin(), buffer.end()); } -const Move<Buffer> +Buffer ZKFacade::getData(const Path& path) { RetryController retryController(this); - try { - Buffer buffer(_maxDataSize); - int bufferSize = _maxDataSize; - - const int watchIsOff = 0; - do { - Stat stat; - bufferSize = _maxDataSize; - - retryController( - zoo_get(_zhandle, path.string().c_str(), watchIsOff, - &*buffer.begin(), - &bufferSize, //in & out - &stat)); - } while(retryController.shouldRetry()); + Buffer buffer(_maxDataSize); + int bufferSize = _maxDataSize; - retryController.throwIfError(); + const int watchIsOff = 0; + do { + Stat stat; + bufferSize = _maxDataSize; - buffer.resize(bufferSize); - return move(buffer); + retryController( zoo_get(_zhandle, path.string().c_str(), watchIsOff, &*buffer.begin(), &bufferSize, &stat)); + } while(retryController.shouldRetry()); - } catch(boost::exception& e) { - e <<errorinfo::Path(path); - throw; - } + retryController.throwIfError(path); + buffer.resize(bufferSize); + return buffer; } -const Move<Buffer> +Buffer ZKFacade::getData(const Path& path, const NodeChangedWatcherSP& watcher) { - void* watcherContext = registerWatcher(watcher); + RegistrationGuard unregisterGuard(*this, watcher); + void* watcherContext = unregisterGuard.get(); RetryController retryController(this); - try { - Buffer buffer(_maxDataSize); - int bufferSize = _maxDataSize; - - do { - Stat stat; - bufferSize = _maxDataSize; - - retryController( - zoo_wget(_zhandle, path.string().c_str(), - &ZKWatcher::watcherFn, watcherContext, - &*buffer.begin(), - &bufferSize, //in & out - &stat)); - } while(retryController.shouldRetry()); - - 
retryController.throwIfError(); - - buffer.resize(bufferSize); - return move(buffer); - - } catch(boost::exception& e) { - unregisterWatcher(watcherContext); - e <<errorinfo::Path(path); - throw; - } + Buffer buffer(_maxDataSize); + int bufferSize = _maxDataSize; + + do { + Stat stat; + bufferSize = _maxDataSize; + + retryController( zoo_wget(_zhandle, path.string().c_str(), &ZKWatcher::watcherFn, watcherContext, &*buffer.begin(), &bufferSize, &stat)); + } while (retryController.shouldRetry()); + + retryController.throwIfError(path); + + buffer.resize(bufferSize); + unregisterGuard.release(); + return buffer; } void @@ -344,14 +385,10 @@ void ZKFacade::setData(const Path& path, const char* buffer, size_t length, bool mustExist) { assert (length < _maxDataSize); - try { - if (mustExist || hasNode(path)) - setDataForExistingFile(*this, path, buffer, length, _zhandle); - else - setDataForNewFile(*this, path, buffer, length, _zhandle, 0); - } catch(boost::exception& e) { - e <<errorinfo::Path(path); - throw; + if (mustExist || hasNode(path)) { + setDataForExistingFile(*this, path, buffer, length, _zhandle); + } else { + setDataForNewFile(*this, path, buffer, length, _zhandle, 0); } } @@ -365,63 +402,53 @@ ZKFacade::createSequenceNode(const Path& path, const char* buffer, size_t length bool ZKFacade::hasNode(const Path& path) { - try { - RetryController retryController(this); - do { - Stat stat; - const int noWatch = 0; - retryController( - zoo_exists(_zhandle, path.string().c_str(), noWatch, &stat)); - } while(retryController.shouldRetry()); - - switch(retryController._lastStatus) { - case ZNONODE: - return false; - case ZOK: - return true; - default: - retryController.throwIfError(); - //this should never happen: - assert(false); - return false; - } + RetryController retryController(this); + do { + Stat stat; + const int noWatch = 0; + retryController( zoo_exists(_zhandle, path.string().c_str(), noWatch, &stat)); + } while(retryController.shouldRetry()); - } catch 
(boost::exception &e) { - e <<errorinfo::Path(path); - throw; + switch(retryController._lastStatus) { + case ZNONODE: + return false; + case ZOK: + return true; + default: + retryController.throwIfError(path); + //this should never happen: + assert(false); + return false; } } bool ZKFacade::hasNode(const Path& path, const NodeChangedWatcherSP& watcher) { - void* watcherContext = registerWatcher(watcher); - try { - RetryController retryController(this); - do { - Stat stat; - retryController( - zoo_wexists(_zhandle, path.string().c_str(), - &ZKWatcher::watcherFn, watcherContext, - &stat)); - } while(retryController.shouldRetry()); - - switch(retryController._lastStatus) { - case ZNONODE: - return false; - case ZOK: - return true; - default: - retryController.throwIfError(); - //this should never happen: - assert(false); - return false; - } + RegistrationGuard unregisterGuard(*this, watcher); + void* watcherContext = unregisterGuard.get(); + RetryController retryController(this); + do { + Stat stat; + retryController(zoo_wexists(_zhandle, path.string().c_str(), &ZKWatcher::watcherFn, watcherContext, &stat)); + } while (retryController.shouldRetry()); - } catch (boost::exception &e) { - unregisterWatcher(watcherContext); - e <<errorinfo::Path(path); - throw; + bool retval(false); + switch(retryController._lastStatus) { + case ZNONODE: + retval = false; + break; + case ZOK: + retval = true; + break; + default: + retryController.throwIfError(path); + //this should never happen: + assert(false); + retval = false; + break; } + unregisterGuard.release(); + return retval; } void @@ -431,9 +458,6 @@ ZKFacade::addEphemeralNode(const Path& path) { } catch(const ZKNodeExistsException& e) { remove(path); addEphemeralNode(path); - } catch (boost::exception& e) { - e <<errorinfo::Path(path); - throw; } } @@ -444,21 +468,14 @@ ZKFacade::remove(const Path& path) { std::for_each(children.begin(), children.end(), [&](const std::string & s){ remove(path / s); }); } - try { - 
RetryController retryController(this); - do { - int ignoreVersion = -1; - - retryController( - zoo_delete(_zhandle, path.string().c_str(), - ignoreVersion)); - } while (retryController.shouldRetry()); - - if (retryController._lastStatus != ZNONODE) - retryController.throwIfError(); + RetryController retryController(this); + do { + int ignoreVersion = -1; + retryController( zoo_delete(_zhandle, path.string().c_str(), ignoreVersion)); + } while (retryController.shouldRetry()); - } catch(boost::exception& e) { - e <<errorinfo::Path(path); + if (retryController._lastStatus != ZNONODE) { + retryController.throwIfError(path); } } @@ -490,64 +507,49 @@ ZKFacade::retainOnly(const Path& path, const std::vector<std::string>& childrenT std::vector< std::string > ZKFacade::getChildren(const Path& path) { - try { - RetryController retryController(this); - String_vector children; - do { - const bool watch = false; - retryController( - zoo_get_children(_zhandle, path.string().c_str(), watch, &children)); - } while(retryController.shouldRetry()); + RetryController retryController(this); + String_vector children; + do { + const bool watch = false; + retryController( zoo_get_children(_zhandle, path.string().c_str(), watch, &children)); + } while (retryController.shouldRetry()); - retryController.throwIfError(); + retryController.throwIfError(path); - DeallocateZKStringVectorGuard deallocateGuard(children); + DeallocateZKStringVectorGuard deallocateGuard(children); - typedef std::vector<std::string> ResultType; - ResultType result; - result.reserve(children.count); + typedef std::vector<std::string> ResultType; + ResultType result; + result.reserve(children.count); - std::copy(children.data, children.data + children.count, - std::back_inserter(result)); + std::copy(children.data, children.data + children.count, std::back_inserter(result)); - return result; - } catch (boost::exception& e) { - e <<errorinfo::Path(path); - throw; - } + return result; } std::vector< std::string > 
ZKFacade::getChildren(const Path& path, const NodeChangedWatcherSP& watcher) { - void* watcherContext = registerWatcher(watcher); + RegistrationGuard unregisterGuard(*this, watcher); + void* watcherContext = unregisterGuard.get(); - try { - RetryController retryController(this); - String_vector children; - do { - retryController( - zoo_wget_children(_zhandle, path.string().c_str(), - &ZKWatcher::watcherFn, watcherContext, - &children)); - } while(retryController.shouldRetry()); + RetryController retryController(this); + String_vector children; + do { + retryController( zoo_wget_children(_zhandle, path.string().c_str(), &ZKWatcher::watcherFn, watcherContext, &children)); + } while (retryController.shouldRetry()); - retryController.throwIfError(); + retryController.throwIfError(path); - DeallocateZKStringVectorGuard deallocateGuard(children); + DeallocateZKStringVectorGuard deallocateGuard(children); - typedef std::vector<std::string> ResultType; - ResultType result; - result.reserve(children.count); + typedef std::vector<std::string> ResultType; + ResultType result; + result.reserve(children.count); - std::copy(children.data, children.data + children.count, - std::back_inserter(result)); + std::copy(children.data, children.data + children.count, std::back_inserter(result)); - return result; - } catch (boost::exception& e) { - unregisterWatcher(watcherContext); - e <<errorinfo::Path(path); - throw; - } + unregisterGuard.release(); + return result; } @@ -579,68 +581,3 @@ ZKLogging::~ZKLogging() _file = nullptr; } } - -const char* -ZKGenericException::what() const throw() { - switch(_zkStatus) { - //System errors - case ZRUNTIMEINCONSISTENCY: - return "Zookeeper: A runtime inconsistency was found(ZRUNTIMEINCONSISTENCY)"; - case ZDATAINCONSISTENCY: - return "Zookeeper: A data inconsistency was found(ZDATAINCONSISTENCY)"; - case ZCONNECTIONLOSS: - return "Zookeeper: Connection to the server has been lost(ZCONNECTIONLOSS)"; - case ZMARSHALLINGERROR: - return "Zookeeper: 
Error while marshalling or unmarshalling data(ZMARSHALLINGERROR)"; - case ZUNIMPLEMENTED: - return "Zookeeper: Operation is unimplemented(ZUNIMPLEMENTED)"; - case ZOPERATIONTIMEOUT: - return "Zookeeper: Operation timeout(ZOPERATIONTIMEOUT)"; - case ZBADARGUMENTS: - return "Zookeeper: Invalid arguments(ZBADARGUMENTS)"; - case ZINVALIDSTATE: - return "Zookeeper: The connection with the zookeeper servers timed out(ZINVALIDSTATE)."; - - //API errors - case ZNONODE: - return "Zookeeper: Node does not exist(ZNONODE)"; - case ZNOAUTH: - return "Zookeeper: Not authenticated(ZNOAUTH)"; - case ZBADVERSION: - return "Zookeeper: Version conflict(ZBADVERSION)"; - case ZNOCHILDRENFOREPHEMERALS: - return "Zookeeper: Ephemeral nodes may not have children(ZNOCHILDRENFOREPHEMERALS)"; - case ZNODEEXISTS: - return "Zookeeper: The node already exists(ZNODEEXISTS)"; - case ZNOTEMPTY: - return "Zookeeper: The node has children(ZNOTEMPTY)"; - case ZSESSIONEXPIRED: - return "Zookeeper: The session has been expired by the server(ZSESSIONEXPIRED)"; - case ZINVALIDCALLBACK: - return "Zookeeper: Invalid callback specified(ZINVALIDCALLBACK)"; - case ZINVALIDACL: - return "Zookeeper: Invalid ACL specified(ZINVALIDACL)"; - case ZAUTHFAILED: - return "Zookeeper: Client authentication failed(ZAUTHFAILED)"; - case ZCLOSING: - return "Zookeeper: ZooKeeper is closing(ZCLOSING)"; - case ZNOTHING: - return "Zookeeper: No server responses to process(ZNOTHING)"; - default: - std::cerr <<"In ZKGenericException::what(): Invalid error code " <<_zkStatus <<std::endl; - return "Zookeeper: Invalid error code."; - } -} - -const std::string -filedistribution::diagnosticUserLevelMessage(const ZKException& exception) { - const char* indent = " "; - std::ostringstream message; - message <<exception.what(); - - const errorinfo::Path::value_type* path = boost::get_error_info<errorinfo::Path>(exception); - if (path) { - message <<std::endl <<indent <<"Path: " <<*path; - } - return message.str(); -} diff --git 
a/filedistribution/src/vespa/filedistribution/model/zkfacade.h b/filedistribution/src/vespa/filedistribution/model/zkfacade.h index 7631fa6d9dc..bebc133590d 100644 --- a/filedistribution/src/vespa/filedistribution/model/zkfacade.h +++ b/filedistribution/src/vespa/filedistribution/model/zkfacade.h @@ -3,61 +3,45 @@ #include <string> #include <vector> +#include <map> #include <mutex> -#include <boost/filesystem/path.hpp> -#include <boost/signals2.hpp> #include <vespa/filedistribution/common/buffer.h> #include <vespa/filedistribution/common/exception.h> +#include <vespa/vespalib/util/exception.h> struct _zhandle; typedef _zhandle zhandle_t; namespace filedistribution { -namespace errorinfo { -typedef boost::error_info<struct tag_Path, boost::filesystem::path> Path; -} - -class ZKException : public Exception { +class ZKException : public vespalib::Exception { protected: - ZKException() {} + using vespalib::Exception::Exception; }; -struct ZKNodeDoesNotExistsException : public ZKException { - const char* what() const throw() { - return "Zookeeper: The node does not exist(ZNONODE)."; - } -}; +VESPA_DEFINE_EXCEPTION(ZKNodeDoesNotExistsException, ZKException); +VESPA_DEFINE_EXCEPTION(ZKConnectionLossException, ZKException); +VESPA_DEFINE_EXCEPTION(ZKNodeExistsException, ZKException); +VESPA_DEFINE_EXCEPTION(ZKFailedConnecting, ZKException); +VESPA_DEFINE_EXCEPTION(ZKSessionExpired, ZKException); -struct ZKNodeExistsException : public ZKException { - const char* what() const throw() { - return "Zookeeper: The node already exists(ZNODEEXISTS)."; - } -}; - -struct ZKGenericException : public ZKException { +class ZKGenericException : public ZKException { +public: + ZKGenericException(int zkStatus, const vespalib::stringref &msg, const vespalib::stringref &location = "", int skipStack = 0) : + ZKException(msg, location, skipStack), + _zkStatus(zkStatus) + { } + ZKGenericException(int zkStatus, const vespalib::Exception &cause, const vespalib::stringref &msg = "", + const 
vespalib::stringref &location = "", int skipStack = 0) : + ZKException(msg, cause, location, skipStack), + _zkStatus(zkStatus) + { } + VESPA_DEFINE_EXCEPTION_SPINE(ZKGenericException); +private: const int _zkStatus; - ZKGenericException(int zkStatus) - :_zkStatus(zkStatus) - {} - - const char* what() const throw(); }; -struct ZKFailedConnecting : public ZKException { - const char* what() const throw() { - return "Zookeeper: Failed connecting to the zookeeper servers."; - } -}; - -class ZKSessionExpired : public ZKException {}; - -const std::string -diagnosticUserLevelMessage(const ZKException& zk); - - - class ZKFacade : public std::enable_shared_from_this<ZKFacade> { volatile bool _retriesEnabled; volatile bool _watchersEnabled; @@ -85,7 +69,6 @@ public: }; typedef std::shared_ptr<NodeChangedWatcher> NodeChangedWatcherSP; - typedef boost::filesystem::path Path; ZKFacade(const ZKFacade &) = delete; ZKFacade & operator = (const ZKFacade &) = delete; @@ -96,9 +79,9 @@ public: bool hasNode(const Path&, const NodeChangedWatcherSP&); const std::string getString(const Path&); - const Move<Buffer> getData(const Path&); //throws ZKNodeDoesNotExistsException + Buffer getData(const Path&); //throws ZKNodeDoesNotExistsException //if watcher is specified, it will be set even if the node does not exists - const Move<Buffer> getData(const Path&, const NodeChangedWatcherSP&); //throws ZKNodeDoesNotExistsException + Buffer getData(const Path&, const NodeChangedWatcherSP&); //throws ZKNodeDoesNotExistsException //Parent path must exist void setData(const Path&, const Buffer& buffer, bool mustExist = false); @@ -122,6 +105,22 @@ public: } private: + class RegistrationGuard { + public: + RegistrationGuard & operator = (const RegistrationGuard &) = delete; + RegistrationGuard(const RegistrationGuard &) = delete; + RegistrationGuard(ZKFacade & zk, const NodeChangedWatcherSP & watcher) : _zk(zk), _watcherContext(_zk.registerWatcher(watcher)) { } + ~RegistrationGuard() { + if 
(_watcherContext) { + _zk.unregisterWatcher(_watcherContext); + } + } + void * get() { return _watcherContext; } + void release() { _watcherContext = nullptr; } + private: + ZKFacade & _zk; + void * _watcherContext; + }; void* registerWatcher(const NodeChangedWatcherSP &); //returns watcherContext std::shared_ptr<ZKWatcher> unregisterWatcher(void* watcherContext); void invokeWatcher(void* watcherContext); diff --git a/filedistribution/src/vespa/filedistribution/model/zkfiledbmodel.cpp b/filedistribution/src/vespa/filedistribution/model/zkfiledbmodel.cpp index 70827305138..a249aefd8e2 100644 --- a/filedistribution/src/vespa/filedistribution/model/zkfiledbmodel.cpp +++ b/filedistribution/src/vespa/filedistribution/model/zkfiledbmodel.cpp @@ -36,7 +36,7 @@ isEntryForHost(const std::string& host, const std::string& peerEntry) { } std::vector<std::string> -getSortedChildren(ZKFacade& zk, const ZKFileDBModel::Path& path) { +getSortedChildren(ZKFacade& zk, const Path& path) { std::vector<std::string> children = zk.getChildren(path); std::sort(children.begin(), children.end()); return children; @@ -44,9 +44,12 @@ getSortedChildren(ZKFacade& zk, const ZKFileDBModel::Path& path) { } //anonymous namespace -const ZKFileDBModel::Path ZKFileDBModel::_root = "/vespa/filedistribution"; -const ZKFileDBModel::Path ZKFileDBModel::_fileDBPath = _root / "files"; -const ZKFileDBModel::Path ZKFileDBModel::_hostsPath = _root / "hosts"; +VESPA_IMPLEMENT_EXCEPTION(InvalidProgressException, vespalib::Exception); +VESPA_IMPLEMENT_EXCEPTION(InvalidHostStatusException, vespalib::Exception); + +const Path ZKFileDBModel::_root = "/vespa/filedistribution"; +const Path ZKFileDBModel::_fileDBPath = _root / "files"; +const Path ZKFileDBModel::_hostsPath = _root / "hosts"; bool ZKFileDBModel::hasFile(const std::string& fileReference) { @@ -58,12 +61,12 @@ ZKFileDBModel::addFile(const std::string& fileReference, const Buffer& buffer) { return _zk->setData(createPath(fileReference), buffer); } 
-Move<Buffer> +Buffer ZKFileDBModel::getFile(const std::string& fileReference) { try { return _zk->getData(createPath(fileReference)); - } catch(const ZKNodeDoesNotExistsException&) { - throw FileDoesNotExistException(); + } catch(const ZKNodeDoesNotExistsException & e) { + throw FileDoesNotExistException(fileReference, e, VESPA_STRLOC); } } @@ -221,7 +224,7 @@ ZKFileDBModel::getHostStatus(const std::string& hostName) { candidate++; if (candidate != peerEntries.end() && isEntryForHost(hostName, *candidate)) - BOOST_THROW_EXCEPTION(InvalidHostStatusException()); + throw InvalidHostStatusException(path.string(), VESPA_STRLOC); } } @@ -234,8 +237,7 @@ ZKFileDBModel::getHostStatus(const std::string& hostName) { } void -ZKFileDBModel::cleanFiles( - const std::vector<std::string>& filesToPreserve) { +ZKFileDBModel::cleanFiles(const std::vector<std::string>& filesToPreserve) { _zk->retainOnly(_fileDBPath, filesToPreserve); } @@ -257,7 +259,7 @@ ZKFileDBModel::getProgress(const Path& path) { else if (buffer.size() == 0) return 0; else { - throw boost::enable_current_exception(InvalidProgressException()) <<errorinfo::Path(path); + throw InvalidProgressException(path.string(), VESPA_STRLOC); } } catch (ZKNodeDoesNotExistsException& e) { //progress information deleted @@ -294,7 +296,7 @@ ZKFileDBModel::getProgress(const std::string& fileReference, FileDBModel::~FileDBModel() {} -DirectoryGuard::DirectoryGuard(boost::filesystem::path path) : +DirectoryGuard::DirectoryGuard(Path path) : _fd(-1) { _fd = open(path.c_str(), O_RDONLY); diff --git a/filedistribution/src/vespa/filedistribution/model/zkfiledbmodel.h b/filedistribution/src/vespa/filedistribution/model/zkfiledbmodel.h index 4249410c00e..7be2a383e7b 100644 --- a/filedistribution/src/vespa/filedistribution/model/zkfiledbmodel.h +++ b/filedistribution/src/vespa/filedistribution/model/zkfiledbmodel.h @@ -7,8 +7,6 @@ namespace filedistribution { class ZKFileDBModel : public FileDBModel { -public: - typedef 
boost::filesystem::path Path; private: const std::shared_ptr<ZKFacade> _zk; char getProgress(const Path& path); @@ -29,7 +27,7 @@ public: //overrides bool hasFile(const std::string& fileReference); void addFile(const std::string& fileReference, const Buffer& buffer); - Move<Buffer> getFile(const std::string& fileReference); + Buffer getFile(const std::string& fileReference) override; void cleanFiles(const std::vector<std::string>& filesToPreserve); void setDeployedFilesToDownload(const std::string& hostName, diff --git a/filedistribution/src/vespa/filedistribution/rpc/filedistributorrpc.cpp b/filedistribution/src/vespa/filedistribution/rpc/filedistributorrpc.cpp index 6579ea06f31..26f12cba578 100644 --- a/filedistribution/src/vespa/filedistribution/rpc/filedistributorrpc.cpp +++ b/filedistribution/src/vespa/filedistribution/rpc/filedistributorrpc.cpp @@ -3,7 +3,6 @@ #include "filedistributorrpc.h" #include <boost/optional.hpp> -#include <boost/exception/diagnostic_information.hpp> #include <vespa/log/log.h> LOG_SETUP(".filedistributorrpc"); @@ -222,8 +221,7 @@ FileDistributorRPC::Server::waitFor(FRT_RPCRequest* request) { frtstream::FrtServerStream requestHandler(request); std::string fileReference; requestHandler >> fileReference; - boost::optional<fs::path> path - = _fileProvider->getPath(fileReference); + boost::optional<fs::path> path = _fileProvider->getPath(fileReference); if (path) { LOG(debug, "Returning request for file reference '%s'.", fileReference.c_str()); requestHandler << path->string(); @@ -238,9 +236,8 @@ FileDistributorRPC::Server::waitFor(FRT_RPCRequest* request) { "No such file reference"); request->Return(); } catch (const std::exception& e) { - LOG(error, "An exception occurred while calling the rpc method waitFor:%s", - boost::diagnostic_information(e).c_str()); - request->SetError(RPCErrorCodes::unknownError, boost::diagnostic_information(e).c_str()); + LOG(error, "An exception occurred while calling the rpc method waitFor:%s", e.what()); + 
request->SetError(RPCErrorCodes::unknownError, e.what()); request->Return(); //the request might be detached. } } diff --git a/filedistribution/src/vespa/filedistribution/rpc/filedistributorrpc.h b/filedistribution/src/vespa/filedistribution/rpc/filedistributorrpc.h index 3c780bf5878..95a7d6113a0 100644 --- a/filedistribution/src/vespa/filedistribution/rpc/filedistributorrpc.h +++ b/filedistribution/src/vespa/filedistribution/rpc/filedistributorrpc.h @@ -2,7 +2,6 @@ #pragma once #include <memory> -#include <boost/enable_shared_from_this.hpp> #include "fileprovider.h" diff --git a/filedistribution/src/vespa/filedistribution/rpc/fileprovider.h b/filedistribution/src/vespa/filedistribution/rpc/fileprovider.h index 4eeeee5e359..513535db686 100644 --- a/filedistribution/src/vespa/filedistribution/rpc/fileprovider.h +++ b/filedistribution/src/vespa/filedistribution/rpc/fileprovider.h @@ -1,8 +1,8 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#pragma once +#include <vespa/filedistribution/common/exception.h> #include<boost/optional.hpp> -#include<boost/filesystem/path.hpp> #include<boost/signals2.hpp> namespace filedistribution { @@ -11,9 +11,7 @@ class FileProvider { public: using SP = std::shared_ptr<FileProvider>; - typedef boost::signals2::signal<void (const std::string& /* fileReference */, - const boost::filesystem::path&)> - DownloadCompletedSignal; + typedef boost::signals2::signal<void (const std::string& /* fileReference */, const Path&)> DownloadCompletedSignal; typedef DownloadCompletedSignal::slot_type DownloadCompletedHandler; enum FailedDownloadReason { @@ -21,12 +19,10 @@ public: FileReferenceRemoved }; - typedef boost::signals2::signal<void (const std::string& /* fileReference */, - FailedDownloadReason)> - DownloadFailedSignal; + typedef boost::signals2::signal<void (const std::string& /* fileReference */, FailedDownloadReason)> DownloadFailedSignal; typedef DownloadFailedSignal::slot_type DownloadFailedHandler; - virtual boost::optional<boost::filesystem::path> getPath(const std::string& fileReference) = 0; + virtual boost::optional<Path> getPath(const std::string& fileReference) = 0; virtual void downloadFile(const std::string& fileReference) = 0; virtual ~FileProvider() {} diff --git a/fnet/src/tests/info/info.cpp b/fnet/src/tests/info/info.cpp index 284be22db63..34b2e851ec7 100644 --- a/fnet/src/tests/info/info.cpp +++ b/fnet/src/tests/info/info.cpp @@ -73,9 +73,9 @@ TEST("size of important objects") EXPECT_EQUAL(184u, sizeof(FNET_IOComponent)); EXPECT_EQUAL(32u, sizeof(FNET_Channel)); EXPECT_EQUAL(40u, sizeof(FNET_PacketQueue_NoLock)); - EXPECT_EQUAL(512u, sizeof(FNET_Connection)); + EXPECT_EQUAL(536u, sizeof(FNET_Connection)); EXPECT_EQUAL(96u, sizeof(FNET_Cond)); - EXPECT_EQUAL(48u, sizeof(FNET_DataBuffer)); + EXPECT_EQUAL(56u, sizeof(FNET_DataBuffer)); EXPECT_EQUAL(24u, sizeof(FastOS_Time)); EXPECT_EQUAL(8u, sizeof(FNET_Context)); EXPECT_EQUAL(8u, sizeof(fastos::TimeStamp)); 
diff --git a/fnet/src/vespa/fnet/databuffer.cpp b/fnet/src/vespa/fnet/databuffer.cpp index 3239dbf5087..913196d6e72 100644 --- a/fnet/src/vespa/fnet/databuffer.cpp +++ b/fnet/src/vespa/fnet/databuffer.cpp @@ -15,7 +15,7 @@ FNET_DataBuffer::FNET_DataBuffer(uint32_t len) len = 256; if (len > 0) { - DefaultAlloc(len).swap(_ownedBuf); + DefaultAlloc::create(len).swap(_ownedBuf); memset(_ownedBuf.get(), 0x55, len); _bufstart = static_cast<char *>(_ownedBuf.get()); assert(_bufstart != NULL); @@ -72,7 +72,7 @@ FNET_DataBuffer::Shrink(uint32_t newsize) return false; } - DefaultAlloc newBuf(newsize); + Alloc newBuf(DefaultAlloc::create(newsize)); memset(newBuf.get(), 0x55, newsize); memcpy(newBuf.get(), _datapt, GetDataLen()); _ownedBuf.swap(newBuf); @@ -97,7 +97,7 @@ FNET_DataBuffer::Pack(uint32_t needbytes) while (bufsize - GetDataLen() < needbytes) bufsize *= 2; - DefaultAlloc newBuf(bufsize); + Alloc newBuf(DefaultAlloc::create(bufsize)); memset(newBuf.get(), 0x55, bufsize); memcpy(newBuf.get(), _datapt, GetDataLen()); _ownedBuf.swap(newBuf); diff --git a/fnet/src/vespa/fnet/databuffer.h b/fnet/src/vespa/fnet/databuffer.h index 23802d2ea90..8e4127d3b72 100644 --- a/fnet/src/vespa/fnet/databuffer.h +++ b/fnet/src/vespa/fnet/databuffer.h @@ -29,11 +29,12 @@ class FNET_DataBuffer { private: - char *_bufstart; - char *_bufend; - char *_datapt; - char *_freept; - vespalib::DefaultAlloc _ownedBuf; + using Alloc = vespalib::alloc::Alloc; + char *_bufstart; + char *_bufend; + char *_datapt; + char *_freept; + Alloc _ownedBuf; FNET_DataBuffer(const FNET_DataBuffer &); FNET_DataBuffer &operator=(const FNET_DataBuffer &); diff --git a/fnet/src/vespa/fnet/frt/values.h b/fnet/src/vespa/fnet/frt/values.h index 0c3b13b16f3..8ab8914d31f 100644 --- a/fnet/src/vespa/fnet/frt/values.h +++ b/fnet/src/vespa/fnet/frt/values.h @@ -72,12 +72,12 @@ public: class LocalBlob : public FRT_ISharedBlob { public: - LocalBlob(vespalib::DefaultAlloc data, uint32_t len) : + 
LocalBlob(vespalib::alloc::Alloc data, uint32_t len) : _data(std::move(data)), _len(len) { } LocalBlob(const char *data, uint32_t len) : - _data(len), + _data(vespalib::DefaultAlloc::create(len)), _len(len) { if (data != NULL) { @@ -85,7 +85,7 @@ public: } } void addRef() override {} - void subRef() override { vespalib::DefaultAlloc().swap(_data); } + void subRef() override { vespalib::alloc::Alloc().swap(_data); } uint32_t getLen() override { return _len; } const char *getData() override { return static_cast<const char *>(_data.get()); } char *getInternalData() { return static_cast<char *>(_data.get()); } @@ -93,7 +93,7 @@ public: LocalBlob(const LocalBlob &); LocalBlob &operator=(const LocalBlob &); - vespalib::DefaultAlloc _data; + vespalib::alloc::Alloc _data; uint32_t _len; }; @@ -439,7 +439,7 @@ public: _typeString[_numValues++] = FRT_VALUE_DATA; } - void AddData(vespalib::DefaultAlloc buf, uint32_t len) + void AddData(vespalib::alloc::Alloc buf, uint32_t len) { AddSharedData(new (_tub) LocalBlob(std::move(buf), len)); } diff --git a/install_java.cmake b/install_java.cmake index 5a1d4c8e73e..52d6ba19d2c 100644 --- a/install_java.cmake +++ b/install_java.cmake @@ -166,7 +166,6 @@ install(FILES document/src/vespa/document/config/documenttypes.def documentapi/src/main/resources/configdefinitions/documentrouteselectorpolicy.def fileacquirer/src/main/resources/configdefinitions/filedistributorrpc.def - fileacquirer/src/vespa/fileacquirer/filedistributorrpc.def filedistribution/src/vespa/filedistribution/distributor/filedistributor.def filedistribution/src/vespa/filedistribution/model/filereferences.def jdisc_http_service/src/main/resources/configdefinitions/jdisc.http.client.http-client.def diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/application/OsgiFramework.java b/jdisc_core/src/main/java/com/yahoo/jdisc/application/OsgiFramework.java index 615b36fef1f..78b97caf57b 100644 --- a/jdisc_core/src/main/java/com/yahoo/jdisc/application/OsgiFramework.java +++ 
b/jdisc_core/src/main/java/com/yahoo/jdisc/application/OsgiFramework.java @@ -38,7 +38,7 @@ public interface OsgiFramework { * or the caller does not have the appropriate permissions, or the system {@link * BundleContext} is no longer valid. */ - public List<Bundle> installBundle(String bundleLocation) throws BundleException; + List<Bundle> installBundle(String bundleLocation) throws BundleException; /** * <p>Starts the given {@link Bundle}s. The parameter <tt>privileged</tt> tells the framework whether or not @@ -53,13 +53,13 @@ public interface OsgiFramework { * @throws SecurityException If the caller does not have the appropriate permissions. * @throws IllegalStateException If this bundle has been uninstalled or this bundle tries to change its own state. */ - public void startBundles(List<Bundle> bundles, boolean privileged) throws BundleException; + void startBundles(List<Bundle> bundles, boolean privileged) throws BundleException; /** * <p>This method <em>synchronously</em> refreshes all bundles currently loaded. Once this method returns, the * class loaders of all bundles will reflect on the current set of loaded bundles.</p> */ - public void refreshPackages(); + void refreshPackages(); /** * <p>Returns the BundleContext of this framework's system bundle. The returned BundleContext can be used by the @@ -70,7 +70,7 @@ public interface OsgiFramework { * @throws SecurityException If the caller does not have the appropriate permissions. * @since 2.0 */ - public BundleContext bundleContext(); + BundleContext bundleContext(); /** * <p>Returns an iterable collection of all installed bundles. This method returns a list of all bundles installed @@ -79,7 +79,7 @@ public interface OsgiFramework { * * @return An iterable collection of Bundle objects, one object per installed bundle. */ - public List<Bundle> bundles(); + List<Bundle> bundles(); /** * <p>This method starts the framework instance. 
Before this method is called, any call to {@link @@ -87,7 +87,7 @@ public interface OsgiFramework { * * @throws BundleException If any error occurs. */ - public void start() throws BundleException; + void start() throws BundleException; /** * <p>This method <em>synchronously</em> shuts down the framework. It must be called at the end of a session in @@ -95,5 +95,6 @@ public interface OsgiFramework { * * @throws BundleException If any error occurs. */ - public void stop() throws BundleException; + void stop() throws BundleException; + } diff --git a/jrt/src/com/yahoo/jrt/Acceptor.java b/jrt/src/com/yahoo/jrt/Acceptor.java index 05a7591ab74..7316f8c620b 100644 --- a/jrt/src/com/yahoo/jrt/Acceptor.java +++ b/jrt/src/com/yahoo/jrt/Acceptor.java @@ -13,7 +13,7 @@ import java.util.logging.Logger; * transport thread. To create an acceptor you need to invoke the * {@link Supervisor#listen listen} method in the {@link Supervisor} * class. - **/ + */ public class Acceptor { private class Run implements Runnable { @@ -34,15 +34,12 @@ public class Acceptor { private ServerSocketChannel serverChannel; - Acceptor(Transport parent, Supervisor owner, - Spec spec) throws ListenFailedException { - + Acceptor(Transport parent, Supervisor owner, Spec spec) throws ListenFailedException { this.parent = parent; this.owner = owner; - if (spec.malformed()) { - throw new ListenFailedException("Malformed spec"); - } + if (spec.malformed()) + throw new ListenFailedException("Malformed spec '" + spec + "'"); try { serverChannel = ServerSocketChannel.open(); @@ -55,7 +52,7 @@ public class Acceptor { if (serverChannel != null) { try { serverChannel.socket().close(); } catch (Exception x) {} } - throw new ListenFailedException("Listen failed", e); + throw new ListenFailedException("Failed to listen to " + spec, e); } thread.setDaemon(true); @@ -84,7 +81,7 @@ public class Acceptor { * @return listening spec, or null if not listening. 
**/ public Spec spec() { - if (!serverChannel.isOpen()) { + if ( ! serverChannel.isOpen()) { return null; } return new Spec(serverChannel.socket().getInetAddress().getHostName(), @@ -94,8 +91,7 @@ public class Acceptor { private void run() { while (serverChannel.isOpen()) { try { - parent.addConnection(new Connection(parent, owner, - serverChannel.accept())); + parent.addConnection(new Connection(parent, owner, serverChannel.accept())); parent.sync(); } catch (java.nio.channels.ClosedChannelException x) { } catch (Exception e) { diff --git a/jrt/src/com/yahoo/jrt/Connection.java b/jrt/src/com/yahoo/jrt/Connection.java index 7affa875cd6..52964726eb7 100644 --- a/jrt/src/com/yahoo/jrt/Connection.java +++ b/jrt/src/com/yahoo/jrt/Connection.java @@ -1,7 +1,6 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.jrt; - import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.SelectionKey; @@ -35,10 +34,8 @@ class Connection extends Target { private Buffer output = new Buffer(WRITE_SIZE * 2); private int maxInputSize = 64*1024; private int maxOutputSize = 64*1024; - private Map<Integer, ReplyHandler> replyMap - = new HashMap<Integer, ReplyHandler>(); - private Map<TargetWatcher, TargetWatcher> watchers - = new IdentityHashMap<TargetWatcher, TargetWatcher>(); + private Map<Integer, ReplyHandler> replyMap = new HashMap<>(); + private Map<TargetWatcher, TargetWatcher> watchers = new IdentityHashMap<>(); private int activeReqs = 0; private int writeWork = 0; private Transport parent; @@ -52,8 +49,7 @@ class Connection extends Target { private void setState(int state) { if (state <= this.state) { - log.log(Level.WARNING, "Bogus state transition: " - + this.state + "->" + state); + log.log(Level.WARNING, "Bogus state transition: " + this.state + "->" + state); return; } boolean live = (this.state == INITIAL && state == CONNECTED); @@ -95,8 +91,7 @@ class Connection extends 
Target { owner.sessionInit(this); } - public Connection(Transport parent, Supervisor owner, - Spec spec, Object context) { + public Connection(Transport parent, Supervisor owner, Spec spec, Object context) { super(context); this.parent = parent; this.owner = owner; @@ -400,6 +395,6 @@ class Connection extends Target { if (channel != null) { return "Connection { " + channel.socket() + " }"; } - return "Connection { no socket }"; + return "Connection { no socket, spec " + spec + " }"; } } diff --git a/jrt/src/com/yahoo/jrt/Connector.java b/jrt/src/com/yahoo/jrt/Connector.java index fa43710b1f6..6778e047a8b 100644 --- a/jrt/src/com/yahoo/jrt/Connector.java +++ b/jrt/src/com/yahoo/jrt/Connector.java @@ -27,7 +27,7 @@ class Connector { } public void connectLater(Connection c) { - if (!connectQueue.enqueue(c)) { + if ( ! connectQueue.enqueue(c)) { parent.addConnection(c); } } diff --git a/jrt/src/com/yahoo/jrt/Request.java b/jrt/src/com/yahoo/jrt/Request.java index 99d7df8657e..4786124e56b 100644 --- a/jrt/src/com/yahoo/jrt/Request.java +++ b/jrt/src/com/yahoo/jrt/Request.java @@ -14,7 +14,7 @@ package com.yahoo.jrt; * client/server roles are independent of connection client/server * roles, since invocations can be performed both ways across a {@link * Target}. 
- **/ + */ public class Request { private String methodName; @@ -242,8 +242,7 @@ public class Request if (returnValues.satisfies(returnTypes)) { return true; } - setError(ErrorCode.WRONG_RETURN, - "checkReturnValues: Wrong return values"); + setError(ErrorCode.WRONG_RETURN, "checkReturnValues: Wrong return values"); return false; } diff --git a/jrt/src/com/yahoo/jrt/Spec.java b/jrt/src/com/yahoo/jrt/Spec.java index 7ed0aa69920..4c1f07b98a2 100644 --- a/jrt/src/com/yahoo/jrt/Spec.java +++ b/jrt/src/com/yahoo/jrt/Spec.java @@ -2,6 +2,8 @@ package com.yahoo.jrt; +import com.yahoo.net.HostName; + import java.net.SocketAddress; import java.net.InetSocketAddress; @@ -9,9 +11,9 @@ import java.net.InetSocketAddress; /** * A Spec is a network address used for either listening or * connecting. - **/ -public class Spec -{ + */ +public class Spec { + private SocketAddress address; private String host; private int port; @@ -24,11 +26,11 @@ public class Spec * * @param spec input string to be parsed * @see #malformed - **/ + */ public Spec(String spec) { if (spec.startsWith("tcp/")) { int sep = spec.indexOf(':'); - String portStr = null; + String portStr; if (sep == -1) { portStr = spec.substring(4); } else { @@ -52,7 +54,7 @@ public class Spec * * @param host host name * @param port port number - **/ + */ public Spec(String host, int port) { this.host = host; this.port = port; @@ -62,7 +64,7 @@ public class Spec * Create a Spec from a port number. * * @param port port number - **/ + */ public Spec(int port) { this.port = port; } @@ -71,7 +73,7 @@ public class Spec * Obtain the host name of this address * * @return host name - **/ + */ public String host() { return host; } @@ -80,7 +82,7 @@ public class Spec * Obtain the port number if this address * * @return port number - **/ + */ public int port() { return port; } @@ -90,7 +92,7 @@ public class Spec * you whether that string was malformed. 
* * @return true if this address is malformed - **/ + */ public boolean malformed() { return malformed; } @@ -100,7 +102,7 @@ public class Spec * malformed, this method will return null. * * @return socket address - **/ + */ SocketAddress address() { if (malformed) { return null; @@ -114,13 +116,13 @@ public class Spec } return address; } - + /** * Obtain a string representation of this address. The return * value from this method may be used to create a new Spec. * * @return string representation of this address - **/ + */ public String toString() { if (malformed) { return "MALFORMED"; @@ -130,4 +132,5 @@ public class Spec } return "tcp/" + host + ":" + port; } + } diff --git a/jrt/src/com/yahoo/jrt/Transport.java b/jrt/src/com/yahoo/jrt/Transport.java index 85bfed79732..6a9a978fb77 100644 --- a/jrt/src/com/yahoo/jrt/Transport.java +++ b/jrt/src/com/yahoo/jrt/Transport.java @@ -229,9 +229,8 @@ public class Transport { * @param context application context for the new connection * @param sync perform a synchronous connect in the calling thread * if this flag is set - **/ - Connection connect(Supervisor owner, Spec spec, - Object context, boolean sync) { + */ + Connection connect(Supervisor owner, Spec spec, Object context, boolean sync) { Connection conn = new Connection(this, owner, spec, context); if (sync) { addConnection(conn.connect()); diff --git a/jrt/src/com/yahoo/jrt/slobrok/api/IMirror.java b/jrt/src/com/yahoo/jrt/slobrok/api/IMirror.java index 3662e6ad5b9..421590e72ce 100644 --- a/jrt/src/com/yahoo/jrt/slobrok/api/IMirror.java +++ b/jrt/src/com/yahoo/jrt/slobrok/api/IMirror.java @@ -4,8 +4,8 @@ package com.yahoo.jrt.slobrok.api; /** * Defines an interface for the name server lookup. 
* - * @author <a href="mailto:simon@yahoo-inc.com">Simon Thoresen</a> - **/ + * @author Simon Thoresen + */ public interface IMirror { /** @@ -21,7 +21,7 @@ public interface IMirror { * @return a list of all matching services, with corresponding connect specs * @param pattern The pattern used for matching **/ - public Mirror.Entry[] lookup(String pattern); + Mirror.Entry[] lookup(String pattern); /** * Obtain the number of updates seen by this mirror. The value may wrap, but will never become 0 again. This can be @@ -30,5 +30,6 @@ public interface IMirror { * * @return number of slobrok updates seen **/ - public int updates(); + int updates(); + } diff --git a/jrt/src/com/yahoo/jrt/slobrok/api/Mirror.java b/jrt/src/com/yahoo/jrt/slobrok/api/Mirror.java index 5e62cb61b76..81ec51e2b9e 100644 --- a/jrt/src/com/yahoo/jrt/slobrok/api/Mirror.java +++ b/jrt/src/com/yahoo/jrt/slobrok/api/Mirror.java @@ -1,16 +1,14 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.jrt.slobrok.api; - import com.yahoo.jrt.*; -import java.util.Arrays; -import java.util.Random; import java.util.ArrayList; +import java.util.HashMap; +import java.util.Map; import java.util.logging.Logger; import java.util.logging.Level; - /** * A Mirror object is used to keep track of the services registered * with a slobrok cluster. @@ -18,57 +16,19 @@ import java.util.logging.Level; * Updates to the service repository are fetched in the * background. Lookups against this object is done using an internal * mirror of the service repository. - **/ + */ public class Mirror implements IMirror { private static Logger log = Logger.getLogger(Mirror.class.getName()); - /** - * An Entry contains the name and connection spec for a single - * service. 
- **/ - public static final class Entry implements Comparable<Entry> { - private final String name; - private final String spec; - private final char [] nameArray; - - public Entry(String name, String spec) { - this.name = name; - this.spec = spec; - this.nameArray = name.toCharArray(); - } - - public boolean equals(Object rhs) { - if (rhs == null || !(rhs instanceof Entry)) { - return false; - } - Entry e = (Entry) rhs; - return (name.equals(e.name) && spec.equals(e.spec)); - } - - public int hashCode() { - return (name.hashCode() + spec.hashCode()); - } - - public int compareTo(Entry b) { - int diff = name.compareTo(b.name); - return diff != 0 - ? diff - : spec.compareTo(b.spec); - } - char [] getNameArray() { return nameArray; } - public String getName() { return name; } - public String getSpec() { return spec; } - } - private Supervisor orb; private SlobrokList slobroks; private String currSlobrok; private BackOffPolicy backOff; private volatile int updates = 0; - private boolean reqDone = false; + private boolean requestDone = false; private volatile Entry[] specs = new Entry[0]; - private int specsGen = 0; + private int specsGeneration = 0; private Task updateTask = null; private RequestWaiter reqWait = null; private Target target = null; @@ -87,11 +47,11 @@ public class Mirror implements IMirror { this.slobroks = slobroks; this.backOff = bop; updateTask = orb.transport().createTask(new Runnable() { - public void run() { handleUpdate(); } + public void run() { checkForUpdate(); } }); reqWait = new RequestWaiter() { public void handleRequestDone(Request req) { - reqDone = true; + requestDone = true; updateTask.scheduleNow(); } }; @@ -104,7 +64,7 @@ public class Mirror implements IMirror { * * @param orb the Supervisor to use * @param slobroks slobrok connect spec list - **/ + */ public Mirror(Supervisor orb, SlobrokList slobroks) { this(orb, slobroks, new BackOff()); } @@ -112,7 +72,7 @@ public class Mirror implements IMirror { /** * Shut down the Mirror. 
This will close any open connections and * stop the regular mirror updates. - **/ + */ public void shutdown() { updateTask.kill(); orb.transport().perform(new Runnable() { @@ -122,12 +82,11 @@ public class Mirror implements IMirror { @Override public Entry[] lookup(String pattern) { - ArrayList<Entry> found = new ArrayList<Entry>(); - Entry [] e = specs; - char [] p = pattern.toCharArray(); - for (int i = 0; i < e.length; i++) { - if (match(e[i].getNameArray(), p)) { - found.add(e[i]); + ArrayList<Entry> found = new ArrayList<>(); + char[] p = pattern.toCharArray(); + for (Entry specEntry : specs) { + if (match(specEntry.getNameArray(), p)) { + found.add(specEntry); } } return found.toArray(new Entry[found.size()]); @@ -145,7 +104,7 @@ public class Mirror implements IMirror { * (or if it never does, time out and tell the user there was no answer from any Service Location Broker). * * @return true if the MirrorAPI object has asked for updates from a Slobrok and got any answer back - **/ + */ public boolean ready() { return (updates != 0); } @@ -167,7 +126,7 @@ public class Mirror implements IMirror { * @return true if the name matches the pattern * @param name the name * @param pattern the pattern - **/ + */ static boolean match(char [] name, char [] pattern) { int ni = 0; int pi = 0; @@ -197,95 +156,58 @@ public class Mirror implements IMirror { /** * Invoked by the update task. 
- **/ - private void handleUpdate() { - if (reqDone) { - reqDone = false; - - if (req.errorCode() == ErrorCode.NONE && - req.returnValues().satisfies("SSi") && - req.returnValues().get(0).count() == req.returnValues().get(1).count()) - { - Values answer = req.returnValues(); - - if (specsGen != answer.get(2).asInt32()) { - - int numNames = answer.get(0).count(); - String[] n = answer.get(0).asStringArray(); - String[] s = answer.get(1).asStringArray(); - Entry[] newSpecs = new Entry[numNames]; - - for (int idx = 0; idx < numNames; idx++) { - newSpecs[idx] = new Entry(n[idx], s[idx]); - } - - specs = newSpecs; - - specsGen = answer.get(2).asInt32(); - int u = (updates + 1); - if (u == 0) { - u++; - } - updates = u; + */ + private void checkForUpdate() { + if (requestDone) { + handleUpdate(); + requestDone = false; + return; + } + + if (target != null && ! slobroks.contains(currSlobrok)) { + target.close(); + target = null; + } + if (target == null) { + currSlobrok = slobroks.nextSlobrokSpec(); + if (currSlobrok == null) { + double delay = backOff.get(); + updateTask.schedule(delay); + if (backOff.shouldWarn(delay)) { + log.log(Level.INFO, "no location brokers available " + + "(retry in " + delay + " seconds) for: " + slobroks); } - backOff.reset(); - updateTask.schedule(0.1); // be nice - return; - } - if (!req.checkReturnTypes("iSSSi") - || (req.returnValues().get(2).count() != - req.returnValues().get(3).count())) - { - target.close(); - target = null; - updateTask.scheduleNow(); // try next slobrok return; } + target = orb.connect(new Spec(currSlobrok)); + specsGeneration = 0; + } + req = new Request("slobrok.incremental.fetch"); + req.parameters().add(new Int32Value(specsGeneration)); // gencnt + req.parameters().add(new Int32Value(5000)); // mstimeout + target.invokeAsync(req, 40.0, reqWait); + } + + private void handleUpdate() { + if (req.errorCode() == ErrorCode.NONE && + req.returnValues().satisfies("SSi") && + req.returnValues().get(0).count() == 
req.returnValues().get(1).count()) + { + Values answer = req.returnValues(); + if (specsGeneration != answer.get(2).asInt32()) { - Values answer = req.returnValues(); + int numNames = answer.get(0).count(); + String[] n = answer.get(0).asStringArray(); + String[] s = answer.get(1).asStringArray(); + Entry[] newSpecs = new Entry[numNames]; - int diffFrom = answer.get(0).asInt32(); - int diffTo = answer.get(4).asInt32(); - - if (specsGen != diffTo) { - - int nRemoves = answer.get(1).count(); - String[] r = answer.get(1).asStringArray(); - - int numNames = answer.get(2).count(); - String[] n = answer.get(2).asStringArray(); - String[] s = answer.get(3).asStringArray(); - - - Entry[] newSpecs; - if (diffFrom == 0) { - newSpecs = new Entry[numNames]; - - for (int idx = 0; idx < numNames; idx++) { - newSpecs[idx] = new Entry(n[idx], s[idx]); - } - } else { - java.util.HashMap<String, Entry> map = new java.util.HashMap<String, Entry>(); - for (Entry e : specs) { - map.put(e.getName(), e); - } - for (String rem : r) { - map.remove(rem); - } - for (int idx = 0; idx < numNames; idx++) { - map.put(n[idx], new Entry(n[idx], s[idx])); - } - newSpecs = new Entry[map.size()]; - int idx = 0; - for (Entry e : map.values()) { - newSpecs[idx++] = e; - } + for (int idx = 0; idx < numNames; idx++) { + newSpecs[idx] = new Entry(n[idx], s[idx]); } - specs = newSpecs; - specsGen = diffTo; + specsGeneration = answer.get(2).asInt32(); int u = (updates + 1); if (u == 0) { u++; @@ -296,34 +218,72 @@ public class Mirror implements IMirror { updateTask.schedule(0.1); // be nice return; } - if (target != null && ! 
slobroks.contains(currSlobrok)) { + if (!req.checkReturnTypes("iSSSi") + || (req.returnValues().get(2).count() != + req.returnValues().get(3).count())) + { target.close(); target = null; + updateTask.scheduleNow(); // try next slobrok + return; } - if (target == null) { - currSlobrok = slobroks.nextSlobrokSpec(); - if (currSlobrok == null) { - double delay = backOff.get(); - updateTask.schedule(delay); - if (backOff.shouldWarn(delay)) { - log.log(Level.INFO, "no location brokers available " - + "(retry in " + delay + " seconds) for: " + slobroks); + + + Values answer = req.returnValues(); + + int diffFromGeneration = answer.get(0).asInt32(); + int diffToGeneration = answer.get(4).asInt32(); + if (specsGeneration != diffToGeneration) { + + int nRemoves = answer.get(1).count(); + String[] r = answer.get(1).asStringArray(); + + int numNames = answer.get(2).count(); + String[] n = answer.get(2).asStringArray(); + String[] s = answer.get(3).asStringArray(); + + Entry[] newSpecs; + if (diffFromGeneration == 0) { + newSpecs = new Entry[numNames]; + + for (int idx = 0; idx < numNames; idx++) { + newSpecs[idx] = new Entry(n[idx], s[idx]); + } + } else { + Map<String, Entry> map = new HashMap<>(); + for (Entry e : specs) { + map.put(e.getName(), e); + } + for (String rem : r) { + map.remove(rem); + } + for (int idx = 0; idx < numNames; idx++) { + map.put(n[idx], new Entry(n[idx], s[idx])); + } + newSpecs = new Entry[map.size()]; + int idx = 0; + for (Entry e : map.values()) { + newSpecs[idx++] = e; } - return; } - target = orb.connect(new Spec(currSlobrok)); - specsGen = 0; + + specs = newSpecs; + + specsGeneration = diffToGeneration; + int u = (updates + 1); + if (u == 0) { + u++; + } + updates = u; } - req = new Request("slobrok.incremental.fetch"); - req.parameters().add(new Int32Value(specsGen)); // gencnt - req.parameters().add(new Int32Value(5000)); // mstimeout - target.invokeAsync(req, 40.0, reqWait); + backOff.reset(); + updateTask.schedule(0.1); // be nice } /** * 
Invoked from the transport thread, requested by the shutdown * method. - **/ + */ private void handleShutdown() { if (req != null) { req.abort(); @@ -334,4 +294,44 @@ public class Mirror implements IMirror { target = null; } } + + /** + * An Entry contains the name and connection spec for a single + * service. + */ + public static final class Entry implements Comparable<Entry> { + + private final String name; + private final String spec; + private final char [] nameArray; + + public Entry(String name, String spec) { + this.name = name; + this.spec = spec; + this.nameArray = name.toCharArray(); + } + + public boolean equals(Object rhs) { + if (rhs == null || !(rhs instanceof Entry)) { + return false; + } + Entry e = (Entry) rhs; + return (name.equals(e.name) && spec.equals(e.spec)); + } + + public int hashCode() { + return (name.hashCode() + spec.hashCode()); + } + + public int compareTo(Entry b) { + int diff = name.compareTo(b.name); + return diff != 0 ? diff : spec.compareTo(b.spec); + } + + char [] getNameArray() { return nameArray; } + public String getName() { return name; } + public String getSpec() { return spec; } + + } + } diff --git a/jrt/src/com/yahoo/jrt/slobrok/api/Register.java b/jrt/src/com/yahoo/jrt/slobrok/api/Register.java index 84720501ff8..d1ea7a7f1fa 100644 --- a/jrt/src/com/yahoo/jrt/slobrok/api/Register.java +++ b/jrt/src/com/yahoo/jrt/slobrok/api/Register.java @@ -1,22 +1,21 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.jrt.slobrok.api; - import com.yahoo.jrt.*; import java.util.ArrayList; +import java.util.List; import java.util.Random; import java.util.logging.Logger; import java.util.logging.Level; - /** * A Register object is used to register and unregister services with * a slobrok cluster. 
* * The register/unregister operations performed against this object - * are stored in a todo list that will be performed asynchronously + * are stored in a to-do list that will be performed asynchronously * against the slobrok cluster as soon as possible. - **/ + */ public class Register { private static Logger log = Logger.getLogger(Register.class.getName()); @@ -27,9 +26,9 @@ public class Register { private String mySpec; private BackOffPolicy backOff; private boolean reqDone = false; - private ArrayList<String> names = new ArrayList<String>(); - private ArrayList<String> pending = new ArrayList<String>(); - private ArrayList<String> unreg = new ArrayList<String>(); + private List<String> names = new ArrayList<>(); + private List<String> pending = new ArrayList<>(); + private List<String> unreg = new ArrayList<>(); private Task updateTask = null; private RequestWaiter reqWait = null; private Target target = null; @@ -39,9 +38,9 @@ public class Register { /** * Remove all instances of name from list. 
- **/ - private void discard(ArrayList<String> list, String name) { - ArrayList<String> tmp = new ArrayList<String>(); + */ + private void discard(List<String> list, String name) { + List<String> tmp = new ArrayList<>(); tmp.add(name); list.removeAll(tmp); } @@ -54,7 +53,7 @@ public class Register { * @param slobroks slobrok connect spec list * @param spec the Spec representing hostname and port for this host * @param bop custom backoff policy, mostly useful for testing - **/ + */ public Register(Supervisor orb, SlobrokList slobroks, Spec spec, BackOffPolicy bop) { this.orb = orb; this.slobroks = slobroks; @@ -98,7 +97,7 @@ public class Register { * @param orb the Supervisor to use * @param slobroks slobrok connect spec list * @param spec the Spec representing hostname and port for this host - **/ + */ public Register(Supervisor orb, SlobrokList slobroks, Spec spec) { this(orb, slobroks, spec, new BackOff()); } @@ -111,9 +110,8 @@ public class Register { * @param slobroks slobrok connect spec list * @param myHost the hostname of this host * @param myPort the port number we are listening to - **/ - public Register(Supervisor orb, SlobrokList slobroks, - String myHost, int myPort) { + */ + public Register(Supervisor orb, SlobrokList slobroks, String myHost, int myPort) { this(orb, slobroks, new Spec(myHost, myPort)); } @@ -121,7 +119,7 @@ public class Register { /** * Shut down the Register. This will close any open connections * and stop the regular re-registration. - **/ + */ public void shutdown() { updateTask.kill(); orb.transport().perform(new Runnable() { @@ -133,7 +131,7 @@ public class Register { * Register a service with the slobrok cluster. 
* * @param name service name - **/ + */ public synchronized void registerName(String name) { if (names.indexOf(name) >= 0) { return; @@ -148,7 +146,7 @@ public class Register { * Unregister a service with the slobrok cluster * * @param name service name - **/ + */ public synchronized void unregisterName(String name) { discard(names, name); discard(pending, name); @@ -164,15 +162,11 @@ public class Register { reqDone = false; if (req.isError()) { if (req.errorCode() != ErrorCode.METHOD_FAILED) { - log.log(Level.FINE, "register failed: " - + req.errorMessage() - + " (code " + req.errorCode() + ")"); + log.log(Level.FINE, "register failed: " + req.errorMessage() + " (code " + req.errorCode() + ")"); target.close(); target = null; } else { - log.log(Level.WARNING, "register failed: " - + req.errorMessage() - + " (code " + req.errorCode() + ")"); + log.log(Level.WARNING, "register failed: " + req.errorMessage() + " (code " + req.errorCode() + ")"); } } else { backOff.reset(); @@ -192,13 +186,10 @@ public class Register { if (currSlobrok == null) { double delay = backOff.get(); updateTask.schedule(delay); - if (backOff.shouldWarn(delay)) { - log.log(Level.WARNING, "slobrok connection problems " - + "(retry in " + delay + " seconds) to: " + slobroks); - } else { - log.log(Level.FINE, "slobrok retry in " + delay - + " seconds"); - } + if (backOff.shouldWarn(delay)) + log.log(Level.WARNING, "slobrok connection problems (retry in " + delay + " seconds) to: " + slobroks); + else + log.log(Level.FINE, "slobrok retry in " + delay + " seconds"); return; } target = orb.connect(new Spec(currSlobrok)); @@ -207,16 +198,14 @@ public class Register { pending.addAll(names); } } - boolean rem = false; - boolean reg = false; + boolean unregister = false; String name; synchronized (this) { if (unreg.size() > 0) { name = unreg.remove(unreg.size() - 1); - rem = true; + unregister = true; } else if (pending.size() > 0) { name = pending.remove(pending.size() - 1); - reg = true; } else { 
pending.addAll(names); log.log(Level.FINE, "done, reschedule in 30s"); @@ -225,13 +214,13 @@ public class Register { } } - if (rem) { + if (unregister) { req = new Request("slobrok.unregisterRpcServer"); req.parameters().add(new StringValue(name)); log.log(Level.FINE, "unregister [" + name + "]"); req.parameters().add(new StringValue(mySpec)); target.invokeAsync(req, 35.0, reqWait); - } else if (reg) { + } else { // register req = new Request("slobrok.registerRpcServer"); req.parameters().add(new StringValue(name)); log.log(Level.FINE, "register [" + name + "]"); @@ -246,8 +235,7 @@ public class Register { } private void handleRpcUnreg(Request req) { - log.log(Level.WARNING, "unregistered name " - + req.parameters().get(0).asString()); + log.log(Level.WARNING, "unregistered name " + req.parameters().get(0).asString()); } /** @@ -266,4 +254,5 @@ public class Register { target = null; } } + } diff --git a/jrt/src/com/yahoo/jrt/slobrok/server/Slobrok.java b/jrt/src/com/yahoo/jrt/slobrok/server/Slobrok.java index 085489897b5..3c81f9618f8 100644 --- a/jrt/src/com/yahoo/jrt/slobrok/server/Slobrok.java +++ b/jrt/src/com/yahoo/jrt/slobrok/server/Slobrok.java @@ -5,79 +5,16 @@ import com.yahoo.jrt.*; import java.util.ArrayList; import java.util.HashMap; +import java.util.List; import java.util.Map; public class Slobrok { - private class RegisterCallback implements RequestWaiter { - - Request registerReq; - String name; - String spec; - Target target; - - public RegisterCallback(Request req, String name, String spec) { - req.detach(); - registerReq = req; - this.name = name; - this.spec = spec; - target = orb.connect(new Spec(spec)); - Request cbReq = new Request("slobrok.callback.listNamesServed"); - target.invokeAsync(cbReq, 5.0, this); - } - - public void handleRequestDone(Request req) { - if (!req.checkReturnTypes("S")) { - registerReq.setError(ErrorCode.METHOD_FAILED, "error during register callback: " - + req.errorMessage()); - registerReq.returnRequest(); - 
target.close(); - return; - } - String[] names = req.returnValues().get(0).asStringArray(); - boolean found = false; - for (String n : names) { - if (n.equals(name)) { - found = true; - } - } - if (!found) { - registerReq.setError(ErrorCode.METHOD_FAILED, "register failed: " - + "served names does not contain name"); - registerReq.returnRequest(); - target.close(); - return; - } - handleRegisterCallbackDone(registerReq, name, spec, target); - } - } - - private class FetchMirror implements Runnable { - public final Request req; - public final Task task; - - public FetchMirror(Request req, int timeout) { - req.detach(); - this.req = req; - task = orb.transport().createTask(this); - task.schedule(((double)timeout)/1000.0); - } - public void run() { // timeout - handleFetchMirrorTimeout(this); - } - } - - private class TargetMonitor implements TargetWatcher { - public void notifyTargetInvalid(Target target) { - handleTargetDown(target); - } - } - Supervisor orb; Acceptor listener; - HashMap<String,String> services = new HashMap<String,String>(); - ArrayList<FetchMirror> pendingFetch = new ArrayList<FetchMirror>(); - HashMap<String,Target> targets = new HashMap<String,Target>(); + private Map<String,String> services = new HashMap<>(); + List<FetchMirror> pendingFetch = new ArrayList<>(); + Map<String,Target> targets = new HashMap<>(); TargetMonitor monitor = new TargetMonitor(); int gencnt = 1; @@ -123,15 +60,11 @@ public class Slobrok { handleFetchMirrorFlush(); } - private void handleRegisterCallbackDone(Request req, - String name, String spec, - Target target) - { + private void handleRegisterCallbackDone(Request req, String name, String spec, Target target){ String stored = services.get(name); if (stored != null) { // too late - if (!stored.equals(spec)) { - req.setError(ErrorCode.METHOD_FAILED, - "service '" + name + "' registered with another spec"); + if ( ! 
stored.equals(spec)) { + req.setError(ErrorCode.METHOD_FAILED, "service '" + name + "' registered with another spec"); } req.returnRequest(); target.close(); @@ -153,8 +86,8 @@ public class Slobrok { } private void dumpServices(Request req) { - ArrayList<String> names = new ArrayList<String>(); - ArrayList<String> specs = new ArrayList<String>(); + List<String> names = new ArrayList<>(); + List<String> specs = new ArrayList<>(); for (Map.Entry<String,String> entry : services.entrySet()) { names.add(entry.getKey()); specs.add(entry.getValue()); @@ -225,12 +158,8 @@ public class Slobrok { if (stored == null) { new RegisterCallback(req, name, spec); } else { - if (stored.equals(spec)) { - // ok, already stored - } else { - req.setError(ErrorCode.METHOD_FAILED, - "service '" + name + "' registered with another spec"); - } + if ( ! stored.equals(spec)) + req.setError(ErrorCode.METHOD_FAILED, "service '" + name + "' registered with another spec"); } } @@ -267,4 +196,67 @@ public class Slobrok { } } + private class RegisterCallback implements RequestWaiter { + + Request registerReq; + String name; + String spec; + Target target; + + public RegisterCallback(Request req, String name, String spec) { + req.detach(); + registerReq = req; + this.name = name; + this.spec = spec; + target = orb.connect(new Spec(spec)); + Request cbReq = new Request("slobrok.callback.listNamesServed"); + target.invokeAsync(cbReq, 5.0, this); + } + + @Override + public void handleRequestDone(Request req) { + if ( ! 
req.checkReturnTypes("S")) { + registerReq.setError(ErrorCode.METHOD_FAILED, "error during register callback: " + req.errorMessage()); + registerReq.returnRequest(); + target.close(); + return; + } + String[] names = req.returnValues().get(0).asStringArray(); + boolean found = false; + for (String n : names) { + if (n.equals(name)) { + found = true; + } + } + if (!found) { + registerReq.setError(ErrorCode.METHOD_FAILED, "register failed: served names does not contain name"); + registerReq.returnRequest(); + target.close(); + return; + } + handleRegisterCallbackDone(registerReq, name, spec, target); + } + } + + private class FetchMirror implements Runnable { + public final Request req; + public final Task task; + + public FetchMirror(Request req, int timeout) { + req.detach(); + this.req = req; + task = orb.transport().createTask(this); + task.schedule(((double)timeout)/1000.0); + } + public void run() { // timeout + handleFetchMirrorTimeout(this); + } + } + + private class TargetMonitor implements TargetWatcher { + public void notifyTargetInvalid(Target target) { + handleTargetDown(target); + } + } + } diff --git a/linguistics/src/main/java/com/yahoo/language/Language.java b/linguistics/src/main/java/com/yahoo/language/Language.java index 0bf00f1230a..626fb2eac01 100644 --- a/linguistics/src/main/java/com/yahoo/language/Language.java +++ b/linguistics/src/main/java/com/yahoo/language/Language.java @@ -586,9 +586,8 @@ public enum Language { * @return the language given by the encoding, or {@link #UNKNOWN} if not determined. 
*/ public static Language fromEncoding(String encoding) { - if (encoding == null) { - return UNKNOWN; - } + if (encoding == null) return UNKNOWN; + return fromLowerCasedEncoding(Lowercase.toLowerCase(encoding)); } diff --git a/linguistics/src/main/java/com/yahoo/language/process/StemMode.java b/linguistics/src/main/java/com/yahoo/language/process/StemMode.java index 269b08dcdf7..ebacb307a85 100644 --- a/linguistics/src/main/java/com/yahoo/language/process/StemMode.java +++ b/linguistics/src/main/java/com/yahoo/language/process/StemMode.java @@ -6,7 +6,7 @@ package com.yahoo.language.process; * Stemming implementation may support a smaller number of modes by mapping a mode to a more * inclusive alternative. * - * @author <a href="mailto:mathiasm@yahoo-inc.com">Mathias Mølster Lidal</a> + * @author Mathias Mølster Lidal */ public enum StemMode { diff --git a/logserver/src/main/java/com/yahoo/logserver/net/LogConnection.java b/logserver/src/main/java/com/yahoo/logserver/net/LogConnection.java index 1a48c5fb0f4..192e0769b6f 100644 --- a/logserver/src/main/java/com/yahoo/logserver/net/LogConnection.java +++ b/logserver/src/main/java/com/yahoo/logserver/net/LogConnection.java @@ -206,7 +206,7 @@ public class LogConnection implements Connection { return; } int count = 200; - log.log(LogLevel.WARNING, "Log message too long. Message from " + log.log(LogLevel.DEBUG, "Log message too long. Message from " + socket.socket().getInetAddress() + " exceeds " + readBuffer.capacity() + ". Skipping buffer (might be part of same long message). 
Printing first " + count + " characters of line: " + diff --git a/logserver/src/test/java/com/yahoo/logserver/net/test/LogConnectionTestCase.java b/logserver/src/test/java/com/yahoo/logserver/net/test/LogConnectionTestCase.java index 06f35567e9f..90835930fc0 100644 --- a/logserver/src/test/java/com/yahoo/logserver/net/test/LogConnectionTestCase.java +++ b/logserver/src/test/java/com/yahoo/logserver/net/test/LogConnectionTestCase.java @@ -32,7 +32,6 @@ import static org.junit.Assert.*; public class LogConnectionTestCase { private static final Logger log = Logger.getLogger(LogConnectionTestCase.class.getName()); - private static final int PROBING_RANGE_START = 41352; private static final Charset charset = Charset.forName("utf-8"); private static final ByteBuffer bigBuffer; private int port; @@ -48,7 +47,7 @@ public class LogConnectionTestCase { prefix = prefix.substring(0, prefix.length() - 1); sb.append(prefix); - // fill up the remainding buffer with rubbish to make + // fill up the remaining buffer with rubbish to make // it too long for (int i = 0; i < (LogConnection.READBUFFER_SIZE * 3); i++) { sb.append("a"); diff --git a/memfilepersistence/src/tests/spi/buffer_test.cpp b/memfilepersistence/src/tests/spi/buffer_test.cpp index a2d917301fc..0addb1032f5 100644 --- a/memfilepersistence/src/tests/spi/buffer_test.cpp +++ b/memfilepersistence/src/tests/spi/buffer_test.cpp @@ -36,9 +36,8 @@ BufferTest::getSizeReturnsInitiallyAllocatedSize() void BufferTest::getSizeReturnsUnAlignedSizeForMMappedAllocs() { - Buffer buf(vespalib::MMapAlloc::HUGEPAGE_SIZE + 1); - CPPUNIT_ASSERT_EQUAL(size_t(vespalib::MMapAlloc::HUGEPAGE_SIZE + 1), - buf.getSize()); + Buffer buf(vespalib::alloc::MemoryAllocator::HUGEPAGE_SIZE + 1); + CPPUNIT_ASSERT_EQUAL(size_t(vespalib::alloc::MemoryAllocator::HUGEPAGE_SIZE + 1), buf.getSize()); } void diff --git a/memfilepersistence/src/vespa/memfilepersistence/common/filespecification.h 
b/memfilepersistence/src/vespa/memfilepersistence/common/filespecification.h index 4d9cda2c47c..63dd0d3172e 100644 --- a/memfilepersistence/src/vespa/memfilepersistence/common/filespecification.h +++ b/memfilepersistence/src/vespa/memfilepersistence/common/filespecification.h @@ -20,8 +20,7 @@ namespace memfile { class MemFileEnvironment; class FileSpecification : private Types, - public vespalib::Printable, - public boost::operators<FileSpecification> + public vespalib::Printable { BucketId _bucketId; Directory* _dir; diff --git a/memfilepersistence/src/vespa/memfilepersistence/common/options.h b/memfilepersistence/src/vespa/memfilepersistence/common/options.h index 831f43ab603..a83657a9189 100644 --- a/memfilepersistence/src/vespa/memfilepersistence/common/options.h +++ b/memfilepersistence/src/vespa/memfilepersistence/common/options.h @@ -16,7 +16,6 @@ #pragma once -#include <boost/operators.hpp> #include <vespa/vespalib/util/printable.h> #include <vespa/fastos/types.h> // For uint32_t on linux #include <string> @@ -29,8 +28,7 @@ namespace storage { namespace memfile { -struct Options : public vespalib::Printable, - public boost::operators<Options> +struct Options : public vespalib::Printable { // Parameters from def file. See config file for comments. diff --git a/memfilepersistence/src/vespa/memfilepersistence/common/types.h b/memfilepersistence/src/vespa/memfilepersistence/common/types.h index bf4bdc98222..71a9b411e6c 100644 --- a/memfilepersistence/src/vespa/memfilepersistence/common/types.h +++ b/memfilepersistence/src/vespa/memfilepersistence/common/types.h @@ -39,7 +39,7 @@ namespace memfile { * of zero with a non-zero position is invalid, and used to indicate that this * value is not set yet. 
(Typically when data isn't persisted to disk yet) */ -struct DataLocation : public boost::operators<DataLocation> { +struct DataLocation { uint32_t _pos; uint32_t _size; @@ -52,8 +52,12 @@ struct DataLocation : public boost::operators<DataLocation> { bool valid() const { return (_size > 0 || _pos == 0); } - bool operator==(const DataLocation& other) const - { return (_pos == other._pos && _size == other._size); } + bool operator==(const DataLocation& other) const { + return (_pos == other._pos && _size == other._size); + } + bool operator!=(const DataLocation& other) const { + return ! (*this == other); + } bool operator<(const DataLocation& other) const { if (_pos == other._pos) { diff --git a/memfilepersistence/src/vespa/memfilepersistence/mapper/buffer.cpp b/memfilepersistence/src/vespa/memfilepersistence/mapper/buffer.cpp index 5ecb439b3f0..a998bb7d90e 100644 --- a/memfilepersistence/src/vespa/memfilepersistence/mapper/buffer.cpp +++ b/memfilepersistence/src/vespa/memfilepersistence/mapper/buffer.cpp @@ -5,11 +5,18 @@ #include <algorithm> #include <stdlib.h> +using vespalib::DefaultAlloc; +using vespalib::alloc::MemoryAllocator; +using vespalib::alloc::Alloc; + namespace storage { namespace memfile { +// Use AutoAlloc to transparently use mmap for large buffers. +// It is crucial that any backing buffer type returns an address that is +// 512-byte aligned, or direct IO will scream at us and fail everything. 
Buffer::Buffer(size_t size) - : _buffer(size), + : _buffer(DefaultAlloc::create(size, MemoryAllocator::HUGEPAGE_SIZE, 512)), _size(size) { } @@ -17,7 +24,7 @@ Buffer::Buffer(size_t size) void Buffer::resize(size_t size) { - BackingType buffer(size); + Alloc buffer = _buffer.create(size); size_t commonSize(std::min(size, _size)); memcpy(buffer.get(), _buffer.get(), commonSize); _buffer.swap(buffer); diff --git a/memfilepersistence/src/vespa/memfilepersistence/mapper/buffer.h b/memfilepersistence/src/vespa/memfilepersistence/mapper/buffer.h index d097a078af9..26f4a644d0c 100644 --- a/memfilepersistence/src/vespa/memfilepersistence/mapper/buffer.h +++ b/memfilepersistence/src/vespa/memfilepersistence/mapper/buffer.h @@ -19,13 +19,7 @@ namespace memfile { class Buffer { - // Use AutoAlloc to transparently use mmap for large buffers. - // It is crucial that any backing buffer type returns an address that is - // 512-byte aligned, or direct IO will scream at us and fail everything. - static constexpr size_t MMapLimit = vespalib::MMapAlloc::HUGEPAGE_SIZE; - using BackingType = vespalib::AutoAlloc<MMapLimit, 512>; - - BackingType _buffer; + vespalib::alloc::Alloc _buffer; // Actual, non-aligned size (as opposed to _buffer.size()). 
size_t _size; diff --git a/memfilepersistence/src/vespa/memfilepersistence/mapper/bufferedfilewriter.cpp b/memfilepersistence/src/vespa/memfilepersistence/mapper/bufferedfilewriter.cpp index 369df0c1143..1cc765ada5a 100644 --- a/memfilepersistence/src/vespa/memfilepersistence/mapper/bufferedfilewriter.cpp +++ b/memfilepersistence/src/vespa/memfilepersistence/mapper/bufferedfilewriter.cpp @@ -2,7 +2,6 @@ #include <vespa/fastos/fastos.h> #include <vespa/memfilepersistence/mapper/bufferedfilewriter.h> -#include <boost/scoped_array.hpp> #include <vespa/vespalib/util/guard.h> #include <vespa/log/log.h> #include <vespa/vespalib/io/fileutil.h> @@ -162,7 +161,7 @@ void BufferedFileWriter::writeGarbage(uint32_t size) { ValueGuard<uint32_t> filePositionGuard(_filePosition); uint32_t maxBufferSize = 0xFFFF; uint32_t bufSize = (size > maxBufferSize ? maxBufferSize : size); - boost::scoped_array<char> buf(new char[bufSize]); + std::unique_ptr<char[]> buf(new char[bufSize]); while (size > 0) { uint32_t part = (size > bufSize ? 
bufSize : size); write(&buf[0], part, _filePosition); diff --git a/memfilepersistence/src/vespa/memfilepersistence/mapper/simplememfileiobuffer.h b/memfilepersistence/src/vespa/memfilepersistence/mapper/simplememfileiobuffer.h index 8dbffcaf795..c564893a154 100644 --- a/memfilepersistence/src/vespa/memfilepersistence/mapper/simplememfileiobuffer.h +++ b/memfilepersistence/src/vespa/memfilepersistence/mapper/simplememfileiobuffer.h @@ -77,7 +77,7 @@ public: typedef vespalib::LinkedPtr<SharedBuffer> LP; explicit SharedBuffer(size_t totalSize) - : _buf(totalSize), + : _buf(vespalib::alloc::MMapAllocFactory::create(totalSize)), _usedSize(0) { } @@ -115,7 +115,7 @@ public: return static_cast<const char*>(_buf.get()); } private: - vespalib::MMapAlloc _buf; + vespalib::alloc::Alloc _buf; size_t _usedSize; }; diff --git a/memfilepersistence/src/vespa/memfilepersistence/memfile/memslot.h b/memfilepersistence/src/vespa/memfilepersistence/memfile/memslot.h index 53a20a86f8a..4ea44a45996 100644 --- a/memfilepersistence/src/vespa/memfilepersistence/memfile/memslot.h +++ b/memfilepersistence/src/vespa/memfilepersistence/memfile/memslot.h @@ -28,8 +28,7 @@ namespace memfile { class MemFile; -class MemSlot : private Types, - private boost::operators<MemSlot> +class MemSlot : private Types { // Metadata for slot we need to keep. Timestamp _timestamp; // 64 bit - 8 bytes timestamp @@ -166,6 +165,9 @@ public: * Used in unit testing only. */ bool operator==(const MemSlot& other) const; + bool operator!=(const MemSlot& other) const { + return ! 
(*this == other); + } // Implement print functions so we can be used similar to as we were // a document::Printable (Don't want inheritance in this class) diff --git a/memfilepersistence/src/vespa/memfilepersistence/memfile/slotiterator.h b/memfilepersistence/src/vespa/memfilepersistence/memfile/slotiterator.h index c10075ef143..d62bdf10025 100644 --- a/memfilepersistence/src/vespa/memfilepersistence/memfile/slotiterator.h +++ b/memfilepersistence/src/vespa/memfilepersistence/memfile/slotiterator.h @@ -20,7 +20,6 @@ #pragma once -#include <boost/operators.hpp> #include <vespa/memfilepersistence/common/types.h> #include <vespa/vespalib/stllike/hash_set.h> @@ -97,7 +96,7 @@ public: * implementation in order to be able to return iterators by value, as one is * acustomed to in the standard library. */ -class IteratorWrapper : public boost::operators<IteratorWrapper> { +class IteratorWrapper { SlotIterator::CUP _it; public: @@ -116,6 +115,9 @@ public: const MemSlot* slot2(o._it.get() == 0 ? 0 : o._it->getCurrent()); return (slot == slot2); } + bool operator!=(const IteratorWrapper& o) const { + return ! 
(*this == o); + } const MemSlot& operator*() const { return *_it->getCurrent(); } const MemSlot* operator->() const { return _it->getCurrent(); } diff --git a/messagebus/src/main/java/com/yahoo/messagebus/MessageBus.java b/messagebus/src/main/java/com/yahoo/messagebus/MessageBus.java index 729bef7985f..cf5beb4a903 100644 --- a/messagebus/src/main/java/com/yahoo/messagebus/MessageBus.java +++ b/messagebus/src/main/java/com/yahoo/messagebus/MessageBus.java @@ -22,33 +22,34 @@ import java.util.logging.Logger; * and forward messages.</p> * * <p>There are three types of sessions:</p> - * <ul><li>{@link SourceSession Source sessions} sends messages and receives - * replies</li> - * <li>{@link IntermediateSession Intermediate sessions} receives messages on - * their way to their final destination, and may decide to forward the messages - * or reply directly.</li> - * <li>{@link DestinationSession Destination sessions} are the final recipient - * of messages, and are expected to reply to every one of them, but may not - * forward messages.</li></ul> + * <ul> + * <li>{@link SourceSession Source sessions} sends messages and receives replies</li> + * <li>{@link IntermediateSession Intermediate sessions} receives messages on + * their way to their final destination, and may decide to forward the messages or reply directly. + * <li>{@link DestinationSession Destination sessions} are the final recipient + * of messages, and are expected to reply to every one of them, but may not forward messages. + * </ul> * * <p>A message bus is configured with a {@link Protocol protocol}. 
This table * enumerates the permissible routes from intermediates to destinations and the * messaging semantics of each hop.</p> * - * <p>The responsibilities of a message bus are:</p> - * <ul> <li>Assign a route to every send message from its routing table</li> - * <li>Deliver every message it <i>accepts</i> to the next hop on its route on a - * best effort basis, <i>or</i> deliver a <i>failure reply</i>.</li> - * <li>Deliver replies back to message sources through all the intermediate - * hops.</li></ul> + * The responsibilities of a message bus are: + * <ul> + * <li>Assign a route to every send message from its routing table + * <li>Deliver every message it <i>accepts</i> to the next hop on its route + * <i>or</i> deliver a <i>failure reply</i>. + * <li>Deliver replies back to message sources through all the intermediate hops. + * </ul> * - * <p>A runtime will typically</p> - * <ul><li>Create a message bus implementation and set properties on this - * implementation once.</li> - * <li>Create sessions using that message bus many places.</li></ul> + * A runtime will typically + * <ul> + * <li>Create a message bus implementation and set properties on this implementation once. + * <li>Create sessions using that message bus many places.</li> + * </ul> * - * @author btratseth - * @author <a href="mailto:simon@yahoo-inc.com">Simon Thoresen</a> + * @author bratseth + * @author Simon Thoresen */ public class MessageBus implements ConfigHandler, NetworkOwner, MessageHandler, ReplyHandler { @@ -101,9 +102,8 @@ public class MessageBus implements ConfigHandler, NetworkOwner, MessageHandler, // Attach and start network. this.net = net; net.attach(this); - if (!net.waitUntilReady(120)) { + if ( ! net.waitUntilReady(120)) throw new IllegalStateException("Network failed to become ready in time."); - } // Start messenger. 
msn = new Messenger(); diff --git a/messagebus/src/main/java/com/yahoo/messagebus/NetworkMessageBus.java b/messagebus/src/main/java/com/yahoo/messagebus/NetworkMessageBus.java new file mode 100644 index 00000000000..24e177f1fbf --- /dev/null +++ b/messagebus/src/main/java/com/yahoo/messagebus/NetworkMessageBus.java @@ -0,0 +1,43 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.messagebus; + +import com.yahoo.messagebus.network.Network; + +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * The combination of a messagebus and a network over which it may send data. + * + * @author bratseth + */ +public class NetworkMessageBus { + + private final Network network; + private final MessageBus messageBus; + + private final AtomicBoolean destroyed = new AtomicBoolean(false); + + public NetworkMessageBus(Network network, MessageBus messageBus) { + this.network = network; + this.messageBus = messageBus; + } + + /** Returns the contained message bus object */ + public MessageBus getMessageBus() { return messageBus; } + + /** Returns the network of this as a Network */ + public Network getNetwork() { return network; } + + /** + * Irreversibly destroys the content of this. 
+ * + * @return whether this destroyed anything, or if it was already destroyed + */ + public boolean destroy() { + if ( destroyed.getAndSet(true)) return false; + + getMessageBus().destroy(); + return true; + } + +} diff --git a/messagebus/src/main/java/com/yahoo/messagebus/RPCMessageBus.java b/messagebus/src/main/java/com/yahoo/messagebus/RPCMessageBus.java index d767e197b11..cfa50a35549 100644 --- a/messagebus/src/main/java/com/yahoo/messagebus/RPCMessageBus.java +++ b/messagebus/src/main/java/com/yahoo/messagebus/RPCMessageBus.java @@ -3,6 +3,7 @@ package com.yahoo.messagebus; import com.yahoo.log.LogLevel; import com.yahoo.messagebus.network.Identity; +import com.yahoo.messagebus.network.Network; import com.yahoo.messagebus.network.rpc.RPCNetwork; import com.yahoo.messagebus.network.rpc.RPCNetworkParams; @@ -17,12 +18,9 @@ import java.util.logging.Logger; * * @author <a href="mailto:simon@yahoo-inc.com">Simon Thoresen</a> */ -public class RPCMessageBus { +public class RPCMessageBus extends NetworkMessageBus { private static final Logger log = Logger.getLogger(RPCMessageBus.class.getName()); - private final AtomicBoolean destroyed = new AtomicBoolean(false); - private final MessageBus mbus; - private final RPCNetwork net; private final ConfigAgent configAgent; /** @@ -33,9 +31,16 @@ public class RPCMessageBus { * @param routingCfgId The config id for message bus routing specs. */ public RPCMessageBus(MessageBusParams mbusParams, RPCNetworkParams rpcParams, String routingCfgId) { - net = new RPCNetwork(rpcParams); - mbus = new MessageBus(net, mbusParams); - configAgent = new ConfigAgent(routingCfgId != null ? 
routingCfgId : "client", mbus); + this(mbusParams, new RPCNetwork(rpcParams), routingCfgId); + } + + private RPCMessageBus(MessageBusParams mbusParams, RPCNetwork network, String routingCfgId) { + this(new MessageBus(network, mbusParams), network, routingCfgId); + } + + private RPCMessageBus(MessageBus messageBus, RPCNetwork network, String routingCfgId) { + super(network, messageBus); + configAgent = new ConfigAgent(routingCfgId != null ? routingCfgId : "client", messageBus); configAgent.subscribe(); } @@ -80,33 +85,17 @@ public class RPCMessageBus { * Sets the destroyed flag to true. The very first time this method is called, it cleans up all its dependencies. * Even if you retain a reference to this object, all of its content is allowed to be garbage collected. * - * @return True if content existed and was destroyed. + * @return true if content existed and was destroyed. */ + @Override public boolean destroy() { - if (!destroyed.getAndSet(true)) { + boolean destroyed = super.destroy(); + if (destroyed) configAgent.shutdown(); - mbus.destroy(); - return true; - } - return false; + return destroyed; } - /** - * Returns the contained message bus object. - * - * @return Message bus. - */ - public MessageBus getMessageBus() { - return mbus; - } - - /** - * Returns the contained rpc network object. - * - * @return RPC network. - */ - public RPCNetwork getRPCNetwork() { - return net; - } + /** Returns the network of this as a RPCNetwork */ + public RPCNetwork getRPCNetwork() { return (RPCNetwork)getNetwork(); } } diff --git a/messagebus/src/main/java/com/yahoo/messagebus/network/Identity.java b/messagebus/src/main/java/com/yahoo/messagebus/network/Identity.java index 37d42169a1a..45887b072ab 100644 --- a/messagebus/src/main/java/com/yahoo/messagebus/network/Identity.java +++ b/messagebus/src/main/java/com/yahoo/messagebus/network/Identity.java @@ -1,7 +1,12 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. 
See LICENSE in the project root. package com.yahoo.messagebus.network; +import com.yahoo.log.LogLevel; import com.yahoo.net.HostName; +import com.yahoo.net.LinuxInetAddress; + +import java.net.Inet6Address; +import java.net.InetAddress; /** * This class encapsulates the identity of the application that uses this instance of message bus. This identity @@ -11,6 +16,7 @@ import com.yahoo.net.HostName; * @author <a href="mailto:simon@yahoo-inc.com">Simon Thoresen</a> */ public class Identity { + private final String hostname; private final String servicePrefix; @@ -22,7 +28,11 @@ public class Identity { * @param configId The config identifier for the application. */ public Identity(String configId) { - hostname = HostName.getLocalhost(); + InetAddress addr = LinuxInetAddress.getLocalHost(); // try hard to get a resolvable address + if (addr instanceof Inet6Address) // + hostname = HostName.getLocalhost(); // ... but fallback to hostname if we get an IPv6 address + else + hostname = addr.getCanonicalHostName(); servicePrefix = configId; } @@ -55,4 +65,5 @@ public class Identity { public String getServicePrefix() { return servicePrefix; } + } diff --git a/messagebus/src/main/java/com/yahoo/messagebus/network/Network.java b/messagebus/src/main/java/com/yahoo/messagebus/network/Network.java index cd3b3286778..b0bbe4266c4 100644 --- a/messagebus/src/main/java/com/yahoo/messagebus/network/Network.java +++ b/messagebus/src/main/java/com/yahoo/messagebus/network/Network.java @@ -21,28 +21,28 @@ public interface Network { * @param seconds The timeout. * @return True if ready. */ - public boolean waitUntilReady(double seconds); + boolean waitUntilReady(double seconds); /** * Attach the network layer to the given owner * * @param owner owner of the network */ - public void attach(NetworkOwner owner); + void attach(NetworkOwner owner); /** * Register a session name with the network layer. This will make the session visible to other nodes. 
* * @param session the session name */ - public void registerSession(String session); + void registerSession(String session); /** * Unregister a session name with the network layer. This will make the session unavailable for other nodes. * * @param session session name */ - public void unregisterSession(String session); + void unregisterSession(String session); /** * Resolves the service address of the recipient referenced by the given routing node. If a recipient can not be @@ -52,7 +52,7 @@ public interface Network { * @param recipient The node whose service address to allocate. * @return True if a service address was allocated. */ - public boolean allocServiceAddress(RoutingNode recipient); + boolean allocServiceAddress(RoutingNode recipient); /** * Frees the service address from the given routing node. This allows the network layer to track and close @@ -60,7 +60,7 @@ public interface Network { * * @param recipient The node whose service address to free. */ - public void freeServiceAddress(RoutingNode recipient); + void freeServiceAddress(RoutingNode recipient); /** * Send a message to the given recipients. A {@link RoutingNode} contains all the necessary context for sending. @@ -68,7 +68,7 @@ public interface Network { * @param msg The message to send. * @param recipients A list of routing leaf nodes resolved for the message. */ - public void send(Message msg, List<RoutingNode> recipients); + void send(Message msg, List<RoutingNode> recipients); /** * Synchronize with internal threads. This method will handshake with all internal threads. This has the implicit @@ -76,12 +76,12 @@ public interface Network { * that would make the thread wait for itself... forever. This method is typically used to untangle during session * shutdown. */ - public void sync(); + void sync(); /** * Shuts down the network. This is a blocking call that waits for all scheduled tasks to complete. 
*/ - public void shutdown(); + void shutdown(); /** * Returns a string that represents the connection specs of this network. It is in not a complete address since it @@ -89,12 +89,13 @@ public interface Network { * * @return The connection string. */ - public String getConnectionSpec(); + String getConnectionSpec(); /** * Returns a reference to a name server mirror. * * @return The mirror object. */ - public IMirror getMirror(); + IMirror getMirror(); + } diff --git a/messagebus/src/main/java/com/yahoo/messagebus/network/local/LocalNetwork.java b/messagebus/src/main/java/com/yahoo/messagebus/network/local/LocalNetwork.java index ffcb853a0a7..78cf352cfbf 100644 --- a/messagebus/src/main/java/com/yahoo/messagebus/network/local/LocalNetwork.java +++ b/messagebus/src/main/java/com/yahoo/messagebus/network/local/LocalNetwork.java @@ -23,7 +23,7 @@ import java.util.concurrent.Executors; import static com.yahoo.messagebus.ErrorCode.NO_ADDRESS_FOR_SERVICE; /** - * @author <a href="mailto:simon@yahoo-inc.com">Simon Thoresen Hult</a> + * @author Simon Thoresen Hult */ public class LocalNetwork implements Network { @@ -32,35 +32,39 @@ public class LocalNetwork implements Network { private final String hostId; private volatile NetworkOwner owner; - public LocalNetwork(final LocalWire wire) { + public LocalNetwork() { + this(new LocalWire()); + } + + public LocalNetwork(LocalWire wire) { this.wire = wire; this.hostId = wire.newHostId(); } @Override - public boolean waitUntilReady(final double seconds) { + public boolean waitUntilReady(double seconds) { return true; } @Override - public void attach(final NetworkOwner owner) { + public void attach(NetworkOwner owner) { this.owner = owner; } @Override - public void registerSession(final String session) { + public void registerSession(String session) { wire.registerService(hostId + "/" + session, this); } @Override - public void unregisterSession(final String session) { + public void unregisterSession(String session) { 
wire.unregisterService(hostId + "/" + session); } @Override - public boolean allocServiceAddress(final RoutingNode recipient) { - final String service = recipient.getRoute().getHop(0).getServiceName(); - final ServiceAddress address = wire.resolveServiceAddress(service); + public boolean allocServiceAddress(RoutingNode recipient) { + String service = recipient.getRoute().getHop(0).getServiceName(); + ServiceAddress address = wire.resolveServiceAddress(service); if (address == null) { recipient.setError(new Error(NO_ADDRESS_FOR_SERVICE, "No address for service '" + service + "'.")); return false; @@ -70,24 +74,24 @@ public class LocalNetwork implements Network { } @Override - public void freeServiceAddress(final RoutingNode recipient) { + public void freeServiceAddress(RoutingNode recipient) { recipient.setServiceAddress(null); } @Override - public void send(final Message msg, final List<RoutingNode> recipients) { - for (final RoutingNode recipient : recipients) { + public void send(Message msg, List<RoutingNode> recipients) { + for (RoutingNode recipient : recipients) { new MessageEnvelope(this, msg, recipient).send(); } } - private void receiveLater(final MessageEnvelope envelope) { - final byte[] payload = envelope.sender.encode(envelope.msg.getProtocol(), envelope.msg); + private void receiveLater(MessageEnvelope envelope) { + byte[] payload = envelope.sender.encode(envelope.msg.getProtocol(), envelope.msg); executor.execute(new Runnable() { @Override public void run() { - final Message msg = decode(envelope.msg.getProtocol(), payload, Message.class); + Message msg = decode(envelope.msg.getProtocol(), payload, Message.class); msg.getTrace().setLevel(envelope.msg.getTrace().getLevel()); msg.setRoute(envelope.msg.getRoute()).getRoute().removeHop(0); msg.setRetryEnabled(envelope.msg.getRetryEnabled()); @@ -96,7 +100,7 @@ public class LocalNetwork implements Network { msg.pushHandler(new ReplyHandler() { @Override - public void handleReply(final Reply reply) { + 
public void handleReply(Reply reply) { new ReplyEnvelope(LocalNetwork.this, envelope, reply).send(); } }); @@ -106,17 +110,17 @@ public class LocalNetwork implements Network { }); } - private void receiveLater(final ReplyEnvelope envelope) { - final byte[] payload = envelope.sender.encode(envelope.reply.getProtocol(), envelope.reply); + private void receiveLater(ReplyEnvelope envelope) { + byte[] payload = envelope.sender.encode(envelope.reply.getProtocol(), envelope.reply); executor.execute(new Runnable() { @Override public void run() { - final Reply reply = decode(envelope.reply.getProtocol(), payload, Reply.class); + Reply reply = decode(envelope.reply.getProtocol(), payload, Reply.class); reply.setRetryDelay(envelope.reply.getRetryDelay()); reply.getTrace().getRoot().addChild(TraceNode.decode(envelope.reply.getTrace().getRoot().encode())); for (int i = 0, len = envelope.reply.getNumErrors(); i < len; ++i) { - final Error error = envelope.reply.getError(i); + Error error = envelope.reply.getError(i); reply.addError(new Error(error.getCode(), error.getMessage(), error.getService() != null ? 
error.getService() : envelope.sender.hostId)); @@ -126,7 +130,7 @@ public class LocalNetwork implements Network { }); } - private byte[] encode(final Utf8String protocolName, final Routable toEncode) { + private byte[] encode(Utf8String protocolName, Routable toEncode) { if (toEncode.getType() == 0) { return new byte[0]; } @@ -134,7 +138,7 @@ public class LocalNetwork implements Network { } @SuppressWarnings("unchecked") - private <T extends Routable> T decode(final Utf8String protocolName, final byte[] toDecode, final Class<T> clazz) { + private <T extends Routable> T decode(Utf8String protocolName, byte[] toDecode, Class<T> clazz) { if (toDecode.length == 0) { return clazz.cast(new EmptyReply()); } @@ -167,15 +171,14 @@ public class LocalNetwork implements Network { final Message msg; final RoutingNode recipient; - MessageEnvelope(final LocalNetwork sender, final Message msg, final RoutingNode recipient) { + MessageEnvelope(LocalNetwork sender, Message msg, RoutingNode recipient) { this.sender = sender; this.msg = msg; this.recipient = recipient; } void send() { - LocalServiceAddress.class.cast(recipient.getServiceAddress()) - .getNetwork().receiveLater(this); + LocalServiceAddress.class.cast(recipient.getServiceAddress()).getNetwork().receiveLater(this); } } @@ -185,7 +188,7 @@ public class LocalNetwork implements Network { final MessageEnvelope parent; final Reply reply; - ReplyEnvelope(final LocalNetwork sender, final MessageEnvelope parent, final Reply reply) { + ReplyEnvelope(LocalNetwork sender, MessageEnvelope parent, Reply reply) { this.sender = sender; this.parent = parent; this.reply = reply; @@ -195,4 +198,5 @@ public class LocalNetwork implements Network { parent.sender.receiveLater(this); } } + } diff --git a/messagebus/src/main/java/com/yahoo/messagebus/network/local/LocalWire.java b/messagebus/src/main/java/com/yahoo/messagebus/network/local/LocalWire.java index 84ca8c64bc0..5c9035a5f99 100644 --- 
a/messagebus/src/main/java/com/yahoo/messagebus/network/local/LocalWire.java +++ b/messagebus/src/main/java/com/yahoo/messagebus/network/local/LocalWire.java @@ -11,7 +11,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.regex.Pattern; /** - * @author <a href="mailto:simon@yahoo-inc.com">Simon Thoresen Hult</a> + * @author Simon Thoresen Hult */ public class LocalWire implements IMirror { @@ -19,19 +19,19 @@ public class LocalWire implements IMirror { private final AtomicInteger updateCnt = new AtomicInteger(); private final ConcurrentHashMap<String, LocalNetwork> services = new ConcurrentHashMap<>(); - public void registerService(final String serviceName, final LocalNetwork owner) { + public void registerService(String serviceName, LocalNetwork owner) { if (services.putIfAbsent(serviceName, owner) != null) { throw new IllegalStateException(); } updateCnt.incrementAndGet(); } - public void unregisterService(final String serviceName) { + public void unregisterService(String serviceName) { services.remove(serviceName); updateCnt.incrementAndGet(); } - public LocalServiceAddress resolveServiceAddress(final String serviceName) { + public LocalServiceAddress resolveServiceAddress(String serviceName) { final LocalNetwork owner = services.get(serviceName); return owner != null ? 
new LocalServiceAddress(serviceName, owner) : null; } @@ -41,10 +41,10 @@ public class LocalWire implements IMirror { } @Override - public Mirror.Entry[] lookup(final String pattern) { - final List<Mirror.Entry> out = new ArrayList<>(); - final Pattern regex = Pattern.compile(pattern.replace("*", "[a-zA-Z0-9_-]+")); - for (final String key : services.keySet()) { + public Mirror.Entry[] lookup(String pattern) { + List<Mirror.Entry> out = new ArrayList<>(); + Pattern regex = Pattern.compile(pattern.replace("*", "[a-zA-Z0-9_-]+")); + for (String key : services.keySet()) { if (regex.matcher(key).matches()) { out.add(new Mirror.Entry(key, key)); } @@ -56,4 +56,5 @@ public class LocalWire implements IMirror { public int updates() { return updateCnt.get(); } + } diff --git a/messagebus/src/main/java/com/yahoo/messagebus/network/rpc/RPCNetwork.java b/messagebus/src/main/java/com/yahoo/messagebus/network/rpc/RPCNetwork.java index 9ab24d662bd..f5f8dd56991 100644 --- a/messagebus/src/main/java/com/yahoo/messagebus/network/rpc/RPCNetwork.java +++ b/messagebus/src/main/java/com/yahoo/messagebus/network/rpc/RPCNetwork.java @@ -51,7 +51,7 @@ public class RPCNetwork implements Network, MethodHandler { private final ExecutorService sendService = new ThreadPoolExecutor(Runtime.getRuntime().availableProcessors(), Runtime.getRuntime().availableProcessors(), 0L, TimeUnit.SECONDS, - new SynchronousQueue<Runnable>(false), + new SynchronousQueue<>(false), ThreadFactoryFactory.getDaemonThreadFactory("mbus.net"), new ThreadPoolExecutor.CallerRunsPolicy()); /** diff --git a/messagebus/src/test/java/com/yahoo/messagebus/ChokeTestCase.java b/messagebus/src/test/java/com/yahoo/messagebus/ChokeTestCase.java index cde801d81f2..5e0df7068b0 100755 --- a/messagebus/src/test/java/com/yahoo/messagebus/ChokeTestCase.java +++ b/messagebus/src/test/java/com/yahoo/messagebus/ChokeTestCase.java @@ -11,28 +11,30 @@ import com.yahoo.messagebus.test.Receptor; import com.yahoo.messagebus.test.SimpleMessage; 
import com.yahoo.messagebus.test.SimpleProtocol; import junit.framework.TestCase; +import org.junit.After; +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Test; import java.net.UnknownHostException; import java.util.ArrayList; import java.util.List; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertFalse; /** - * @author <a href="mailto:simon@yahoo-inc.com">Simon Thoresen</a> + * @author Simon Thoresen */ -public class ChokeTestCase extends TestCase { - - //////////////////////////////////////////////////////////////////////////////// - // - // Setup - // - //////////////////////////////////////////////////////////////////////////////// +public class ChokeTestCase { Slobrok slobrok; TestServer srcServer, dstServer; SourceSession srcSession; DestinationSession dstSession; - @Override + @Before public void setUp() throws ListenFailedException, UnknownHostException { slobrok = new Slobrok(); dstServer = new TestServer(new MessageBusParams().addProtocol(new SimpleProtocol()), @@ -45,7 +47,7 @@ public class ChokeTestCase extends TestCase { assertTrue(srcServer.waitSlobrok("dst/session", 1)); } - @Override + @After public void tearDown() { slobrok.stop(); dstSession.destroy(); @@ -54,12 +56,7 @@ public class ChokeTestCase extends TestCase { srcServer.destroy(); } - //////////////////////////////////////////////////////////////////////////////// - // - // Tests - // - //////////////////////////////////////////////////////////////////////////////// - + @Test public void testMaxCount() { int max = 10; dstServer.mb.setMaxPendingCount(max); @@ -107,6 +104,7 @@ public class ChokeTestCase extends TestCase { assertEquals(0, dstServer.mb.getPendingCount()); } + @Test public void testMaxSize() { int size = createMessage("msg").getApproxSize(); int max = size * 10; @@ -155,15 +153,10 @@ public class ChokeTestCase extends TestCase { 
assertEquals(0, dstServer.mb.getPendingSize()); } - //////////////////////////////////////////////////////////////////////////////// - // - // Utilities - // - //////////////////////////////////////////////////////////////////////////////// - private static Message createMessage(String msg) { Message ret = new SimpleMessage(msg); ret.getTrace().setLevel(9); return ret; } + } diff --git a/messagebus/src/vespa/messagebus/blob.h b/messagebus/src/vespa/messagebus/blob.h index 101ea989a92..86e317d6e79 100644 --- a/messagebus/src/vespa/messagebus/blob.h +++ b/messagebus/src/vespa/messagebus/blob.h @@ -22,7 +22,7 @@ public: * @param s size of the data to be created **/ Blob(uint32_t s) : - _payload(s), + _payload(vespalib::DefaultAlloc::create(s)), _sz(s) { } Blob(Blob && rhs) : @@ -55,11 +55,11 @@ public: **/ const char *data() const { return static_cast<const char *>(_payload.get()); } - vespalib::DefaultAlloc & payload() { return _payload; } - const vespalib::DefaultAlloc & payload() const { return _payload; } + vespalib::alloc::Alloc & payload() { return _payload; } + const vespalib::alloc::Alloc & payload() const { return _payload; } size_t size() const { return _sz; } private: - vespalib::DefaultAlloc _payload; + vespalib::alloc::Alloc _payload; size_t _sz; }; diff --git a/node-admin/README.md b/node-admin/README.md index b727dde7bb8..f4c5f5be4a5 100644 --- a/node-admin/README.md +++ b/node-admin/README.md @@ -8,6 +8,7 @@ You should have the docker daemon running and the following environment variable ``` DOCKER_HOST CONTAINER_CERT_PATH +VESPA_HOME ``` ## Building diff --git a/node-admin/README_MAC.md b/node-admin/README_MAC.md index 75a67f6a29c..16c1ca46612 100644 --- a/node-admin/README_MAC.md +++ b/node-admin/README_MAC.md @@ -1,5 +1,5 @@ # Setting up Docker on OS X -Install Docker Toolbox according to the procedure on [https://docs.docker.com/mac/step_one](https://docs.docker.com/mac/step_one). 
+Install Docker according to the procedure on [https://docs.docker.com/docker-for-mac/](https://docs.docker.com/docker-for-mac/). # Running Vespa on OS X @@ -43,7 +43,7 @@ scripts/setup-route-and-hosts-osx.sh The script will prompt you to continue as this will alter your routing table and /etc/hosts file. If your local zone is up and running, the config server should respond to this: ``` -curl config-server:19071 +curl config-server:4080 ``` If you don't want your `/etc/hosts` file to be changed, the diff --git a/node-admin/include/nodectl-instance.sh b/node-admin/include/nodectl-instance.sh index 5a6665dbdc7..a8d872b314e 100755 --- a/node-admin/include/nodectl-instance.sh +++ b/node-admin/include/nodectl-instance.sh @@ -103,7 +103,7 @@ stop() { $echo $VESPA_HOME/bin/vespa-routing vip -u chef out if has_searchnode; then - $echo $VESPA_HOME/bin/vespa-proton-cmd --local triggerFlush + $echo $VESPA_HOME/bin/vespa-proton-cmd --local prepareRestart fi if has_container; then diff --git a/node-admin/pom.xml b/node-admin/pom.xml index a655a63acaf..1cc89a1fd09 100644 --- a/node-admin/pom.xml +++ b/node-admin/pom.xml @@ -27,7 +27,6 @@ <groupId>com.yahoo.vespa</groupId> <artifactId>node-repository</artifactId> <version>${project.version}</version> - <scope>test</scope> </dependency> <dependency> <groupId>com.yahoo.vespa</groupId> @@ -81,12 +80,6 @@ <scope>compile</scope> </dependency> <dependency> - <groupId>com.yahoo.vespa</groupId> - <artifactId>application-model</artifactId> - <version>${project.version}</version> - <scope>provided</scope> - </dependency> - <dependency> <groupId>junit</groupId> <artifactId>junit</artifactId> <scope>test</scope> diff --git a/node-admin/scripts/app.sh b/node-admin/scripts/app.sh index 83754413508..2757d637dc8 100755 --- a/node-admin/scripts/app.sh +++ b/node-admin/scripts/app.sh @@ -101,7 +101,7 @@ function DeployApp { # Create tenant echo -n "Creating tenant... 
" local create_tenant_response - if create_tenant_response=$(curl --silent --show-error -X PUT "http://$CONFIG_SERVER_HOSTNAME:$CONFIG_SERVER_PORT/application/v2/tenant/$TENANT_NAME" 2>&1) + if create_tenant_response=$(curl --silent --show-error -X PUT "http://$CONFIG_SERVER_HOSTNAME:$VESPA_WEB_SERVICE_PORT/application/v2/tenant/$TENANT_NAME" 2>&1) then if ! [[ "$create_tenant_response" =~ "Tenant $TENANT_NAME created" ]] && ! [[ "$create_tenant_response" =~ "already exists" ]] @@ -131,7 +131,7 @@ function UndeployApp { local app_name=default local output echo -n "Removing application $TENANT_NAME:$app_name... " - if ! output=$(curl --silent --show-error -X DELETE "http://$CONFIG_SERVER_HOSTNAME:$CONFIG_SERVER_PORT/application/v2/tenant/$TENANT_NAME/application/$app_name") + if ! output=$(curl --silent --show-error -X DELETE "http://$CONFIG_SERVER_HOSTNAME:$VESPA_WEB_SERVICE_PORT/application/v2/tenant/$TENANT_NAME/application/$app_name") then echo Fail "Failed to remove application: $output" diff --git a/node-admin/scripts/common.sh b/node-admin/scripts/common.sh index d07b4adcc5a..6a10fb71a99 100644 --- a/node-admin/scripts/common.sh +++ b/node-admin/scripts/common.sh @@ -28,7 +28,7 @@ declare -r NODE_ADMIN_CONTAINER_NAME=node-admin declare -r CONFIG_SERVER_CONTAINER_NAME=config-server declare -r CONFIG_SERVER_HOSTNAME="$CONFIG_SERVER_CONTAINER_NAME" declare -r CONFIG_SERVER_IP="$NETWORK_PREFIX.1.1" -declare -r CONFIG_SERVER_PORT=19071 +declare -r VESPA_WEB_SERVICE_PORT=4080 # E.g. 
config server port declare -r DEFAULT_HOSTED_VESPA_REGION=local-region declare -r DEFAULT_HOSTED_VESPA_ENVIRONMENT=prod diff --git a/node-admin/scripts/config-server.sh b/node-admin/scripts/config-server.sh index 60b05d4b3cd..0806f4374c6 100755 --- a/node-admin/scripts/config-server.sh +++ b/node-admin/scripts/config-server.sh @@ -89,6 +89,7 @@ function Start { --env "HOSTED_VESPA_REGION=$region" \ --env "HOSTED_VESPA_ENVIRONMENT=$environment" \ --env "CONFIG_SERVER_HOSTNAME=$CONFIG_SERVER_HOSTNAME" \ + --env "VESPA_HOME=$VESPA_HOME" \ --env "HOST_BRIDGE_IP=$HOST_BRIDGE_IP" \ --entrypoint /usr/local/bin/start-config-server.sh \ "$DOCKER_IMAGE") @@ -124,7 +125,7 @@ function Start { then # Wait for config server to come up echo -n "Waiting for healthy Config Server (~30s)" - local url="http://$CONFIG_SERVER_HOSTNAME:19071/state/v1/health" + local url="http://$CONFIG_SERVER_HOSTNAME:$VESPA_WEB_SERVICE_PORT/state/v1/health" while ! curl --silent --fail --max-time 1 "$url" >/dev/null do echo -n . diff --git a/node-admin/scripts/node-repo.sh b/node-admin/scripts/node-repo.sh index 94173a6726b..2e113843916 100755 --- a/node-admin/scripts/node-repo.sh +++ b/node-admin/scripts/node-repo.sh @@ -3,6 +3,8 @@ set -e +declare -r VESPA_WEB_SERVICE_PORT=4080 + # Output from InnerCurlNodeRepo, see there for details. 
declare CURL_RESPONSE @@ -162,7 +164,7 @@ function ProvisionNode { local config_server_hostname="$1" local json="$2" - local url="http://$config_server_hostname:19071/nodes/v2/node" + local url="http://$config_server_hostname:$VESPA_WEB_SERVICE_PORT/nodes/v2/node" CurlOrFail -H "Content-Type: application/json" -X POST -d "$json" "$url" } @@ -172,7 +174,7 @@ function SetNodeState { local hostname="$2" local state="$3" - local url="http://$config_server_hostname:19071/nodes/v2/state/$state/$hostname" + local url="http://$config_server_hostname:$VESPA_WEB_SERVICE_PORT/nodes/v2/state/$state/$hostname" CurlOrFail -X PUT "$url" } @@ -284,7 +286,7 @@ function RemoveCommand { local hostname for hostname in "$@" do - local url="http://$config_server_hostname:19071/nodes/v2/node/$hostname" + local url="http://$config_server_hostname:$VESPA_WEB_SERVICE_PORT/nodes/v2/node/$hostname" CurlOrFail -X DELETE "$url" echo -n . done diff --git a/node-admin/src/main/application/services.xml b/node-admin/src/main/application/services.xml index a5dea070285..c746afa0e85 100644 --- a/node-admin/src/main/application/services.xml +++ b/node-admin/src/main/application/services.xml @@ -8,11 +8,13 @@ </handler> <component id="node-admin" class="com.yahoo.vespa.hosted.node.admin.provider.ComponentsProviderImpl" bundle="node-admin"/> <component id="docker-api" class="com.yahoo.vespa.hosted.dockerapi.DockerImpl" bundle="docker-api"/> + <component id="metrics-wrapper" class="com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper" bundle="docker-api"/> <config name='vespa.hosted.dockerapi.docker'> - <caCertPath>/host/docker/certs/ca_cert.pem</caCertPath> - <clientCertPath>/host/docker/certs/client_cert.pem</clientCertPath> - <clientKeyPath>/host/docker/certs/client_key.pem</clientKeyPath> + <uri>tcp://localhost:2376</uri> + <caCertPath></caCertPath> + <clientCertPath></clientCertPath> + <clientKeyPath></clientKeyPath> </config> </jdisc> </services> diff --git 
a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/ContainerNodeSpec.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/ContainerNodeSpec.java index 7f7514a44fd..57aa56cf33c 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/ContainerNodeSpec.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/ContainerNodeSpec.java @@ -1,10 +1,9 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.node.admin; -import com.yahoo.vespa.applicationmodel.HostName; import com.yahoo.vespa.hosted.dockerapi.ContainerName; import com.yahoo.vespa.hosted.dockerapi.DockerImage; -import com.yahoo.vespa.hosted.node.admin.noderepository.NodeState; +import com.yahoo.vespa.hosted.provision.Node; import java.util.Objects; import java.util.Optional; @@ -13,10 +12,15 @@ import java.util.Optional; * @author stiankri */ public class ContainerNodeSpec { - public final HostName hostname; + public final String hostname; public final Optional<DockerImage> wantedDockerImage; public final ContainerName containerName; - public final NodeState nodeState; + public final Node.State nodeState; + public final String nodeType; + public final String nodeFlavor; + public final Optional<String> vespaVersion; + public final Optional<Owner> owner; + public final Optional<Membership> membership; public final Optional<Long> wantedRestartGeneration; public final Optional<Long> currentRestartGeneration; public final Optional<Double> minCpuCores; @@ -24,10 +28,15 @@ public class ContainerNodeSpec { public final Optional<Double> minDiskAvailableGb; public ContainerNodeSpec( - final HostName hostname, + final String hostname, final Optional<DockerImage> wantedDockerImage, final ContainerName containerName, - final NodeState nodeState, + final Node.State nodeState, + final String nodeType, + final String nodeFlavor, + final Optional<String> vespaVersion, + final 
Optional<Owner> owner, + final Optional<Membership> membership, final Optional<Long> wantedRestartGeneration, final Optional<Long> currentRestartGeneration, final Optional<Double> minCpuCores, @@ -37,6 +46,11 @@ public class ContainerNodeSpec { this.wantedDockerImage = wantedDockerImage; this.containerName = containerName; this.nodeState = nodeState; + this.nodeType = nodeType; + this.nodeFlavor = nodeFlavor; + this.vespaVersion = vespaVersion; + this.owner = owner; + this.membership = membership; this.wantedRestartGeneration = wantedRestartGeneration; this.currentRestartGeneration = currentRestartGeneration; this.minCpuCores = minCpuCores; @@ -55,6 +69,11 @@ public class ContainerNodeSpec { Objects.equals(wantedDockerImage, that.wantedDockerImage) && Objects.equals(containerName, that.containerName) && Objects.equals(nodeState, that.nodeState) && + Objects.equals(nodeType, that.nodeType) && + Objects.equals(nodeFlavor, that.nodeFlavor) && + Objects.equals(vespaVersion, that.vespaVersion) && + Objects.equals(owner, that.owner) && + Objects.equals(membership, that.membership) && Objects.equals(wantedRestartGeneration, that.wantedRestartGeneration) && Objects.equals(currentRestartGeneration, that.currentRestartGeneration) && Objects.equals(minCpuCores, that.minCpuCores) && @@ -69,6 +88,11 @@ public class ContainerNodeSpec { wantedDockerImage, containerName, nodeState, + nodeType, + nodeFlavor, + vespaVersion, + owner, + membership, wantedRestartGeneration, currentRestartGeneration, minCpuCores, @@ -83,6 +107,11 @@ public class ContainerNodeSpec { + " wantedDockerImage=" + wantedDockerImage + " containerName=" + containerName + " nodeState=" + nodeState + + " nodeType = " + nodeType + + " nodeFlavor = " + nodeFlavor + + " vespaVersion = " + vespaVersion + + " owner = " + owner + + " membership = " + membership + " wantedRestartGeneration=" + wantedRestartGeneration + " minCpuCores=" + minCpuCores + " currentRestartGeneration=" + currentRestartGeneration @@ -90,4 
+119,97 @@ public class ContainerNodeSpec { + " minDiskAvailableGb=" + minDiskAvailableGb + " }"; } + + public static class Owner { + public final String tenant; + public final String application; + public final String instance; + + public Owner(String tenant, String application, String instance) { + this.tenant = tenant; + this.application = application; + this.instance = instance; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Owner owner = (Owner) o; + + if (!tenant.equals(owner.tenant)) return false; + if (!application.equals(owner.application)) return false; + return instance.equals(owner.instance); + + } + + @Override + public int hashCode() { + int result = tenant.hashCode(); + result = 31 * result + application.hashCode(); + result = 31 * result + instance.hashCode(); + return result; + } + + public String toString() { + return "Owner {" + + " tenant = " + tenant + + " application = " + application + + " instance = " + instance + + " }"; + } + } + + public static class Membership { + public final String clusterType; + public final String clusterId; + public final String group; + public final int index; + public final boolean retired; + + public Membership(String clusterType, String clusterId, String group, int index, boolean retired) { + this.clusterType = clusterType; + this.clusterId = clusterId; + this.group = group; + this.index = index; + this.retired = retired; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Membership that = (Membership) o; + + if (index != that.index) return false; + if (retired != that.retired) return false; + if (!clusterType.equals(that.clusterType)) return false; + if (!clusterId.equals(that.clusterId)) return false; + return group.equals(that.group); + + } + + @Override + public int hashCode() { + int result = 
clusterType.hashCode(); + result = 31 * result + clusterId.hashCode(); + result = 31 * result + group.hashCode(); + result = 31 * result + index; + result = 31 * result + (retired ? 1 : 0); + return result; + } + + @Override + public String toString() { + return "Membership {" + + " clusterType = " + clusterType + + " clusterId = " + clusterId + + " group = " + group + + " index = " + index + + " retired = " + retired + + " }"; + } + } } diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperations.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperations.java index 28a8e9bc1aa..0357683e918 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperations.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperations.java @@ -1,8 +1,8 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.node.admin.docker; -import com.yahoo.vespa.applicationmodel.HostName; import com.yahoo.vespa.hosted.dockerapi.ContainerName; +import com.yahoo.vespa.hosted.dockerapi.Docker; import com.yahoo.vespa.hosted.dockerapi.DockerImage; import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec; import com.yahoo.vespa.hosted.node.admin.orchestrator.Orchestrator; @@ -11,7 +11,7 @@ public interface DockerOperations { String getVespaVersionOrNull(ContainerName containerName); // Returns true if container is absent on return - boolean removeContainerIfNeeded(ContainerNodeSpec nodeSpec, HostName hostname, Orchestrator orchestrator) + boolean removeContainerIfNeeded(ContainerNodeSpec nodeSpec, String hostname, Orchestrator orchestrator) throws Exception; // Returns true if started @@ -23,4 +23,6 @@ public interface DockerOperations { void scheduleDownloadOfImage(ContainerNodeSpec nodeSpec, Runnable callback); void executeResume(ContainerName containerName); + + 
Docker.ContainerStats getContainerStats(ContainerName containerName); } diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImpl.java index 29a2174acfe..a1da59376e3 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImpl.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImpl.java @@ -3,7 +3,6 @@ package com.yahoo.vespa.hosted.node.admin.docker; import com.google.common.base.Joiner; import com.google.common.io.CharStreams; -import com.yahoo.vespa.applicationmodel.HostName; import com.yahoo.vespa.defaults.Defaults; import com.yahoo.vespa.hosted.dockerapi.Container; import com.yahoo.vespa.hosted.dockerapi.ContainerName; @@ -12,20 +11,26 @@ import com.yahoo.vespa.hosted.dockerapi.DockerImage; import com.yahoo.vespa.hosted.dockerapi.DockerImpl; import com.yahoo.vespa.hosted.dockerapi.ProcessResult; import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec; -import com.yahoo.vespa.hosted.node.admin.noderepository.NodeState; import com.yahoo.vespa.hosted.node.admin.orchestrator.Orchestrator; import com.yahoo.vespa.hosted.node.admin.orchestrator.OrchestratorException; import com.yahoo.vespa.hosted.node.admin.util.Environment; import com.yahoo.vespa.hosted.node.admin.util.PrefixLogger; +import com.yahoo.vespa.hosted.node.admin.util.SecretAgentScheduleMaker; import com.yahoo.vespa.hosted.node.maintenance.Maintainer; +import com.yahoo.vespa.hosted.provision.Node; +import java.io.IOException; import java.io.InputStreamReader; import java.net.Inet6Address; import java.net.InetAddress; import java.net.UnknownHostException; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.Arrays; +import java.util.HashMap; import java.util.LinkedList; import java.util.List; +import java.util.Map; import java.util.Optional; import 
java.util.concurrent.CompletableFuture; import java.util.regex.Matcher; @@ -44,27 +49,32 @@ public class DockerOperationsImpl implements DockerOperations { private static final String[] RESUME_NODE_COMMAND = new String[] {NODE_PROGRAM, "resume"}; private static final String[] SUSPEND_NODE_COMMAND = new String[] {NODE_PROGRAM, "suspend"}; + private static final String[] RESTART_NODE_COMMAND = new String[] {NODE_PROGRAM, "restart"}; private static final Pattern VESPA_VERSION_PATTERN = Pattern.compile("^(\\S*)$", Pattern.MULTILINE); - private static final List<String> DIRECTORIES_TO_MOUNT = Arrays.asList( - getDefaults().underVespaHome("logs"), - getDefaults().underVespaHome("var/cache"), - getDefaults().underVespaHome("var/crash"), - getDefaults().underVespaHome("var/db/jdisc"), - getDefaults().underVespaHome("var/db/vespa"), - getDefaults().underVespaHome("var/jdisc_container"), - getDefaults().underVespaHome("var/jdisc_core"), - getDefaults().underVespaHome("var/maven"), - getDefaults().underVespaHome("var/run"), - getDefaults().underVespaHome("var/scoreboards"), - getDefaults().underVespaHome("var/service"), - getDefaults().underVespaHome("var/share"), - getDefaults().underVespaHome("var/spool"), - getDefaults().underVespaHome("var/vespa"), - getDefaults().underVespaHome("var/yca"), - getDefaults().underVespaHome("var/ycore++"), - getDefaults().underVespaHome("var/zookeeper")); + // Map of directories to mount and whether they should be writeable by everyone + private static final Map<String, Boolean> DIRECTORIES_TO_MOUNT = new HashMap<>(); + static { + DIRECTORIES_TO_MOUNT.put("/etc/yamas-agent", true); + DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs"), false); + DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/cache"), false); + DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/crash"), false); + DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/jdisc"), false); + 
DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/vespa"), false); + DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_container"), false); + DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_core"), false); + DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/maven"), false); + DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/run"), false); + DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/scoreboards"), false); + DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/service"), false); + DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/share"), false); + DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/spool"), false); + DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/vespa"), false); + DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/yca"), false); + DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/ycore++"), false); + DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zookeeper"), false); + } private final Docker docker; private final Environment environment; @@ -108,12 +118,44 @@ public class DockerOperationsImpl implements DockerOperations { final Optional<Container> existingContainer = docker.getContainer(nodeSpec.hostname); if (!existingContainer.isPresent()) { startContainer(nodeSpec); + configureContainer(nodeSpec); return true; } else { return false; } } + private void configureContainer(ContainerNodeSpec nodeSpec) { + final Path yamasAgentFolder = Maintainer.pathInNodeAdminFromPathInNode(nodeSpec.containerName, "/etc/yamas-agent/"); + + Path vespaCheckPath = Paths.get("/home/y/libexec/yms/yms_check_vespa"); + SecretAgentScheduleMaker scheduleMaker = new SecretAgentScheduleMaker("vespa", 60, vespaCheckPath, "all") + .withTag("role", "tenants") + .withTag("flavor", nodeSpec.nodeFlavor) + .withTag("state", nodeSpec.nodeState.toString()) + .withTag("zone", environment.getZone()); + + if (nodeSpec.owner.isPresent()) 
scheduleMaker + .withTag("tenantName", nodeSpec.owner.get().tenant) + .withTag("app", nodeSpec.owner.get().application); + + if (nodeSpec.membership.isPresent()) scheduleMaker + .withTag("clustertype", nodeSpec.membership.get().clusterType) + .withTag("clusterid", nodeSpec.membership.get().clusterId); + + if (nodeSpec.vespaVersion.isPresent()) scheduleMaker + .withTag("vespaVersion", nodeSpec.vespaVersion.get()); + + try { + scheduleMaker.writeTo(yamasAgentFolder); + } catch (IOException e) { + throw new RuntimeException("Failed to write secret-agent schedules for " + nodeSpec.containerName, e); + } + + docker.executeInContainer(nodeSpec.containerName, "service", "yamas-agent", "restart"); + } + + // Returns true if scheduling download @Override public boolean shouldScheduleDownloadOfImage(final DockerImage dockerImage) { @@ -121,34 +163,47 @@ public class DockerOperationsImpl implements DockerOperations { } @Override - public boolean removeContainerIfNeeded(ContainerNodeSpec nodeSpec, HostName hostname, Orchestrator orchestrator) + public boolean removeContainerIfNeeded(ContainerNodeSpec nodeSpec, String hostname, Orchestrator orchestrator) throws Exception { Optional<Container> existingContainer = docker.getContainer(hostname); if (! 
existingContainer.isPresent()) { return true; } + + PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, nodeSpec.containerName); Optional<String> removeReason = shouldRemoveContainer(nodeSpec, existingContainer); if (removeReason.isPresent()) { - PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, nodeSpec.containerName); logger.info("Will remove container " + existingContainer.get() + ": " + removeReason.get()); removeContainer(nodeSpec, existingContainer.get(), orchestrator); return true; } + Optional<String> restartReason = shouldRestartServices(nodeSpec); + if (restartReason.isPresent()) { + logger.info("Will restart services for container " + existingContainer.get() + ": " + restartReason.get()); + restartServices(nodeSpec, existingContainer.get(), orchestrator); + return true; + } + return false; } + private Optional<String> shouldRestartServices(ContainerNodeSpec nodeSpec) { + if (nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) { + return Optional.of("Restart requested - wanted restart generation has been bumped: " + + nodeSpec.currentRestartGeneration.get() + " -> " + nodeSpec.wantedRestartGeneration + .get()); + } + return Optional.empty(); + } + private Optional<String> shouldRemoveContainer(ContainerNodeSpec nodeSpec, Optional<Container> existingContainer) { - if (nodeSpec.nodeState != NodeState.ACTIVE) { + if (nodeSpec.nodeState != Node.State.active) { return Optional.of("Node no longer active"); } if (!nodeSpec.wantedDockerImage.get().equals(existingContainer.get().image)) { return Optional.of("The node is supposed to run a new Docker image: " + existingContainer.get() + " -> " + nodeSpec.wantedDockerImage.get()); } - if (nodeSpec.currentRestartGeneration.get() < nodeSpec.wantedRestartGeneration.get()) { - return Optional.of("Restart requested - wanted restart generation has been bumped: " - + nodeSpec.currentRestartGeneration.get() + " -> " + 
nodeSpec.wantedRestartGeneration.get()); - } if (!existingContainer.get().isRunning) { return Optional.of("Container no longer running"); } @@ -206,10 +261,10 @@ public class DockerOperationsImpl implements DockerOperations { logger.info("Starting container " + nodeSpec.containerName); try { - InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname.s()); + InetAddress nodeInetAddress = environment.getInetAddressForHost(nodeSpec.hostname); final boolean isIPv6 = nodeInetAddress instanceof Inet6Address; - String configServers = environment.getConfigServerHosts().stream().map(HostName::toString).collect(Collectors.joining(",")); + String configServers = environment.getConfigServerHosts().stream().collect(Collectors.joining(",")); Docker.CreateContainerCommand command = docker.createContainerCommand( nodeSpec.wantedDockerImage.get(), nodeSpec.containerName, @@ -219,7 +274,7 @@ public class DockerOperationsImpl implements DockerOperations { .withEnvironment("CONFIG_SERVER_ADDRESS", configServers); command.withVolume("/etc/hosts", "/etc/hosts"); - for (String pathInNode : DIRECTORIES_TO_MOUNT) { + for (String pathInNode : DIRECTORIES_TO_MOUNT.keySet()) { String pathInHost = Maintainer.pathInHostFromPathInNode(nodeSpec.containerName, pathInNode).toString(); command = command.withVolume(pathInHost, pathInNode); } @@ -231,6 +286,9 @@ public class DockerOperationsImpl implements DockerOperations { long minMainMemoryAvailableMb = (long) (nodeSpec.minMainMemoryAvailableGb.get() * 1024); if (minMainMemoryAvailableMb > 0) { command.withMemoryInMb(minMainMemoryAvailableMb); + // TOTAL_MEMORY_MB is used to make any jdisc container think the machine + // only has this much physical memory (overrides total memory reported by `free -m`). 
+ command.withEnvironment("TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb)); } } @@ -244,12 +302,15 @@ public class DockerOperationsImpl implements DockerOperations { } else { docker.startContainer(nodeSpec.containerName); } + + DIRECTORIES_TO_MOUNT.entrySet().stream().filter(Map.Entry::getValue).forEach(entry -> + docker.executeInContainer(nodeSpec.containerName, "sudo", "chmod", "-R", "a+w", entry.getKey())); } catch (UnknownHostException e) { throw new RuntimeException("Failed to create container " + nodeSpec.containerName.asString(), e); } } - private void setupContainerNetworkingWithScript(ContainerName containerName, HostName hostName) { + private void setupContainerNetworkingWithScript(ContainerName containerName, String hostName) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); Docker.ContainerInfo containerInfo = docker.inspectContainer(containerName); @@ -316,31 +377,9 @@ public class DockerOperationsImpl implements DockerOperations { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, nodeSpec.containerName); final ContainerName containerName = existingContainer.name; if (existingContainer.isRunning) { - // If we're stopping the node only to upgrade or restart the node or similar, we need to suspend - // the services. - if (nodeSpec.nodeState == NodeState.ACTIVE) { - // TODO: Also skip orchestration if we're downgrading in test/staging - // How to implement: - // - test/staging: We need to figure out whether we're in test/staging, by asking Chef!? Or, - // let the Orchestrator handle it - it may know what zone we're in. - // - downgrading: Impossible to know unless we look at the hosted version, which is - // not available in the docker image (nor its name). Not sure how to solve this. Should - // the node repo return the hosted version or a downgrade bit in addition to - // wanted docker image etc? 
- // Should the tenant pipeline instead use BCP tool to upgrade faster!? - // - // More generally, the node repo response should contain sufficient info on what the docker image is, - // to allow the node admin to make decisions that depend on the docker image. Or, each docker image - // needs to contain routines for drain and suspend. For many image, these can just be dummy routines. - - logger.info("Ask Orchestrator for permission to suspend node " + nodeSpec.hostname); - final boolean suspendAllowed = orchestrator.suspend(nodeSpec.hostname); - if (!suspendAllowed) { - logger.info("Orchestrator rejected suspend of node"); - // TODO: change suspend() to throw an exception if suspend is denied - throw new OrchestratorException("Failed to get permission to suspend " + nodeSpec.hostname); - } - + // If we're stopping the node only to upgrade we need to suspend the services. + if (nodeSpec.nodeState == Node.State.active) { + orchestratorSuspendNode(orchestrator, nodeSpec, logger); trySuspendNode(containerName); } @@ -352,14 +391,58 @@ public class DockerOperationsImpl implements DockerOperations { docker.deleteContainer(containerName); } + private void restartServices(ContainerNodeSpec nodeSpec, Container existingContainer, Orchestrator orchestrator) + throws Exception { + if (existingContainer.isRunning) { + ContainerName containerName = existingContainer.name; + PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName); + if (nodeSpec.nodeState == Node.State.active) { + logger.info("Restarting services for " + containerName); + // Since we are restarting the services we need to suspend the node. 
+ orchestratorSuspendNode(orchestrator, nodeSpec, logger); + executeCommand(containerName, RESTART_NODE_COMMAND); + } + } + } - @Override - public void executeResume(ContainerName containerName) { - Optional<ProcessResult> result = executeOptionalProgram(containerName, RESUME_NODE_COMMAND); + // TODO: Also skip orchestration if we're downgrading in test/staging + // How to implement: + // - test/staging: We need to figure out whether we're in test/staging, zone is available in Environment + // - downgrading: Impossible to know unless we look at the hosted version, which is + // not available in the docker image (nor its name). Not sure how to solve this. Should + // the node repo return the hosted version or a downgrade bit in addition to + // wanted docker image etc? + // Should the tenant pipeline instead use BCP tool to upgrade faster!? + // + // More generally, the node repo response should contain sufficient info on what the docker image is, + // to allow the node admin to make decisions that depend on the docker image. Or, each docker image + // needs to contain routines for drain and suspend. For many images, these can just be dummy routines. + private void orchestratorSuspendNode(Orchestrator orchestrator, ContainerNodeSpec nodeSpec, PrefixLogger logger) throws OrchestratorException { + final String hostname = nodeSpec.hostname; + logger.info("Ask Orchestrator for permission to suspend node " + hostname); + if ( ! 
orchestrator.suspend(hostname)) { + logger.info("Orchestrator rejected suspend of node " + hostname); + // TODO: change suspend() to throw an exception if suspend is denied + throw new OrchestratorException("Failed to get permission to suspend " + hostname); + } + } + + public void executeCommand(ContainerName containerName, String[] command) { + Optional<ProcessResult> result = executeOptionalProgram(containerName, command); if (result.isPresent() && !result.get().isSuccess()) { - throw new RuntimeException("Container " +containerName.asString() - + ": command " + Arrays.toString(RESUME_NODE_COMMAND) + " failed: " + result.get()); + throw new RuntimeException("Container " + containerName.asString() + + ": command " + Arrays.toString(command) + " failed: " + result.get()); } } + + @Override + public void executeResume(ContainerName containerName) { + executeCommand(containerName, RESUME_NODE_COMMAND); + } + + @Override + public Docker.ContainerStats getContainerStats(ContainerName containerName) { + return docker.getContainerStats(containerName); + } } diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/MaintenanceScheduler.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/MaintenanceScheduler.java deleted file mode 100644 index 711edf4544d..00000000000 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/MaintenanceScheduler.java +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.vespa.hosted.node.admin.maintenance; - -import com.yahoo.vespa.hosted.dockerapi.ContainerName; - -import java.io.IOException; - -/** - * @author valerijf - */ -public interface MaintenanceScheduler { - void removeOldFilesFromNode(ContainerName containerName); - - void cleanNodeAdmin(); - - void deleteContainerStorage(ContainerName containerName) throws IOException; -} diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/MaintenanceSchedulerImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java index db6b40f4c73..6b14926388d 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/MaintenanceSchedulerImpl.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java @@ -13,18 +13,79 @@ import java.io.InputStreamReader; import java.nio.file.Files; import java.nio.file.Path; import java.time.Duration; +import java.time.Instant; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; /** * @author valerijf */ -public class MaintenanceSchedulerImpl implements MaintenanceScheduler { - private static final PrefixLogger NODE_ADMIN_LOGGER = PrefixLogger.getNodeAdminLogger(MaintenanceSchedulerImpl.class); - +public class StorageMaintainer { + private static final PrefixLogger NODE_ADMIN_LOGGER = PrefixLogger.getNodeAdminLogger(StorageMaintainer.class); private static final String[] baseArguments = {"sudo", "/home/y/libexec/vespa/node-admin/maintenance.sh"}; + private static final long intervalSec = 1000; + + private final Object monitor = new Object(); + + private Map<ContainerName, MetricsCache> metricsCacheByContainerName = new ConcurrentHashMap<>(); + + public Map<String, Number> updateIfNeededAndGetDiskMetricsFor(ContainerName containerName) { + // Calculating disk usage is IO expensive operation 
and its value changes relatively slowly, we want to perform + // that calculation rarely. Additionally, we spread out the calculation for different containers by adding + // a random deviation. + if (! metricsCacheByContainerName.containsKey(containerName) || + metricsCacheByContainerName.get(containerName).nextUpdateAt.isBefore(Instant.now())) { + long distributedSecs = (long) (intervalSec * (0.5 + Math.random())); + MetricsCache metricsCache = new MetricsCache(Instant.now().plusSeconds(distributedSecs)); + + // Throttle to one disk usage calculation at a time. + synchronized (monitor) { + PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); + File containerDir = Maintainer.pathInNodeAdminFromPathInNode(containerName, "/home/").toFile(); + try { + long used = getDiscUsedInBytes(containerDir); + metricsCache.metrics.put("node.disk.used", used); + } catch (Throwable e) { + logger.error("Problems during disk usage calculations: " + e.getMessage()); + } + } + + metricsCacheByContainerName.put(containerName, metricsCache); + } + + return metricsCacheByContainerName.get(containerName).metrics; + } + + // Public for testing + long getDiscUsedInBytes(File path) throws IOException, InterruptedException { + final String[] command = {"du", "-xsk", path.toString()}; + + Process duCommand = new ProcessBuilder().command(command).start(); + if (!duCommand.waitFor(60, TimeUnit.SECONDS)) { + duCommand.destroy(); + throw new RuntimeException("Disk usage command timedout, aborting."); + } + String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream())); + String error = IOUtils.readAll(new InputStreamReader(duCommand.getErrorStream())); + + if (! 
error.isEmpty()) { + throw new RuntimeException("Disk usage wrote to error log: " + error); + } + + String[] results = output.split("\t"); + if (results.length != 2) { + throw new RuntimeException("Result from disk usage command not as expected: " + output); + } + long diskUsageKB = Long.valueOf(results[0]); + + return diskUsageKB * 1024; + } - @Override public void removeOldFilesFromNode(ContainerName containerName) { - PrefixLogger logger = PrefixLogger.getNodeAgentLogger(MaintenanceSchedulerImpl.class, containerName); + PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); String[] pathsToClean = {"/home/y/logs/elasticsearch2", "/home/y/logs/logstash2", "/home/y/logs/daemontools_y", "/home/y/logs/nginx", "/home/y/logs/vespa"}; @@ -46,22 +107,20 @@ public class MaintenanceSchedulerImpl implements MaintenanceScheduler { DeleteOldAppData.deleteFiles(fileDistrDir.getAbsolutePath(), Duration.ofDays(31).getSeconds(), null, false); } - execute(logger, Maintainer.JOB_CLEAN_CORE_DUMPS); + execute(logger, concatenateArrays(baseArguments, Maintainer.JOB_CLEAN_CORE_DUMPS)); } - @Override public void cleanNodeAdmin() { - execute(NODE_ADMIN_LOGGER, Maintainer.JOB_DELETE_OLD_APP_DATA); - execute(NODE_ADMIN_LOGGER, Maintainer.JOB_CLEAN_HOME); + execute(NODE_ADMIN_LOGGER, concatenateArrays(baseArguments, Maintainer.JOB_DELETE_OLD_APP_DATA)); + execute(NODE_ADMIN_LOGGER, concatenateArrays(baseArguments, Maintainer.JOB_CLEAN_HOME)); File nodeAdminJDiskLogsPath = Maintainer.pathInNodeAdminFromPathInNode(new ContainerName("node-admin"), "/home/y/logs/jdisc_core/").toFile(); DeleteOldAppData.deleteFiles(nodeAdminJDiskLogsPath.getAbsolutePath(), Duration.ofDays(31).getSeconds(), null, false); } - @Override public void deleteContainerStorage(ContainerName containerName) throws IOException { - PrefixLogger logger = PrefixLogger.getNodeAgentLogger(MaintenanceSchedulerImpl.class, containerName); + PrefixLogger logger = 
PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName); File yVarDir = Maintainer.pathInNodeAdminFromPathInNode(containerName, "/home/y/var").toFile(); if (yVarDir.exists()) { @@ -82,21 +141,30 @@ public class MaintenanceSchedulerImpl implements MaintenanceScheduler { private void execute(PrefixLogger logger, String... params) { try { - Process p = Runtime.getRuntime().exec(concatenateArrays(baseArguments, params)); + Process p = Runtime.getRuntime().exec(params); String output = IOUtils.readAll(new InputStreamReader(p.getInputStream())); String errors = IOUtils.readAll(new InputStreamReader(p.getErrorStream())); if (! output.isEmpty()) logger.info(output); if (! errors.isEmpty()) logger.error(errors); } catch (IOException e) { - e.printStackTrace(); + NODE_ADMIN_LOGGER.warning("Failed to execute command " + Arrays.toString(params), e); } } - private static String[] concatenateArrays(String[] ar1, String[] ar2) { + private static String[] concatenateArrays(String[] ar1, String... ar2) { String[] concatenated = new String[ar1.length + ar2.length]; System.arraycopy(ar1, 0, concatenated, 0, ar1.length); System.arraycopy(ar2, 0, concatenated, ar1.length, ar2.length); return concatenated; } + + private static class MetricsCache { + private final Instant nextUpdateAt; + private final Map<String, Number> metrics = new HashMap<>(); + + MetricsCache(Instant nextUpdateAt) { + this.nextUpdateAt = nextUpdateAt; + } + } }
\ No newline at end of file diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdmin.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdmin.java index 0b6a0608ac8..3dbb202b7b2 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdmin.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdmin.java @@ -1,7 +1,6 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.node.admin.nodeadmin; -import com.yahoo.vespa.applicationmodel.HostName; import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec; import java.util.List; @@ -45,7 +44,7 @@ public interface NodeAdmin { /** * Returns list of hosts. */ - Set<HostName> getListOfHosts(); + Set<String> getListOfHosts(); /** * Returns a map containing all relevant NodeAdmin variables and their current values. diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java index f490a067aa9..15a33dddd3c 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java @@ -2,14 +2,19 @@ package com.yahoo.vespa.hosted.node.admin.nodeadmin; import com.yahoo.collections.Pair; -import com.yahoo.vespa.applicationmodel.HostName; +import com.yahoo.net.HostName; +import com.yahoo.vespa.hosted.dockerapi.metrics.CounterWrapper; +import com.yahoo.vespa.hosted.dockerapi.metrics.Dimensions; +import com.yahoo.vespa.hosted.dockerapi.metrics.GaugeWrapper; +import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper; import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec; import com.yahoo.vespa.hosted.dockerapi.Container; import 
com.yahoo.vespa.hosted.dockerapi.Docker; import com.yahoo.vespa.hosted.dockerapi.DockerImage; -import com.yahoo.vespa.hosted.node.admin.maintenance.MaintenanceScheduler; +import com.yahoo.vespa.hosted.node.admin.maintenance.StorageMaintainer; import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgent; import com.yahoo.vespa.hosted.node.admin.util.PrefixLogger; +import com.yahoo.vespa.hosted.provision.Node; import java.io.IOException; import java.time.Duration; @@ -21,11 +26,16 @@ import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + /** * Administers a host (for now only docker hosts) and its nodes (docker containers nodes). 
* @@ -33,38 +43,79 @@ import java.util.stream.Stream; */ public class NodeAdminImpl implements NodeAdmin { private static final PrefixLogger logger = PrefixLogger.getNodeAdminLogger(NodeAdmin.class); + private final ScheduledExecutorService metricsFetcherScheduler = Executors.newScheduledThreadPool(1); private static final long MIN_AGE_IMAGE_GC_MILLIS = Duration.ofMinutes(15).toMillis(); private final Docker docker; - private final Function<HostName, NodeAgent> nodeAgentFactory; - private final MaintenanceScheduler maintenanceScheduler; + private final Function<String, NodeAgent> nodeAgentFactory; + private final StorageMaintainer storageMaintainer; private AtomicBoolean frozen = new AtomicBoolean(false); - private final Map<HostName, NodeAgent> nodeAgents = new HashMap<>(); + private final Map<String, NodeAgent> nodeAgents = new HashMap<>(); private Map<DockerImage, Long> firstTimeEligibleForGC = Collections.emptyMap(); private final int nodeAgentScanIntervalMillis; + private GaugeWrapper numberOfContainersInActiveState; + private GaugeWrapper numberOfContainersInLoadImageState; + private CounterWrapper numberOfUnhandledExceptionsInNodeAgent; + /** * @param docker interface to docker daemon and docker-related tasks * @param nodeAgentFactory factory for {@link NodeAgent} objects */ - public NodeAdminImpl(final Docker docker, final Function<HostName, NodeAgent> nodeAgentFactory, - final MaintenanceScheduler maintenanceScheduler, int nodeAgentScanIntervalMillis) { + public NodeAdminImpl(final Docker docker, final Function<String, NodeAgent> nodeAgentFactory, + final StorageMaintainer storageMaintainer, int nodeAgentScanIntervalMillis, + final MetricReceiverWrapper metricReceiver) { this.docker = docker; this.nodeAgentFactory = nodeAgentFactory; - this.maintenanceScheduler = maintenanceScheduler; + this.storageMaintainer = storageMaintainer; this.nodeAgentScanIntervalMillis = nodeAgentScanIntervalMillis; + + Dimensions dimensions = new Dimensions.Builder() + 
.add("host", HostName.getLocalhost()) + .add("role", "docker").build(); + + this.numberOfContainersInActiveState = metricReceiver.declareGauge(dimensions, "nodes.state.active"); + this.numberOfContainersInLoadImageState = metricReceiver.declareGauge(dimensions, "nodes.image.loading"); + this.numberOfUnhandledExceptionsInNodeAgent = metricReceiver.declareCounter(dimensions, "nodes.unhandled_exceptions"); + + metricsFetcherScheduler.scheduleWithFixedDelay(() -> { + try { + nodeAgents.values().forEach(NodeAgent::updateContainerNodeMetrics); + } catch (Throwable e) { + logger.warning("Metric fetcher scheduler failed", e); + } + }, 0, 30000, MILLISECONDS); } public void refreshContainersToRun(final List<ContainerNodeSpec> containersToRun) { final List<Container> existingContainers = docker.getAllManagedContainers(); - maintenanceScheduler.cleanNodeAdmin(); + storageMaintainer.cleanNodeAdmin(); synchronizeNodeSpecsToNodeAgents(containersToRun, existingContainers); garbageCollectDockerImages(containersToRun); + + updateNodeAgentMetrics(); + } + + private void updateNodeAgentMetrics() { + int numberContainersInActive = 0; + int numberContainersWaitingImage = 0; + int numberOfNewUnhandledExceptions = 0; + + for (NodeAgent nodeAgent : nodeAgents.values()) { + Optional<ContainerNodeSpec> nodeSpec = nodeAgent.getContainerNodeSpec(); + if (nodeSpec.isPresent() && nodeSpec.get().nodeState == Node.State.active) numberContainersInActive++; + if (nodeAgent.isDownloadingImage()) numberContainersWaitingImage++; + numberOfNewUnhandledExceptions += nodeAgent.getAndResetNumberOfUnhandledExceptions(); + } + + numberOfContainersInActiveState.sample(numberContainersInActive); + numberOfContainersInLoadImageState.sample(numberContainersWaitingImage); + numberOfUnhandledExceptionsInNodeAgent.add(numberOfNewUnhandledExceptions); } public boolean freezeNodeAgentsAndCheckIfAllFrozen() { @@ -95,7 +146,7 @@ public class NodeAdminImpl implements NodeAdmin { this.frozen.set(frozen); } - public 
Set<HostName> getListOfHosts() { + public Set<String> getListOfHosts() { return nodeAgents.keySet(); } @@ -112,6 +163,15 @@ public class NodeAdminImpl implements NodeAdmin { @Override public void shutdown() { + metricsFetcherScheduler.shutdown(); + try { + if (! metricsFetcherScheduler.awaitTermination(30, TimeUnit.SECONDS)) { + throw new RuntimeException("Did not manage to shutdown node-agent metrics update metricsFetcherScheduler."); + } + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + for (NodeAgent nodeAgent : nodeAgents.values()) { nodeAgent.stop(); } @@ -165,10 +225,10 @@ public class NodeAdminImpl implements NodeAdmin { containersToRun.stream(), nodeSpec -> nodeSpec.hostname, existingContainers.stream(), container -> container.hostname); - final Set<HostName> nodeHostNames = containersToRun.stream() + final Set<String> nodeHostNames = containersToRun.stream() .map(spec -> spec.hostname) .collect(Collectors.toSet()); - final Set<HostName> obsoleteAgentHostNames = diff(nodeAgents.keySet(), nodeHostNames); + final Set<String> obsoleteAgentHostNames = diff(nodeAgents.keySet(), nodeHostNames); obsoleteAgentHostNames.forEach(hostName -> nodeAgents.remove(hostName).stop()); nodeSpecContainerPairs.forEach(nodeSpecContainerPair -> { diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java index 54009216286..22ceb5f494c 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java @@ -2,12 +2,11 @@ package com.yahoo.vespa.hosted.node.admin.nodeadmin; import com.yahoo.component.AbstractComponent; -import com.yahoo.vespa.applicationmodel.HostName; import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec; import 
com.yahoo.vespa.hosted.node.admin.noderepository.NodeRepository; -import com.yahoo.vespa.hosted.node.admin.noderepository.NodeState; import com.yahoo.vespa.hosted.node.admin.orchestrator.Orchestrator; import com.yahoo.vespa.hosted.node.admin.util.PrefixLogger; +import com.yahoo.vespa.hosted.provision.Node; import java.io.IOException; import java.util.LinkedHashMap; @@ -89,6 +88,8 @@ public class NodeAdminStateUpdater extends AbstractComponent { } catch (IOException e) { return Optional.of("Failed to get nodes from node repo:" + e.getMessage()); } + // Avoids media type issues (HTTP ERROR: 415 Unsupported Media Type), probably related to having empty node list. + if (nodesInActiveState.size() == 0) return Optional.empty(); return orchestrator.suspend(dockerHostHostName, nodesInActiveState); } else { nodeAdmin.unfreezeNodeAgents(); @@ -143,9 +144,8 @@ public class NodeAdminStateUpdater extends AbstractComponent { private List<String> getNodesInActiveState() throws IOException { return nodeRepository.getContainersToRun() .stream() - .filter(nodespec -> nodespec.nodeState == NodeState.ACTIVE) + .filter(nodespec -> nodespec.nodeState == Node.State.active) .map(nodespec -> nodespec.hostname) - .map(HostName::toString) .collect(Collectors.toList()); } } diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgent.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgent.java index fe6af0fdeec..2f4b5521343 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgent.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgent.java @@ -4,6 +4,7 @@ package com.yahoo.vespa.hosted.node.admin.nodeagent; import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec; import java.util.Map; +import java.util.Optional; /** * Responsible for management of a single node over its lifecycle. 
@@ -55,5 +56,20 @@ public interface NodeAgent { /** * Returns the {@link ContainerNodeSpec} for this node agent. */ - ContainerNodeSpec getContainerNodeSpec(); + Optional<ContainerNodeSpec> getContainerNodeSpec(); + + /** + * Updates metric receiver with the latest node-agent stats + */ + void updateContainerNodeMetrics(); + + /** + * Returns true if NodeAgent is waiting for an image download to finish + */ + boolean isDownloadingImage(); + + /** + * Returns and resets number of unhandled exceptions + */ + int getAndResetNumberOfUnhandledExceptions(); } diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java index 46dc0714e18..d4aa5f69e0a 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java @@ -1,15 +1,18 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.hosted.node.admin.nodeagent; -import com.yahoo.vespa.applicationmodel.HostName; +import com.yahoo.vespa.hosted.dockerapi.Docker; import com.yahoo.vespa.hosted.dockerapi.DockerImage; +import com.yahoo.vespa.hosted.dockerapi.metrics.Dimensions; +import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper; import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec; import com.yahoo.vespa.hosted.node.admin.docker.DockerOperations; -import com.yahoo.vespa.hosted.node.admin.maintenance.MaintenanceScheduler; +import com.yahoo.vespa.hosted.node.admin.maintenance.StorageMaintainer; import com.yahoo.vespa.hosted.node.admin.noderepository.NodeRepository; import com.yahoo.vespa.hosted.node.admin.noderepository.NodeRepositoryImpl; import com.yahoo.vespa.hosted.node.admin.orchestrator.Orchestrator; import com.yahoo.vespa.hosted.node.admin.util.PrefixLogger; +import com.yahoo.vespa.hosted.provision.Node; import java.io.IOException; import java.text.SimpleDateFormat; @@ -19,6 +22,7 @@ import java.util.Date; import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.Map; +import java.util.Optional; import java.util.concurrent.atomic.AtomicBoolean; import static com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentImpl.ContainerState.ABSENT; @@ -41,12 +45,12 @@ public class NodeAgentImpl implements NodeAgent { private DockerImage imageBeingDownloaded = null; - private final HostName hostname; + private final String hostname; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final DockerOperations dockerOperations; - private final MaintenanceScheduler maintenanceScheduler; + private final StorageMaintainer storageMaintainer; private final Object monitor = new Object(); @@ -54,6 +58,7 @@ public class NodeAgentImpl implements NodeAgent { private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); private long delaysBetweenEachTickMillis; + private int 
numberOfUnhandledException = 0; private Thread loopThread; @@ -66,21 +71,26 @@ public class NodeAgentImpl implements NodeAgent { // The attributes of the last successful node repo attribute update for this node. Used to avoid redundant calls. private NodeAttributes lastAttributesSet = null; - private ContainerNodeSpec lastNodeSpec = null; + ContainerNodeSpec lastNodeSpec = null; + + private final MetricReceiverWrapper metricReceiver; public NodeAgentImpl( - final HostName hostName, + final String hostName, final NodeRepository nodeRepository, final Orchestrator orchestrator, final DockerOperations dockerOperations, - final MaintenanceScheduler maintenanceScheduler) { + final StorageMaintainer storageMaintainer, + final MetricReceiverWrapper metricReceiver) { this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.hostname = hostName; this.dockerOperations = dockerOperations; - this.maintenanceScheduler = maintenanceScheduler; + this.storageMaintainer = storageMaintainer; this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class, - NodeRepositoryImpl.containerNameFromHostName(hostName.toString())); + NodeRepositoryImpl.containerNameFromHostName(hostName)); + + this.metricReceiver = metricReceiver; } @Override @@ -139,7 +149,7 @@ public class NodeAgentImpl implements NodeAgent { throw new RuntimeException("Can not restart a node agent."); } loopThread = new Thread(this::loop); - loopThread.setName("loop-" + hostname.toString()); + loopThread.setName("loop-" + hostname); loopThread.start(); } @@ -190,7 +200,7 @@ public class NodeAgentImpl implements NodeAgent { publishStateToNodeRepoIfChanged(nodeSpec.hostname, nodeAttributes); } - private void publishStateToNodeRepoIfChanged(HostName hostName, NodeAttributes currentAttributes) throws IOException { + private void publishStateToNodeRepoIfChanged(String hostName, NodeAttributes currentAttributes) throws IOException { // TODO: We should only update if the new current values do not match the 
node repo's current values if (!currentAttributes.equals(lastAttributesSet)) { logger.info("Publishing new set of attributes to node repo: " @@ -233,7 +243,7 @@ public class NodeAgentImpl implements NodeAgent { imageBeingDownloaded = nodeSpec.wantedDockerImage.get(); // Create a signalWorkToBeDone when download is finished. dockerOperations.scheduleDownloadOfImage(nodeSpec, this::signalWorkToBeDone); - } else { + } else if (imageBeingDownloaded != null) { // Image was downloading, but now its ready imageBeingDownloaded = null; } } @@ -273,6 +283,7 @@ public class NodeAgentImpl implements NodeAgent { try { tick(); } catch (Exception e) { + numberOfUnhandledException++; logger.error("Unhandled exception, ignoring.", e); addDebugMessage(e.getMessage()); } catch (Throwable t) { @@ -291,20 +302,24 @@ public class NodeAgentImpl implements NodeAgent { synchronized (monitor) { if (!nodeSpec.equals(lastNodeSpec)) { + // If we transition from active, to not active state, unset the current metrics + if (lastNodeSpec != null && lastNodeSpec.nodeState == Node.State.active && nodeSpec.nodeState != Node.State.active) { + metricReceiver.unsetMetricsForContainer(hostname); + } addDebugMessage("Loading new node spec: " + nodeSpec.toString()); lastNodeSpec = nodeSpec; } } switch (nodeSpec.nodeState) { - case READY: + case ready: removeContainerIfNeededUpdateContainerState(nodeSpec); break; - case RESERVED: + case reserved: removeContainerIfNeededUpdateContainerState(nodeSpec); break; - case ACTIVE: - maintenanceScheduler.removeOldFilesFromNode(nodeSpec.containerName); + case active: + storageMaintainer.removeOldFilesFromNode(nodeSpec.containerName); scheduleDownLoadIfNeeded(nodeSpec); if (imageBeingDownloaded != null) { addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString()); @@ -328,19 +343,20 @@ public class NodeAgentImpl implements NodeAgent { logger.info("Call resume against Orchestrator"); orchestrator.resume(nodeSpec.hostname); break; - case 
INACTIVE: - maintenanceScheduler.removeOldFilesFromNode(nodeSpec.containerName); + case inactive: + storageMaintainer.removeOldFilesFromNode(nodeSpec.containerName); removeContainerIfNeededUpdateContainerState(nodeSpec); break; - case PROVISIONED: - case DIRTY: - maintenanceScheduler.removeOldFilesFromNode(nodeSpec.containerName); + case provisioned: + case dirty: + storageMaintainer.removeOldFilesFromNode(nodeSpec.containerName); removeContainerIfNeededUpdateContainerState(nodeSpec); logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready"); - maintenanceScheduler.deleteContainerStorage(nodeSpec.containerName); + storageMaintainer.deleteContainerStorage(nodeSpec.containerName); updateNodeRepoAndMarkNodeAsReady(nodeSpec); break; - case FAILED: + case parked: + case failed: removeContainerIfNeededUpdateContainerState(nodeSpec); break; default: @@ -348,9 +364,79 @@ public class NodeAgentImpl implements NodeAgent { } } - public ContainerNodeSpec getContainerNodeSpec() { + @SuppressWarnings("unchecked") + public void updateContainerNodeMetrics() { + ContainerNodeSpec nodeSpec; synchronized (monitor) { - return lastNodeSpec; + nodeSpec = lastNodeSpec; + } + + if (nodeSpec == null || nodeSpec.nodeState != Node.State.active) return; + Docker.ContainerStats stats = dockerOperations.getContainerStats(nodeSpec.containerName); + Dimensions.Builder dimensionsBuilder = new Dimensions.Builder() + .add("host", hostname) + .add("role", "tenants") + .add("flavor", nodeSpec.nodeFlavor) + .add("state", nodeSpec.nodeState.toString()); + + if (nodeSpec.owner.isPresent()) { + dimensionsBuilder + .add("tenantName", nodeSpec.owner.get().tenant) + .add("app", nodeSpec.owner.get().application); + } + if (nodeSpec.membership.isPresent()) { + dimensionsBuilder + .add("clustertype", nodeSpec.membership.get().clusterType) + .add("clusterid", nodeSpec.membership.get().clusterId); } + + if (nodeSpec.vespaVersion.isPresent()) 
dimensionsBuilder.add("vespaVersion", nodeSpec.vespaVersion.get()); + + Dimensions dimensions = dimensionsBuilder.build(); + addIfNotNull(dimensions, "node.cpu.throttled_time", stats.getCpuStats().get("throttling_data"), "throttled_time"); + addIfNotNull(dimensions, "node.cpu.total_usage", stats.getCpuStats().get("cpu_usage"), "total_usage"); + addIfNotNull(dimensions, "node.cpu.system_cpu_usage", stats.getCpuStats(), "system_cpu_usage"); + + addIfNotNull(dimensions, "node.memory.limit", stats.getMemoryStats(), "limit"); + addIfNotNull(dimensions, "node.memory.usage", stats.getMemoryStats(), "usage"); + + stats.getNetworks().forEach((interfaceName, interfaceStats) -> { + Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build(); + + addIfNotNull(netDims, "node.network.bytes_rcvd", interfaceStats, "rx_bytes"); + addIfNotNull(netDims, "node.network.bytes_sent", interfaceStats, "tx_bytes"); + }); + + storageMaintainer.updateIfNeededAndGetDiskMetricsFor(nodeSpec.containerName).forEach( + (metricName, metricValue) -> metricReceiver.declareGauge(dimensions, metricName).sample(metricValue.doubleValue())); + } + + @SuppressWarnings("unchecked") + private void addIfNotNull(Dimensions dimensions, String yamasName, Object metrics, String metricName) { + Map<String, Object> metricsMap = (Map<String, Object>) metrics; + if (metricsMap == null || !metricsMap.containsKey(metricName)) return; + try { + metricReceiver.declareGauge(dimensions, yamasName).sample(((Number) metricsMap.get(metricName)).doubleValue()); + } catch (Throwable e) { + logger.warning("Failed to update " + yamasName + " metric with value " + metricsMap.get(metricName), e); + } + } + + public Optional<ContainerNodeSpec> getContainerNodeSpec() { + synchronized (monitor) { + return Optional.ofNullable(lastNodeSpec); + } + } + + @Override + public boolean isDownloadingImage() { + return imageBeingDownloaded != null; + } + + @Override + public int getAndResetNumberOfUnhandledExceptions() { + int 
temp = numberOfUnhandledException; + numberOfUnhandledException = 0; + return temp; } } diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/noderepository/NodeRepository.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/noderepository/NodeRepository.java index fe40ff124b9..f29e79ec271 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/noderepository/NodeRepository.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/noderepository/NodeRepository.java @@ -1,9 +1,7 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.node.admin.noderepository; -import com.yahoo.vespa.applicationmodel.HostName; import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec; -import com.yahoo.vespa.hosted.dockerapi.DockerImage; import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAttributes; import java.io.IOException; @@ -16,9 +14,9 @@ import java.util.Optional; public interface NodeRepository { List<ContainerNodeSpec> getContainersToRun() throws IOException; - Optional<ContainerNodeSpec> getContainerNodeSpec(HostName hostName) throws IOException; + Optional<ContainerNodeSpec> getContainerNodeSpec(String hostName) throws IOException; - void updateNodeAttributes(HostName hostName, NodeAttributes nodeAttributes) throws IOException; + void updateNodeAttributes(String hostName, NodeAttributes nodeAttributes) throws IOException; - void markAsReady(HostName hostName) throws IOException; + void markAsReady(String hostName) throws IOException; } diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/noderepository/NodeRepositoryImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/noderepository/NodeRepositoryImpl.java index c86aff44d19..d67a19491b4 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/noderepository/NodeRepositoryImpl.java +++ 
b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/noderepository/NodeRepositoryImpl.java @@ -1,7 +1,6 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.node.admin.noderepository; -import com.yahoo.vespa.applicationmodel.HostName; import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec; import com.yahoo.vespa.hosted.dockerapi.ContainerName; import com.yahoo.vespa.hosted.dockerapi.DockerImage; @@ -12,6 +11,7 @@ import com.yahoo.vespa.hosted.node.admin.noderepository.bindings.UpdateNodeAttri import com.yahoo.vespa.hosted.node.admin.noderepository.bindings.UpdateNodeAttributesResponse; import com.yahoo.vespa.hosted.node.admin.util.ConfigServerHttpRequestExecutor; import com.yahoo.vespa.hosted.node.admin.util.PrefixLogger; +import com.yahoo.vespa.hosted.provision.Node; import java.io.IOException; import java.util.ArrayList; @@ -29,7 +29,7 @@ public class NodeRepositoryImpl implements NodeRepository { private final int port; private final ConfigServerHttpRequestExecutor requestExecutor; - public NodeRepositoryImpl(Set<HostName> configServerHosts, int configPort, String baseHostName) { + public NodeRepositoryImpl(Set<String> configServerHosts, int configPort, String baseHostName) { this.baseHostName = baseHostName; this.port = configPort; this.requestExecutor = ConfigServerHttpRequestExecutor.create(configServerHosts); @@ -65,7 +65,7 @@ public class NodeRepositoryImpl implements NodeRepository { } @Override - public Optional<ContainerNodeSpec> getContainerNodeSpec(HostName hostName) throws IOException { + public Optional<ContainerNodeSpec> getContainerNodeSpec(String hostName) throws IOException { final GetNodesResponse nodeResponse = requestExecutor.get( "/nodes/v2/node/?hostname=" + hostName + "&recursive=true", port, @@ -76,7 +76,7 @@ public class NodeRepositoryImpl implements NodeRepository { return Optional.empty(); } if (nodeResponse.nodes.size() != 1) { - 
throw new RuntimeException("Did not get data for one node using hostname=" + hostName.toString() + "\n" + nodeResponse.toString()); + throw new RuntimeException("Did not get data for one node using hostname=" + hostName + "\n" + nodeResponse.toString()); } return Optional.of(createContainerNodeSpec(nodeResponse.nodes.get(0))); } @@ -84,8 +84,8 @@ public class NodeRepositoryImpl implements NodeRepository { private static ContainerNodeSpec createContainerNodeSpec(GetNodesResponse.Node node) throws IllegalArgumentException, NullPointerException { Objects.requireNonNull(node.nodeState, "Unknown node state"); - NodeState nodeState = NodeState.valueOf(node.nodeState.toUpperCase()); - if (nodeState == NodeState.ACTIVE) { + Node.State nodeState = Node.State.valueOf(node.nodeState); + if (nodeState == Node.State.active) { Objects.requireNonNull(node.wantedDockerImage, "Unknown docker image for active node"); Objects.requireNonNull(node.wantedRestartGeneration, "Unknown wantedRestartGeneration for active node"); Objects.requireNonNull(node.currentRestartGeneration, "Unknown currentRestartGeneration for active node"); @@ -93,11 +93,27 @@ public class NodeRepositoryImpl implements NodeRepository { String hostName = Objects.requireNonNull(node.hostname, "hostname is null"); + ContainerNodeSpec.Owner owner = null; + if (node.owner != null) { + owner = new ContainerNodeSpec.Owner(node.owner.tenant, node.owner.application, node.owner.instance); + } + + ContainerNodeSpec.Membership membership = null; + if (node.membership != null) { + membership = new ContainerNodeSpec.Membership(node.membership.clusterType, node.membership.clusterId, + node.membership.group, node.membership.index, node.membership.retired); + } + return new ContainerNodeSpec( - new HostName(hostName), + hostName, Optional.ofNullable(node.wantedDockerImage).map(DockerImage::new), containerNameFromHostName(hostName), nodeState, + node.nodeType, + node.nodeFlavor, + Optional.ofNullable(node.vespaVersion), + 
Optional.ofNullable(owner), + Optional.ofNullable(membership), Optional.ofNullable(node.wantedRestartGeneration), Optional.ofNullable(node.currentRestartGeneration), Optional.ofNullable(node.minCpuCores), @@ -110,7 +126,7 @@ public class NodeRepositoryImpl implements NodeRepository { } @Override - public void updateNodeAttributes(final HostName hostName, final NodeAttributes nodeAttributes) throws IOException { + public void updateNodeAttributes(final String hostName, final NodeAttributes nodeAttributes) throws IOException { UpdateNodeAttributesResponse response = requestExecutor.patch( "/nodes/v2/node/" + hostName, port, @@ -124,7 +140,7 @@ public class NodeRepositoryImpl implements NodeRepository { } @Override - public void markAsReady(final HostName hostName) throws IOException { + public void markAsReady(final String hostName) throws IOException { NodeReadyResponse response = requestExecutor.put( "/nodes/v2/state/ready/" + hostName, port, diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/noderepository/NodeState.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/noderepository/NodeState.java deleted file mode 100644 index ca2a0bb9955..00000000000 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/noderepository/NodeState.java +++ /dev/null @@ -1,7 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.vespa.hosted.node.admin.noderepository; - -// TODO: Unite with com.yahoo.vespa.hosted.provision.Node.State -public enum NodeState { - PROVISIONED, READY, RESERVED, ACTIVE, INACTIVE, DIRTY, FAILED -} diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/noderepository/bindings/GetNodesResponse.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/noderepository/bindings/GetNodesResponse.java index b0b52f911ac..37f29591783 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/noderepository/bindings/GetNodesResponse.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/noderepository/bindings/GetNodesResponse.java @@ -29,6 +29,11 @@ public class GetNodesResponse { public final String wantedDockerImage; public final String currentDockerImage; public final String nodeState; + public final String nodeType; + public final String nodeFlavor; + public final String vespaVersion; + public final Owner owner; + public final Membership membership; public final Long wantedRestartGeneration; public final Long currentRestartGeneration; public final Double minCpuCores; @@ -40,6 +45,11 @@ public class GetNodesResponse { @JsonProperty("wantedDockerImage") String wantedDockerImage, @JsonProperty("currentDockerImage") String currentDockerImage, @JsonProperty("state") String nodeState, + @JsonProperty("type") String nodeType, + @JsonProperty("flavor") String nodeFlavor, + @JsonProperty("vespaVersion") String vespaVersion, + @JsonProperty("owner") Owner owner, + @JsonProperty("membership") Membership membership, @JsonProperty("restartGeneration") Long wantedRestartGeneration, @JsonProperty("currentRestartGeneration") Long currentRestartGeneration, @JsonProperty("minCpuCores") Double minCpuCores, @@ -49,6 +59,11 @@ public class GetNodesResponse { this.wantedDockerImage = wantedDockerImage; this.currentDockerImage = currentDockerImage; this.nodeState = nodeState; + this.nodeType = nodeType; + this.nodeFlavor 
= nodeFlavor; + this.vespaVersion = vespaVersion; + this.owner = owner; + this.membership = membership; this.wantedRestartGeneration = wantedRestartGeneration; this.currentRestartGeneration = currentRestartGeneration; this.minCpuCores = minCpuCores; @@ -62,6 +77,11 @@ public class GetNodesResponse { + " wantedDockerImage = " + wantedDockerImage + " currentDockerImage = " + currentDockerImage + " nodeState = " + nodeState + + " nodeType = " + nodeType + + " nodeFlavor = " + nodeFlavor + + " vespaVersion = " + vespaVersion + + " owner = " + owner + + " membership = " + membership + " wantedRestartGeneration = " + wantedRestartGeneration + " currentRestartGeneration = " + currentRestartGeneration + " minCpuCores = " + minCpuCores @@ -69,5 +89,61 @@ public class GetNodesResponse { + " minDiskAvailableGb = " + minDiskAvailableGb + " }"; } + + + public static class Owner { + public final String tenant; + public final String application; + public final String instance; + + public Owner( + @JsonProperty("tenant") String tenant, + @JsonProperty("application") String application, + @JsonProperty("instance") String instance) { + this.tenant = tenant; + this.application = application; + this.instance = instance; + } + + public String toString() { + return "Owner {" + + " tenant = " + tenant + + " application = " + application + + " instance = " + instance + + " }"; + } + } + + public static class Membership { + public final String clusterType; + public final String clusterId; + public final String group; + public final int index; + public final boolean retired; + + public Membership( + @JsonProperty("clustertype") String clusterType, + @JsonProperty("clusterid") String clusterId, + @JsonProperty("group") String group, + @JsonProperty("index") int index, + @JsonProperty("retired") boolean retired) { + this.clusterType = clusterType; + this.clusterId = clusterId; + this.group = group; + this.index = index; + this.retired = retired; + } + + @Override + public String toString() { 
+ return "Membership {" + + " clusterType = " + clusterType + + " clusterId = " + clusterId + + " group = " + group + + " index = " + index + + " retired = " + retired + + " }"; + } + } } } diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/orchestrator/Orchestrator.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/orchestrator/Orchestrator.java index 392fed52ced..9728e997647 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/orchestrator/Orchestrator.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/orchestrator/Orchestrator.java @@ -1,8 +1,6 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.node.admin.orchestrator; -import com.yahoo.vespa.applicationmodel.HostName; - import java.util.List; import java.util.Optional; @@ -15,12 +13,12 @@ public interface Orchestrator { /** * Invokes orchestrator suspend of a host. Returns whether suspend was granted. */ - boolean suspend(HostName hostName); + boolean suspend(String hostName); /** * Invokes orchestrator resume of a host. Returns whether resume was granted. */ - boolean resume(HostName hostName); + boolean resume(String hostName); /** * Invokes orchestrator suspend hosts. Returns failure reasons when failing. diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/orchestrator/OrchestratorImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/orchestrator/OrchestratorImpl.java index 768807f1665..b91c2411d95 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/orchestrator/OrchestratorImpl.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/orchestrator/OrchestratorImpl.java @@ -1,6 +1,7 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.hosted.node.admin.orchestrator; +import com.yahoo.vespa.defaults.Defaults; import com.yahoo.vespa.hosted.node.admin.noderepository.NodeRepositoryImpl; import com.yahoo.vespa.hosted.node.admin.util.ConfigServerHttpRequestExecutor; @@ -11,7 +12,6 @@ import com.yahoo.vespa.orchestrator.restapi.HostSuspensionApi; import com.yahoo.vespa.orchestrator.restapi.wire.BatchHostSuspendRequest; import com.yahoo.vespa.orchestrator.restapi.wire.BatchOperationResult; import com.yahoo.vespa.orchestrator.restapi.wire.UpdateHostResponse; -import com.yahoo.vespa.applicationmodel.HostName; import java.util.List; import java.util.Optional; import java.util.Set; @@ -23,8 +23,7 @@ import java.util.Set; */ public class OrchestratorImpl implements Orchestrator { private static final PrefixLogger NODE_ADMIN_LOGGER = PrefixLogger.getNodeAdminLogger(OrchestratorImpl.class); - // TODO: Figure out the port dynamically. - static final int HARDCODED_ORCHESTRATOR_PORT = 19071; + static final int WEB_SERVICE_PORT = Defaults.getDefaults().vespaWebServicePort(); // TODO: Find a way to avoid duplicating this (present in orchestrator's services.xml also). 
private static final String ORCHESTRATOR_PATH_PREFIX = "/orchestrator"; static final String ORCHESTRATOR_PATH_PREFIX_HOST_API @@ -39,7 +38,7 @@ public class OrchestratorImpl implements Orchestrator { this.requestExecutor = requestExecutor; } - public OrchestratorImpl(Set<HostName> configServerHosts) { + public OrchestratorImpl(Set<String> configServerHosts) { if (configServerHosts.isEmpty()) { throw new IllegalStateException("Environment setting for config servers missing or empty."); } @@ -47,14 +46,14 @@ public class OrchestratorImpl implements Orchestrator { } @Override - public boolean suspend(final HostName hostName) { + public boolean suspend(final String hostName) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(OrchestratorImpl.class, - NodeRepositoryImpl.containerNameFromHostName(hostName.toString())); + NodeRepositoryImpl.containerNameFromHostName(hostName)); try { final UpdateHostResponse updateHostResponse = requestExecutor.put( ORCHESTRATOR_PATH_PREFIX_HOST_API + "/" + hostName + "/suspended", - HARDCODED_ORCHESTRATOR_PORT, + WEB_SERVICE_PORT, Optional.empty(), /* body */ UpdateHostResponse.class); return updateHostResponse.reason() == null; @@ -73,7 +72,7 @@ public class OrchestratorImpl implements Orchestrator { try { final BatchOperationResult batchOperationResult = requestExecutor.put( ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API, - HARDCODED_ORCHESTRATOR_PORT, + WEB_SERVICE_PORT, Optional.of(new BatchHostSuspendRequest(parentHostName, hostNames)), BatchOperationResult.class); return batchOperationResult.getFailureReason(); @@ -84,13 +83,13 @@ public class OrchestratorImpl implements Orchestrator { } @Override - public boolean resume(final HostName hostName) { + public boolean resume(final String hostName) { PrefixLogger logger = PrefixLogger.getNodeAgentLogger(OrchestratorImpl.class, - NodeRepositoryImpl.containerNameFromHostName(hostName.toString())); + NodeRepositoryImpl.containerNameFromHostName(hostName)); try { final UpdateHostResponse 
batchOperationResult = requestExecutor.delete( ORCHESTRATOR_PATH_PREFIX_HOST_API + "/" + hostName + "/suspended", - HARDCODED_ORCHESTRATOR_PORT, + WEB_SERVICE_PORT, UpdateHostResponse.class); return batchOperationResult.reason() == null; } catch (ConfigServerHttpRequestExecutor.NotFoundException n) { diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/provider/ComponentsProvider.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/provider/ComponentsProvider.java index b0e07e03eea..f00b6bb828c 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/provider/ComponentsProvider.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/provider/ComponentsProvider.java @@ -1,6 +1,7 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.node.admin.provider; +import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper; import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminStateUpdater; /** @@ -10,4 +11,6 @@ import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminStateUpdater; */ public interface ComponentsProvider { NodeAdminStateUpdater getNodeAdminStateUpdater(); + + MetricReceiverWrapper getMetricReceiverWrapper(); } diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/provider/ComponentsProviderImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/provider/ComponentsProviderImpl.java index 9d3d4c31afc..b6d94bb0557 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/provider/ComponentsProviderImpl.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/provider/ComponentsProviderImpl.java @@ -1,9 +1,10 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.hosted.node.admin.provider; -import com.yahoo.vespa.applicationmodel.HostName; -import com.yahoo.vespa.hosted.node.admin.maintenance.MaintenanceScheduler; -import com.yahoo.vespa.hosted.node.admin.maintenance.MaintenanceSchedulerImpl; +import com.yahoo.vespa.defaults.Defaults; +import com.yahoo.vespa.hosted.dockerapi.ContainerName; +import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper; +import com.yahoo.vespa.hosted.node.admin.maintenance.StorageMaintainer; import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdmin; import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminImpl; import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminStateUpdater; @@ -16,7 +17,11 @@ import com.yahoo.vespa.hosted.node.admin.noderepository.NodeRepositoryImpl; import com.yahoo.vespa.hosted.node.admin.orchestrator.Orchestrator; import com.yahoo.vespa.hosted.node.admin.orchestrator.OrchestratorImpl; import com.yahoo.vespa.hosted.node.admin.util.Environment; +import com.yahoo.vespa.hosted.node.admin.util.SecretAgentScheduleMaker; +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.Set; import java.util.function.Function; @@ -27,37 +32,64 @@ import java.util.function.Function; */ public class ComponentsProviderImpl implements ComponentsProvider { - private final Docker docker; private final NodeAdminStateUpdater nodeAdminStateUpdater; + private final MetricReceiverWrapper metricReceiverWrapper; private static final long INITIAL_SCHEDULER_DELAY_MILLIS = 1; - private static final int NODE_AGENT_SCAN_INTERVAL_MILLIS = 60000; - private static final int HARDCODED_NODEREPOSITORY_PORT = 19071; + private static final int NODE_AGENT_SCAN_INTERVAL_MILLIS = 30000; + private static final int WEB_SERVICE_PORT = Defaults.getDefaults().vespaWebServicePort(); private static final String ENV_HOSTNAME = "HOSTNAME"; // We only scan for new nodes within a host every 5 minutes. 
This is only if new nodes are added or removed // which happens rarely. Changes of apps running etc it detected by the NodeAgent. private static final int NODE_ADMIN_STATE_INTERVAL_MILLIS = 5 * 60000; - public ComponentsProviderImpl(final Docker docker) { - this.docker = docker; + + public ComponentsProviderImpl(final Docker docker, final MetricReceiverWrapper metricReceiver) { String baseHostName = java.util.Optional.ofNullable(System.getenv(ENV_HOSTNAME)) .orElseThrow(() -> new IllegalStateException("Environment variable " + ENV_HOSTNAME + " unset")); Environment environment = new Environment(); - Set<HostName> configServerHosts = environment.getConfigServerHosts(); + Set<String> configServerHosts = environment.getConfigServerHosts(); Orchestrator orchestrator = new OrchestratorImpl(configServerHosts); - NodeRepository nodeRepository = new NodeRepositoryImpl(configServerHosts, HARDCODED_NODEREPOSITORY_PORT, baseHostName); - MaintenanceScheduler maintenanceScheduler = new MaintenanceSchedulerImpl(); + NodeRepository nodeRepository = new NodeRepositoryImpl(configServerHosts, WEB_SERVICE_PORT, baseHostName); + StorageMaintainer storageMaintainer = new StorageMaintainer(); - final Function<HostName, NodeAgent> nodeAgentFactory = (hostName) -> new NodeAgentImpl(hostName, nodeRepository, - orchestrator, new DockerOperationsImpl(docker, environment), maintenanceScheduler); - final NodeAdmin nodeAdmin = new NodeAdminImpl(docker, nodeAgentFactory, maintenanceScheduler, NODE_AGENT_SCAN_INTERVAL_MILLIS); + final Function<String, NodeAgent> nodeAgentFactory = (hostName) -> new NodeAgentImpl(hostName, nodeRepository, + orchestrator, new DockerOperationsImpl(docker, environment), storageMaintainer, metricReceiver); + final NodeAdmin nodeAdmin = new NodeAdminImpl(docker, nodeAgentFactory, storageMaintainer, + NODE_AGENT_SCAN_INTERVAL_MILLIS, metricReceiver); nodeAdminStateUpdater = new NodeAdminStateUpdater( nodeRepository, nodeAdmin, INITIAL_SCHEDULER_DELAY_MILLIS, 
NODE_ADMIN_STATE_INTERVAL_MILLIS, orchestrator, baseHostName); + + metricReceiverWrapper = metricReceiver; + initializeNodeAgentSecretAgent(docker, environment.getZone()); } @Override public NodeAdminStateUpdater getNodeAdminStateUpdater() { return nodeAdminStateUpdater; } + + @Override + public MetricReceiverWrapper getMetricReceiverWrapper() { + return metricReceiverWrapper; + } + + + private void initializeNodeAgentSecretAgent(Docker docker, String zone) { + ContainerName nodeAdminName = new ContainerName("node-admin"); + final Path yamasAgentFolder = Paths.get("/etc/yamas-agent/"); + docker.executeInContainer(nodeAdminName, "sudo", "chmod", "a+w", yamasAgentFolder.toString()); + + Path nodeAdminCheckPath = Paths.get("/usr/bin/curl"); + SecretAgentScheduleMaker scheduleMaker = new SecretAgentScheduleMaker("node-admin", 60, nodeAdminCheckPath, + "localhost:4080/rest/metrics").withTag("zone", zone); + + try { + scheduleMaker.writeTo(yamasAgentFolder); + } catch (IOException e) { + throw new RuntimeException("Failed to write secret-agent schedules for node-admin", e); + } + docker.executeInContainer(nodeAdminName, "service", "yamas-agent", "restart"); + } } diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/restapi/RestApiHandler.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/restapi/RestApiHandler.java index 2ce4151f497..b8a4e786e87 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/restapi/RestApiHandler.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/restapi/RestApiHandler.java @@ -7,12 +7,14 @@ import com.yahoo.container.jdisc.HttpRequest; import com.yahoo.container.jdisc.HttpResponse; import com.yahoo.container.jdisc.LoggingRequestHandler; import com.yahoo.container.logging.AccessLog; +import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper; import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminStateUpdater; import 
com.yahoo.vespa.hosted.node.admin.provider.ComponentsProvider; import javax.ws.rs.core.MediaType; import java.io.IOException; import java.io.OutputStream; +import java.io.PrintStream; import java.nio.charset.StandardCharsets; import java.util.Optional; import java.util.concurrent.Executor; @@ -30,12 +32,14 @@ import static com.yahoo.jdisc.http.HttpRequest.Method.PUT; */ public class RestApiHandler extends LoggingRequestHandler{ - private final NodeAdminStateUpdater refresher; private final static ObjectMapper objectMapper = new ObjectMapper(); + private final NodeAdminStateUpdater refresher; + private final MetricReceiverWrapper metricReceiverWrapper; public RestApiHandler(Executor executor, AccessLog accessLog, ComponentsProvider componentsProvider) { super(executor, accessLog); this.refresher = componentsProvider.getNodeAdminStateUpdater(); + this.metricReceiverWrapper = componentsProvider.getMetricReceiverWrapper(); } @Override @@ -47,16 +51,29 @@ public class RestApiHandler extends LoggingRequestHandler{ return handlePut(request); } return new SimpleResponse(400, "Only PUT and GET are implemented."); - } private HttpResponse handleGet(HttpRequest request) { String path = request.getUri().getPath(); if (path.endsWith("/info")) { + return new SimpleObjectResponse(200, refresher.getDebugPage()); + } + + if (path.endsWith("/metrics")) { return new HttpResponse(200) { @Override + public String getContentType() { + return MediaType.APPLICATION_JSON; + } + + @Override public void render(OutputStream outputStream) throws IOException { - objectMapper.writeValue(outputStream, refresher.getDebugPage()); + try (PrintStream printStream = new PrintStream(outputStream)) { + for (MetricReceiverWrapper.DimensionMetrics dimensionMetrics : metricReceiverWrapper) { + String secretAgentJsonReport = dimensionMetrics.toSecretAgentReport() + "\n"; + printStream.write(secretAgentJsonReport.getBytes(StandardCharsets.UTF_8.name())); + } + } } }; } @@ -84,10 +101,9 @@ public class 
RestApiHandler extends LoggingRequestHandler{ } private static class SimpleResponse extends HttpResponse { - private final String jsonMessage; - public SimpleResponse(int code, String message) { + SimpleResponse(int code, String message) { super(code); ObjectNode objectNode = objectMapper.createObjectNode(); objectNode.put("jsonMessage", message); @@ -105,4 +121,22 @@ public class RestApiHandler extends LoggingRequestHandler{ } } + private static class SimpleObjectResponse extends HttpResponse { + private final Object response; + + SimpleObjectResponse(int status, Object response) { + super(status); + this.response = response; + } + + @Override + public String getContentType() { + return MediaType.APPLICATION_JSON; + } + + @Override + public void render(OutputStream outputStream) throws IOException { + objectMapper.writeValue(outputStream, response); + } + } } diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/util/ConfigServerHttpRequestExecutor.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/util/ConfigServerHttpRequestExecutor.java index d9b5f75931d..17d3f5ca2a4 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/util/ConfigServerHttpRequestExecutor.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/util/ConfigServerHttpRequestExecutor.java @@ -3,7 +3,6 @@ package com.yahoo.vespa.hosted.node.admin.util; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; -import com.yahoo.vespa.applicationmodel.HostName; import org.apache.http.HttpEntity; import org.apache.http.HttpResponse; import org.apache.http.client.HttpClient; @@ -35,10 +34,10 @@ public class ConfigServerHttpRequestExecutor { private final ObjectMapper mapper = new ObjectMapper(); private final HttpClient client; - private final Set<HostName> configServerHosts; + private final Set<String> configServerHosts; private final static int MAX_LOOPS = 2; - public static 
ConfigServerHttpRequestExecutor create(Set<HostName> configServerHosts) { + public static ConfigServerHttpRequestExecutor create(Set<String> configServerHosts) { PoolingHttpClientConnectionManager cm = new PoolingHttpClientConnectionManager(); // Increase max total connections to 200, which should be enough cm.setMaxTotal(200); @@ -46,13 +45,13 @@ public class ConfigServerHttpRequestExecutor { .setConnectionManager(cm).build()); } - public ConfigServerHttpRequestExecutor(Set<HostName> configServerHosts, HttpClient client) { + public ConfigServerHttpRequestExecutor(Set<String> configServerHosts, HttpClient client) { this.configServerHosts = configServerHosts; this.client = client; } public interface CreateRequest { - HttpUriRequest createRequest(HostName configserver) throws JsonProcessingException, UnsupportedEncodingException; + HttpUriRequest createRequest(String configserver) throws JsonProcessingException, UnsupportedEncodingException; } public class NotFoundException extends RuntimeException { @@ -65,7 +64,7 @@ public class ConfigServerHttpRequestExecutor { public <T> T tryAllConfigServers(CreateRequest requestFactory, Class<T> wantedReturnType) { Exception lastException = null; for (int loopRetry = 0; loopRetry < MAX_LOOPS; loopRetry++) { - for (HostName configServer : configServerHosts) { + for (String configServer : configServerHosts) { final HttpResponse response; try { response = client.execute(requestFactory.createRequest(configServer)); diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/util/Environment.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/util/Environment.java index 6a47d16ee97..297a9a49294 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/util/Environment.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/util/Environment.java @@ -1,8 +1,6 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.hosted.node.admin.util; -import com.yahoo.vespa.applicationmodel.HostName; - import java.net.InetAddress; import java.net.UnknownHostException; import java.util.Arrays; @@ -19,19 +17,19 @@ import java.util.stream.Collectors; public class Environment { private static final String ENV_CONFIGSERVERS = "services__addr_configserver"; private static final String ENV_NETWORK_TYPE = "NETWORK_TYPE"; + private static final String ENVIRONMENT = "ENVIRONMENT"; + private static final String REGION = "REGION"; public enum NetworkType { normal, local, vm } - public Set<HostName> getConfigServerHosts() { + public Set<String> getConfigServerHosts() { final String configServerHosts = System.getenv(ENV_CONFIGSERVERS); if (configServerHosts == null) { return Collections.emptySet(); } final List<String> hostNameStrings = Arrays.asList(configServerHosts.split("[,\\s]+")); - return hostNameStrings.stream() - .map(HostName::new) - .collect(Collectors.toSet()); + return hostNameStrings.stream().collect(Collectors.toSet()); } public NetworkType networkType() throws IllegalArgumentException { @@ -42,6 +40,24 @@ public class Environment { return NetworkType.valueOf(networkTypeInEnvironment); } + public String getEnvironment() { + return getEnvironmentVariable(ENVIRONMENT); + } + + public String getRegion() { + return getEnvironmentVariable(REGION); + } + + private String getEnvironmentVariable(String name) { + final String value = System.getenv(name); + if (value == null) throw new IllegalStateException(String.format("Environment variable %s not set", name)); + return value; + } + + public String getZone() { + return getEnvironment() + "." 
+ getRegion(); + } + public InetAddress getInetAddressForHost(String hostname) throws UnknownHostException { return InetAddress.getByName(hostname); } diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/util/SecretAgentScheduleMaker.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/util/SecretAgentScheduleMaker.java new file mode 100644 index 00000000000..e105aab7b46 --- /dev/null +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/util/SecretAgentScheduleMaker.java @@ -0,0 +1,65 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.hosted.node.admin.util; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.LinkedHashMap; +import java.util.Map; + +/** + * Helper class to generate and write the secret-agent schedule file. + * + * @author valerijf + */ +public class SecretAgentScheduleMaker { + private final String id; + private final int interval; + private final Path checkExecuteable; + private final String[] arguments; + private String user = "nobody"; + private final Map<String, Object> tags = new LinkedHashMap<>(); + + public SecretAgentScheduleMaker(String id, int interval, Path checkExecuteable, String... 
arguments) { + this.id = id; + this.interval = interval; + this.checkExecuteable = checkExecuteable; + this.arguments = arguments; + } + + public SecretAgentScheduleMaker withRunAsUser(String user) { + this.user = user; + return this; + } + + public SecretAgentScheduleMaker withTag(String tagKey, Object tagValue) { + tags.put(tagKey, tagValue); + return this; + } + + public void writeTo(Path yamasAgentDirectory) throws IOException { + Path scheduleFilePath = yamasAgentDirectory.resolve(id + ".yaml"); + Files.write(scheduleFilePath, toString().getBytes()); + scheduleFilePath.toFile().setReadable(true, false); // Give everyone read access to the schedule file + } + + public String toString() { + StringBuilder stringBuilder = new StringBuilder() + .append("- id: ").append(id).append("\n") + .append(" interval: ").append(interval).append("\n") + .append(" user: ").append(user).append("\n") + .append(" check: ").append(checkExecuteable.toFile()).append("\n"); + + if (arguments.length > 0) { + stringBuilder.append(" args: \n"); + for (String arg : arguments) { + stringBuilder.append(" - ").append(arg).append("\n"); + } + } + + stringBuilder.append(" tags:\n").append(" namespace: Vespa\n"); + tags.forEach((key, value) -> stringBuilder.append(" ").append(key).append(": ").append(value).append("\n")); + + return stringBuilder.toString(); + } +} diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImplTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImplTest.java index a36059df104..e9a79fdc040 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImplTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImplTest.java @@ -13,9 +13,7 @@ import java.util.Optional; import static org.hamcrest.core.Is.is; import static org.junit.Assert.assertThat; -import static org.mockito.Matchers.any; -import static 
org.mockito.Matchers.anyVararg; -import static org.mockito.Matchers.eq; +import static org.mockito.Matchers.*; import static org.mockito.Mockito.inOrder; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; @@ -23,7 +21,7 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class DockerOperationsImplTest { - private final Environment environment = new Environment(); + private final Environment environment = mock(Environment.class); private final Docker docker = mock(Docker.class); private final DockerOperationsImpl dockerOperations = new DockerOperationsImpl(docker, environment); diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/CallOrderVerifier.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/CallOrderVerifier.java index 481d4da67bf..879b9af4bc2 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/CallOrderVerifier.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/CallOrderVerifier.java @@ -5,6 +5,8 @@ import java.util.Iterator; import java.util.LinkedList; import java.util.List; +import static org.junit.Assert.assertTrue; + /** * Takes in strings representing function calls with their parameters and allows to check whether a subset of calls * occurred in a specific order. For example, by calling {@link CallOrderVerifier#add(String)} @@ -18,6 +20,8 @@ import java.util.List; * @author valerijf */ public class CallOrderVerifier { + private static final int waitForCallOrderTimeout = 60000; //ms + private final LinkedList<String> callOrder = new LinkedList<>(); private final Object monitor = new Object(); @@ -27,6 +31,16 @@ public class CallOrderVerifier { } } + public void assertInOrder(String... functionCalls) { + assertInOrderWithAssertMessage("", functionCalls); + } + + public void assertInOrderWithAssertMessage(String assertMessage, String... 
functionCalls) { + boolean inOrder = verifyInOrder(waitForCallOrderTimeout, functionCalls); + if ( ! inOrder && ! assertMessage.isEmpty()) + System.err.println(assertMessage); + assertTrue(toString(), inOrder); + } /** * Checks if list of function calls occur in order given within a timeout @@ -34,7 +48,7 @@ public class CallOrderVerifier { * @param functionCalls The expected order of function calls * @return true if the actual order of calls was equal to the order provided within timeout, false otherwise. */ - public boolean verifyInOrder(long timeout, String... functionCalls) { + private boolean verifyInOrder(long timeout, String... functionCalls) { final long startTime = System.currentTimeMillis(); while (System.currentTimeMillis() - startTime < timeout) { if (verifyInOrder(functionCalls)) { @@ -54,11 +68,11 @@ public class CallOrderVerifier { int pos = 0; synchronized (monitor) { for (String functionCall : functionCalls) { - int temp = indexOf(callOrder, functionCall, pos); - if (temp < pos) { + int temp = indexOf(callOrder.listIterator(pos), functionCall); + if (temp == -1) { return false; } - pos = temp; + pos += temp; } } @@ -67,21 +81,24 @@ public class CallOrderVerifier { /** * Finds the first index of needle in haystack after a given position. 
- * @param haystack List to search for an element in - * @param needle Element to find in list - * @param startPos Index to start search from - * @return Index of the next needle in haystack after startPos, -1 if not found + * @param iter Iterator to search in + * @param search Element to find in iterator + * @return Index of the next search in after startPos, -1 if not found */ - private int indexOf(List<String> haystack, String needle, int startPos) { - synchronized (monitor) { - Iterator<String> iter = haystack.listIterator(startPos); - for (int i = startPos; iter.hasNext(); i++) { - if (needle.equals(iter.next())) { - return i; - } + private int indexOf(Iterator<String> iter, String search) { + for (int i = 0; iter.hasNext(); i++) { + if (search.equals(iter.next())) { + return i; } } return -1; } + + @Override + public String toString() { + synchronized (monitor) { + return callOrder.toString(); + } + } } diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/ComponentsProviderWithMocks.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/ComponentsProviderWithMocks.java index d57fd284aa1..7ded32227bb 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/ComponentsProviderWithMocks.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/ComponentsProviderWithMocks.java @@ -1,7 +1,8 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.hosted.node.admin.integrationTests; -import com.yahoo.vespa.applicationmodel.HostName; +import com.yahoo.metrics.simple.MetricReceiver; +import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper; import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdmin; import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminImpl; import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminStateUpdater; @@ -20,20 +21,26 @@ import java.util.function.Function; * @author dybis */ public class ComponentsProviderWithMocks implements ComponentsProvider { - static final CallOrderVerifier callOrder = new CallOrderVerifier(); - static final NodeRepoMock nodeRepositoryMock = new NodeRepoMock(callOrder); - static final MaintenanceSchedulerMock maintenanceSchedulerMock = new MaintenanceSchedulerMock(callOrder); - static final OrchestratorMock orchestratorMock = new OrchestratorMock(callOrder); - static final Docker dockerMock = new DockerMock(callOrder); + static final CallOrderVerifier callOrderVerifier = new CallOrderVerifier(); + static final NodeRepoMock nodeRepositoryMock = new NodeRepoMock(callOrderVerifier); + static final StorageMaintainerMock maintenanceSchedulerMock = new StorageMaintainerMock(callOrderVerifier); + static final OrchestratorMock orchestratorMock = new OrchestratorMock(callOrderVerifier); + static final Docker dockerMock = new DockerMock(callOrderVerifier); private Environment environment = new Environment(); - private final Function<HostName, NodeAgent> nodeAgentFactory = (hostName) -> new NodeAgentImpl(hostName, - nodeRepositoryMock, orchestratorMock, new DockerOperationsImpl(dockerMock, environment), maintenanceSchedulerMock); - private NodeAdmin nodeAdmin = new NodeAdminImpl(dockerMock, nodeAgentFactory, maintenanceSchedulerMock, 100); + private final MetricReceiverWrapper mr = new MetricReceiverWrapper(MetricReceiver.nullImplementation); + private final Function<String, NodeAgent> nodeAgentFactory = (hostName) -> new 
NodeAgentImpl(hostName, + nodeRepositoryMock, orchestratorMock, new DockerOperationsImpl(dockerMock, environment), maintenanceSchedulerMock, mr); + private NodeAdmin nodeAdmin = new NodeAdminImpl(dockerMock, nodeAgentFactory, maintenanceSchedulerMock, 100, mr); @Override public NodeAdminStateUpdater getNodeAdminStateUpdater() { return new NodeAdminStateUpdater(nodeRepositoryMock, nodeAdmin, 1, 5, orchestratorMock, "localhost"); } + + @Override + public MetricReceiverWrapper getMetricReceiverWrapper() { + return null; + } } diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerFailTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerFailTest.java index ccb39024307..00e64ed947d 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerFailTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerFailTest.java @@ -1,10 +1,11 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.hosted.node.admin.integrationTests; -import com.yahoo.vespa.applicationmodel.HostName; +import com.yahoo.metrics.simple.MetricReceiver; import com.yahoo.vespa.hosted.dockerapi.ContainerName; import com.yahoo.vespa.hosted.dockerapi.Docker; import com.yahoo.vespa.hosted.dockerapi.DockerImage; +import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper; import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec; import com.yahoo.vespa.hosted.node.admin.docker.DockerOperationsImpl; import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdmin; @@ -12,8 +13,8 @@ import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminImpl; import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminStateUpdater; import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgent; import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentImpl; -import com.yahoo.vespa.hosted.node.admin.noderepository.NodeState; import com.yahoo.vespa.hosted.node.admin.util.Environment; +import com.yahoo.vespa.hosted.provision.Node; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -24,7 +25,6 @@ import java.util.Collections; import java.util.Optional; import java.util.function.Function; -import static org.junit.Assert.assertTrue; import static org.mockito.Matchers.any; import static org.mockito.Mockito.*; @@ -32,32 +32,38 @@ import static org.mockito.Mockito.*; * @author valerijf */ public class DockerFailTest { - private CallOrderVerifier callOrder; + private CallOrderVerifier callOrderVerifier; private Docker dockerMock; private NodeAdminStateUpdater updater; private ContainerNodeSpec initialContainerNodeSpec; @Before public void before() throws InterruptedException, UnknownHostException { - callOrder = new CallOrderVerifier(); - MaintenanceSchedulerMock maintenanceSchedulerMock = new MaintenanceSchedulerMock(callOrder); - OrchestratorMock orchestratorMock = new OrchestratorMock(callOrder); - NodeRepoMock nodeRepositoryMock = new 
NodeRepoMock(callOrder); - dockerMock = new DockerMock(callOrder); + callOrderVerifier = new CallOrderVerifier(); + StorageMaintainerMock maintenanceSchedulerMock = new StorageMaintainerMock(callOrderVerifier); + OrchestratorMock orchestratorMock = new OrchestratorMock(callOrderVerifier); + NodeRepoMock nodeRepositoryMock = new NodeRepoMock(callOrderVerifier); + dockerMock = new DockerMock(callOrderVerifier); Environment environment = mock(Environment.class); when(environment.getConfigServerHosts()).thenReturn(Collections.emptySet()); when(environment.getInetAddressForHost(any(String.class))).thenReturn(InetAddress.getByName("1.1.1.1")); - Function<HostName, NodeAgent> nodeAgentFactory = (hostName) -> - new NodeAgentImpl(hostName, nodeRepositoryMock, orchestratorMock, new DockerOperationsImpl(dockerMock, environment), maintenanceSchedulerMock); - NodeAdmin nodeAdmin = new NodeAdminImpl(dockerMock, nodeAgentFactory, maintenanceSchedulerMock, 100); + MetricReceiverWrapper mr = new MetricReceiverWrapper(MetricReceiver.nullImplementation); + Function<String, NodeAgent> nodeAgentFactory = (hostName) -> + new NodeAgentImpl(hostName, nodeRepositoryMock, orchestratorMock, new DockerOperationsImpl(dockerMock, environment), maintenanceSchedulerMock, mr); + NodeAdmin nodeAdmin = new NodeAdminImpl(dockerMock, nodeAgentFactory, maintenanceSchedulerMock, 100, mr); initialContainerNodeSpec = new ContainerNodeSpec( - new HostName("hostName"), + "hostName", Optional.of(new DockerImage("dockerImage")), new ContainerName("container"), - NodeState.ACTIVE, + Node.State.active, + "tenant", + "docker", + Optional.empty(), + Optional.empty(), + Optional.empty(), Optional.of(1L), Optional.of(1L), Optional.of(1d), @@ -72,7 +78,7 @@ public class DockerFailTest { Thread.sleep(10); } - assert callOrder.verifyInOrder(1000, + callOrderVerifier.assertInOrder( "createContainerCommand with DockerImage: DockerImage { imageId=dockerImage }, HostName: hostName, ContainerName: ContainerName { 
name=container }", "executeInContainer with ContainerName: ContainerName { name=container }, args: [/usr/bin/env, test, -x, /opt/yahoo/vespa/bin/vespa-nodectl]", "executeInContainer with ContainerName: ContainerName { name=container }, args: [/opt/yahoo/vespa/bin/vespa-nodectl, resume]"); @@ -87,10 +93,10 @@ public class DockerFailTest { public void dockerFailTest() throws InterruptedException { dockerMock.deleteContainer(initialContainerNodeSpec.containerName); - assertTrue(callOrder.verifyInOrder(1000, + callOrderVerifier.assertInOrder( "deleteContainer with ContainerName: ContainerName { name=container }", "createContainerCommand with DockerImage: DockerImage { imageId=dockerImage }, HostName: hostName, ContainerName: ContainerName { name=container }", "executeInContainer with ContainerName: ContainerName { name=container }, args: [/usr/bin/env, test, -x, /opt/yahoo/vespa/bin/vespa-nodectl]", - "executeInContainer with ContainerName: ContainerName { name=container }, args: [/opt/yahoo/vespa/bin/vespa-nodectl, resume]")); + "executeInContainer with ContainerName: ContainerName { name=container }, args: [/opt/yahoo/vespa/bin/vespa-nodectl, resume]"); } } diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerMock.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerMock.java index 9ed47d32705..4d13453ec86 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerMock.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerMock.java @@ -1,7 +1,6 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.hosted.node.admin.integrationTests; -import com.yahoo.vespa.applicationmodel.HostName; import com.yahoo.vespa.hosted.dockerapi.Container; import com.yahoo.vespa.hosted.dockerapi.ContainerName; import com.yahoo.vespa.hosted.dockerapi.Docker; @@ -24,21 +23,21 @@ import java.util.stream.Collectors; */ public class DockerMock implements Docker { private List<Container> containers = new ArrayList<>(); - public final CallOrderVerifier callOrder; + public final CallOrderVerifier callOrderVerifier; private static final Object monitor = new Object(); - public DockerMock(CallOrderVerifier callOrder) { - this.callOrder = callOrder; + public DockerMock(CallOrderVerifier callOrderVerifier) { + this.callOrderVerifier = callOrderVerifier; } @Override public CreateContainerCommand createContainerCommand( DockerImage dockerImage, ContainerName containerName, - HostName hostName) { + String hostName) { synchronized (monitor) { - callOrder.add("createContainerCommand with DockerImage: " + dockerImage + ", HostName: " + hostName + - ", ContainerName: " + containerName); + callOrderVerifier.add("createContainerCommand with DockerImage: " + dockerImage + ", HostName: " + hostName + + ", ContainerName: " + containerName); containers.add(new Container(hostName, dockerImage, containerName, true)); } @@ -48,26 +47,36 @@ public class DockerMock implements Docker { @Override public void connectContainerToNetwork(ContainerName containerName, String networkName) { synchronized (monitor) { - callOrder.add("Connecting " + containerName + " to network: " + networkName); + callOrderVerifier.add("Connecting " + containerName + " to network: " + networkName); } } @Override + public void copyArchiveToContainer(String sourcePath, ContainerName destinationContainer, String destinationPath) { + + } + + @Override public ContainerInfo inspectContainer(ContainerName containerName) { return () -> Optional.of(2); } @Override + public ContainerStats getContainerStats(ContainerName 
containerName) { + return null; + } + + @Override public void startContainer(ContainerName containerName) { synchronized (monitor) { - callOrder.add("startContainer with ContainerName: " + containerName); + callOrderVerifier.add("startContainer with ContainerName: " + containerName); } } @Override public void stopContainer(ContainerName containerName) { synchronized (monitor) { - callOrder.add("stopContainer with ContainerName: " + containerName); + callOrderVerifier.add("stopContainer with ContainerName: " + containerName); containers = containers.stream() .map(container -> container.name.equals(containerName) ? new Container(container.hostname, container.image, container.name, false) : container) @@ -78,7 +87,7 @@ public class DockerMock implements Docker { @Override public void deleteContainer(ContainerName containerName) { synchronized (monitor) { - callOrder.add("deleteContainer with ContainerName: " + containerName); + callOrderVerifier.add("deleteContainer with ContainerName: " + containerName); containers = containers.stream() .filter(container -> !container.name.equals(containerName)) .collect(Collectors.toList()); @@ -93,7 +102,7 @@ public class DockerMock implements Docker { } @Override - public Optional<Container> getContainer(HostName hostname) { + public Optional<Container> getContainer(String hostname) { synchronized (monitor) { return containers.stream().filter(container -> container.hostname.equals(hostname)).findFirst(); } @@ -102,7 +111,7 @@ public class DockerMock implements Docker { @Override public CompletableFuture<DockerImage> pullImageAsync(DockerImage image) { synchronized (monitor) { - callOrder.add("pullImageAsync with DockerImage: " + image); + callOrderVerifier.add("pullImageAsync with DockerImage: " + image); final CompletableFuture<DockerImage> completableFuture = new CompletableFuture<>(); new Thread() { public void run() { @@ -137,8 +146,8 @@ public class DockerMock implements Docker { @Override public ProcessResult 
executeInContainer(ContainerName containerName, String... args) { synchronized (monitor) { - callOrder.add("executeInContainer with ContainerName: " + containerName + - ", args: " + Arrays.toString(args)); + callOrderVerifier.add("executeInContainer with ContainerName: " + containerName + + ", args: " + Arrays.toString(args)); } return new ProcessResult(0, null, ""); } diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/MaintenanceSchedulerMock.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/MaintenanceSchedulerMock.java deleted file mode 100644 index 30ddc71f546..00000000000 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/MaintenanceSchedulerMock.java +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.vespa.hosted.node.admin.integrationTests; - -import com.yahoo.vespa.hosted.dockerapi.ContainerName; -import com.yahoo.vespa.hosted.node.admin.maintenance.MaintenanceScheduler; - -import java.io.IOException; - -/** - * @author valerijf - */ -public class MaintenanceSchedulerMock implements MaintenanceScheduler { - private final CallOrderVerifier callOrder; - - public MaintenanceSchedulerMock(CallOrderVerifier callOrder) { - this.callOrder = callOrder; - } - - @Override - public void removeOldFilesFromNode(ContainerName containerName) { - - } - - @Override - public void cleanNodeAdmin() { - - } - - @Override - public void deleteContainerStorage(ContainerName containerName) throws IOException { - callOrder.add("DeleteContainerStorage with ContainerName: " + containerName); - } -} diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/MultiDockerTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/MultiDockerTest.java index f67e21bbcd3..1896e46dd00 100644 --- 
a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/MultiDockerTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/MultiDockerTest.java @@ -1,19 +1,19 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.node.admin.integrationTests; -import com.yahoo.vespa.applicationmodel.HostName; -import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec; - +import com.yahoo.metrics.simple.MetricReceiver; import com.yahoo.vespa.hosted.dockerapi.ContainerName; import com.yahoo.vespa.hosted.dockerapi.DockerImage; +import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper; +import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec; import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdmin; import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminImpl; import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminStateUpdater; import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgent; import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentImpl; import com.yahoo.vespa.hosted.node.admin.docker.DockerOperationsImpl; -import com.yahoo.vespa.hosted.node.admin.noderepository.NodeState; import com.yahoo.vespa.hosted.node.admin.util.Environment; +import com.yahoo.vespa.hosted.provision.Node; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -25,7 +25,6 @@ import java.util.Collections; import java.util.Optional; import java.util.function.Function; -import static org.junit.Assert.assertTrue; import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -34,7 +33,7 @@ import static org.mockito.Mockito.when; * @author valerijf */ public class MultiDockerTest { - private CallOrderVerifier callOrder; + private CallOrderVerifier callOrderVerifier; private NodeRepoMock nodeRepositoryMock; private DockerMock dockerMock; private 
NodeAdmin nodeAdmin; @@ -42,19 +41,20 @@ public class MultiDockerTest { @Before public void before() throws InterruptedException, UnknownHostException { - callOrder = new CallOrderVerifier(); - MaintenanceSchedulerMock maintenanceSchedulerMock = new MaintenanceSchedulerMock(callOrder); - OrchestratorMock orchestratorMock = new OrchestratorMock(callOrder); - nodeRepositoryMock = new NodeRepoMock(callOrder); - dockerMock = new DockerMock(callOrder); + callOrderVerifier = new CallOrderVerifier(); + StorageMaintainerMock maintenanceSchedulerMock = new StorageMaintainerMock(callOrderVerifier); + OrchestratorMock orchestratorMock = new OrchestratorMock(callOrderVerifier); + nodeRepositoryMock = new NodeRepoMock(callOrderVerifier); + dockerMock = new DockerMock(callOrderVerifier); Environment environment = mock(Environment.class); when(environment.getConfigServerHosts()).thenReturn(Collections.emptySet()); when(environment.getInetAddressForHost(any(String.class))).thenReturn(InetAddress.getByName("1.1.1.1")); - Function<HostName, NodeAgent> nodeAgentFactory = (hostName) -> - new NodeAgentImpl(hostName, nodeRepositoryMock, orchestratorMock, new DockerOperationsImpl(dockerMock, environment), maintenanceSchedulerMock); - nodeAdmin = new NodeAdminImpl(dockerMock, nodeAgentFactory, maintenanceSchedulerMock, 100); + MetricReceiverWrapper mr = new MetricReceiverWrapper(MetricReceiver.nullImplementation); + Function<String, NodeAgent> nodeAgentFactory = (hostName) -> + new NodeAgentImpl(hostName, nodeRepositoryMock, orchestratorMock, new DockerOperationsImpl(dockerMock, environment), maintenanceSchedulerMock, mr); + nodeAdmin = new NodeAdminImpl(dockerMock, nodeAgentFactory, maintenanceSchedulerMock, 100, mr); updater = new NodeAdminStateUpdater(nodeRepositoryMock, nodeAdmin, 1, 1, orchestratorMock, "basehostname"); } @@ -65,15 +65,15 @@ public class MultiDockerTest { @Test public void test() throws InterruptedException, IOException { - addAndWaitForNode(new HostName("host1"), 
new ContainerName("container1"), Optional.of(new DockerImage("image1"))); + addAndWaitForNode("host1", new ContainerName("container1"), Optional.of(new DockerImage("image1"))); ContainerNodeSpec containerNodeSpec2 = - addAndWaitForNode(new HostName("host2"), new ContainerName("container2"), Optional.of(new DockerImage("image2"))); + addAndWaitForNode("host2", new ContainerName("container2"), Optional.of(new DockerImage("image2"))); nodeRepositoryMock.updateContainerNodeSpec( containerNodeSpec2.hostname, containerNodeSpec2.wantedDockerImage, containerNodeSpec2.containerName, - NodeState.DIRTY, + Node.State.dirty, containerNodeSpec2.wantedRestartGeneration, containerNodeSpec2.currentRestartGeneration, containerNodeSpec2.minCpuCores, @@ -83,13 +83,13 @@ public class MultiDockerTest { // Wait until it is marked ready Optional<ContainerNodeSpec> tempContainerNodeSpec; while ((tempContainerNodeSpec = nodeRepositoryMock.getContainerNodeSpec(containerNodeSpec2.hostname)).isPresent() - && tempContainerNodeSpec.get().nodeState != NodeState.READY) { + && tempContainerNodeSpec.get().nodeState != Node.State.ready) { Thread.sleep(10); } - addAndWaitForNode(new HostName("host3"), new ContainerName("container3"), Optional.of(new DockerImage("image1"))); + addAndWaitForNode("host3", new ContainerName("container3"), Optional.of(new DockerImage("image1"))); - assertTrue(callOrder.verifyInOrder(1000, + callOrderVerifier.assertInOrder( "createContainerCommand with DockerImage: DockerImage { imageId=image1 }, HostName: host1, ContainerName: ContainerName { name=container1 }", "executeInContainer with ContainerName: ContainerName { name=container1 }, args: [/usr/bin/env, test, -x, /opt/yahoo/vespa/bin/vespa-nodectl]", "executeInContainer with ContainerName: ContainerName { name=container1 }, args: [/opt/yahoo/vespa/bin/vespa-nodectl, resume]", @@ -103,25 +103,30 @@ public class MultiDockerTest { "createContainerCommand with DockerImage: DockerImage { imageId=image1 }, HostName: host3, 
ContainerName: ContainerName { name=container3 }", "executeInContainer with ContainerName: ContainerName { name=container3 }, args: [/usr/bin/env, test, -x, /opt/yahoo/vespa/bin/vespa-nodectl]", - "executeInContainer with ContainerName: ContainerName { name=container3 }, args: [/opt/yahoo/vespa/bin/vespa-nodectl, resume]")); + "executeInContainer with ContainerName: ContainerName { name=container3 }, args: [/opt/yahoo/vespa/bin/vespa-nodectl, resume]"); - assertTrue("Maintainer did not receive call to delete application storage", callOrder.verifyInOrder(1000, - "deleteContainer with ContainerName: ContainerName { name=container2 }", - "DeleteContainerStorage with ContainerName: ContainerName { name=container2 }")); + callOrderVerifier.assertInOrderWithAssertMessage("Maintainer did not receive call to delete application storage", + "deleteContainer with ContainerName: ContainerName { name=container2 }", + "DeleteContainerStorage with ContainerName: ContainerName { name=container2 }"); - assertTrue(callOrder.verifyInOrder(1000, + callOrderVerifier.assertInOrder( "updateNodeAttributes with HostName: host1, NodeAttributes: NodeAttributes{restartGeneration=1, dockerImage=DockerImage { imageId=image1 }, vespaVersion='null'}", "updateNodeAttributes with HostName: host2, NodeAttributes: NodeAttributes{restartGeneration=1, dockerImage=DockerImage { imageId=image2 }, vespaVersion='null'}", "markAsReady with HostName: host2", - "updateNodeAttributes with HostName: host3, NodeAttributes: NodeAttributes{restartGeneration=1, dockerImage=DockerImage { imageId=image1 }, vespaVersion='null'}")); + "updateNodeAttributes with HostName: host3, NodeAttributes: NodeAttributes{restartGeneration=1, dockerImage=DockerImage { imageId=image1 }, vespaVersion='null'}"); } - private ContainerNodeSpec addAndWaitForNode(HostName hostName, ContainerName containerName, Optional<DockerImage> dockerImage) throws InterruptedException { + private ContainerNodeSpec addAndWaitForNode(String hostName, 
ContainerName containerName, Optional<DockerImage> dockerImage) throws InterruptedException { ContainerNodeSpec containerNodeSpec = new ContainerNodeSpec( hostName, dockerImage, containerName, - NodeState.ACTIVE, + Node.State.active, + "tenant", + "docker", + Optional.empty(), + Optional.empty(), + Optional.empty(), Optional.of(1L), Optional.of(1L), Optional.of(1d), @@ -134,7 +139,7 @@ public class MultiDockerTest { Thread.sleep(10); } - assert callOrder.verifyInOrder(1000, + callOrderVerifier.assertInOrder( "createContainerCommand with DockerImage: " + dockerImage.get() + ", HostName: " + hostName + ", ContainerName: " + containerName, "executeInContainer with ContainerName: " + containerName + ", args: [/usr/bin/env, test, -x, /opt/yahoo/vespa/bin/vespa-nodectl]", "executeInContainer with ContainerName: " + containerName + ", args: [/opt/yahoo/vespa/bin/vespa-nodectl, resume]"); diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/NodeRepoMock.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/NodeRepoMock.java index ead4167333e..c1ee11ee4cb 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/NodeRepoMock.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/NodeRepoMock.java @@ -1,13 +1,12 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.hosted.node.admin.integrationTests; -import com.yahoo.vespa.applicationmodel.HostName; import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec; import com.yahoo.vespa.hosted.dockerapi.ContainerName; import com.yahoo.vespa.hosted.dockerapi.DockerImage; import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAttributes; import com.yahoo.vespa.hosted.node.admin.noderepository.NodeRepository; -import com.yahoo.vespa.hosted.node.admin.noderepository.NodeState; +import com.yahoo.vespa.hosted.provision.Node; import java.io.IOException; import java.util.ArrayList; @@ -22,12 +21,12 @@ import java.util.stream.Collectors; */ public class NodeRepoMock implements NodeRepository { private List<ContainerNodeSpec> containerNodeSpecs = new ArrayList<>(); - private final CallOrderVerifier callOrder; + private final CallOrderVerifier callOrderVerifier; private static final Object monitor = new Object(); - public NodeRepoMock(CallOrderVerifier callOrder) { - this.callOrder = callOrder; + public NodeRepoMock(CallOrderVerifier callOrderVerifier) { + this.callOrderVerifier = callOrderVerifier; } @Override @@ -38,7 +37,7 @@ public class NodeRepoMock implements NodeRepository { } @Override - public Optional<ContainerNodeSpec> getContainerNodeSpec(HostName hostName) throws IOException { + public Optional<ContainerNodeSpec> getContainerNodeSpec(String hostName) throws IOException { synchronized (monitor) { return containerNodeSpecs.stream() .filter(containerNodeSpec -> containerNodeSpec.hostname.equals(hostName)) @@ -47,42 +46,47 @@ public class NodeRepoMock implements NodeRepository { } @Override - public void updateNodeAttributes(HostName hostName, NodeAttributes nodeAttributes) throws IOException { + public void updateNodeAttributes(String hostName, NodeAttributes nodeAttributes) throws IOException { synchronized (monitor) { - callOrder.add("updateNodeAttributes with HostName: " + hostName + ", NodeAttributes: " + nodeAttributes); + 
callOrderVerifier.add("updateNodeAttributes with HostName: " + hostName + ", NodeAttributes: " + nodeAttributes); } } @Override - public void markAsReady(HostName hostName) throws IOException { + public void markAsReady(String hostName) throws IOException { Optional<ContainerNodeSpec> cns = getContainerNodeSpec(hostName); synchronized (monitor) { if (cns.isPresent()) { updateContainerNodeSpec(cns.get().hostname, - cns.get().wantedDockerImage, cns.get().containerName, NodeState.READY, + cns.get().wantedDockerImage, cns.get().containerName, Node.State.ready, cns.get().wantedRestartGeneration, cns.get().currentRestartGeneration, cns.get().minCpuCores, cns.get().minMainMemoryAvailableGb, cns.get().minDiskAvailableGb); } - callOrder.add("markAsReady with HostName: " + hostName); + callOrderVerifier.add("markAsReady with HostName: " + hostName); } } - public void updateContainerNodeSpec(HostName hostName, + public void updateContainerNodeSpec(String hostName, Optional<DockerImage> wantedDockerImage, ContainerName containerName, - NodeState nodeState, + Node.State nodeState, Optional<Long> wantedRestartGeneration, Optional<Long> currentRestartGeneration, Optional<Double> minCpuCores, Optional<Double> minMainMemoryAvailableGb, Optional<Double> minDiskAvailableGb) { - addContainerNodeSpec(new ContainerNodeSpec(hostName, - wantedDockerImage, containerName, nodeState, + updateContainerNodeSpec(new ContainerNodeSpec(hostName, + wantedDockerImage, containerName, nodeState, "tenant", "docker", + Optional.empty(), Optional.empty(), Optional.empty(), wantedRestartGeneration, currentRestartGeneration, minCpuCores, minMainMemoryAvailableGb, minDiskAvailableGb)); } + public void updateContainerNodeSpec(ContainerNodeSpec containerNodeSpec) { + addContainerNodeSpec(containerNodeSpec); + } + public void addContainerNodeSpec(ContainerNodeSpec containerNodeSpec) { removeContainerNodeSpec(containerNodeSpec.hostname); synchronized (monitor) { @@ -96,7 +100,7 @@ public class NodeRepoMock 
implements NodeRepository { } } - public void removeContainerNodeSpec(HostName hostName) { + public void removeContainerNodeSpec(String hostName) { synchronized (monitor) { containerNodeSpecs = containerNodeSpecs.stream() .filter(c -> !c.hostname.equals(hostName)) diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/NodeStateTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/NodeStateTest.java index 6391d9e3eec..45c8dfc5634 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/NodeStateTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/NodeStateTest.java @@ -1,7 +1,8 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.node.admin.integrationTests; -import com.yahoo.vespa.applicationmodel.HostName; +import com.yahoo.metrics.simple.MetricReceiver; +import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper; import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec; import com.yahoo.vespa.hosted.dockerapi.ContainerName; import com.yahoo.vespa.hosted.dockerapi.DockerImage; @@ -11,8 +12,8 @@ import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminStateUpdater; import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgent; import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentImpl; import com.yahoo.vespa.hosted.node.admin.docker.DockerOperationsImpl; -import com.yahoo.vespa.hosted.node.admin.noderepository.NodeState; import com.yahoo.vespa.hosted.node.admin.util.Environment; +import com.yahoo.vespa.hosted.provision.Node; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -26,7 +27,6 @@ import java.util.function.Function; import static org.hamcrest.core.Is.is; import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; import static org.mockito.Matchers.any; 
import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -38,7 +38,7 @@ import static org.mockito.Mockito.when; */ public class NodeStateTest { - private CallOrderVerifier callOrder; + private CallOrderVerifier callOrderVerifier; private NodeRepoMock nodeRepositoryMock; private DockerMock dockerMock; private ContainerNodeSpec initialContainerNodeSpec; @@ -46,25 +46,31 @@ public class NodeStateTest { @Before public void before() throws InterruptedException, UnknownHostException { - callOrder = new CallOrderVerifier(); - MaintenanceSchedulerMock maintenanceSchedulerMock = new MaintenanceSchedulerMock(callOrder); - OrchestratorMock orchestratorMock = new OrchestratorMock(callOrder); - nodeRepositoryMock = new NodeRepoMock(callOrder); - dockerMock = new DockerMock(callOrder); + callOrderVerifier = new CallOrderVerifier(); + StorageMaintainerMock maintenanceSchedulerMock = new StorageMaintainerMock(callOrderVerifier); + OrchestratorMock orchestratorMock = new OrchestratorMock(callOrderVerifier); + nodeRepositoryMock = new NodeRepoMock(callOrderVerifier); + dockerMock = new DockerMock(callOrderVerifier); Environment environment = mock(Environment.class); when(environment.getConfigServerHosts()).thenReturn(Collections.emptySet()); when(environment.getInetAddressForHost(any(String.class))).thenReturn(InetAddress.getByName("1.1.1.1")); - Function<HostName, NodeAgent> nodeAgentFactory = (hostName) -> - new NodeAgentImpl(hostName, nodeRepositoryMock, orchestratorMock, new DockerOperationsImpl(dockerMock, environment), maintenanceSchedulerMock); - NodeAdmin nodeAdmin = new NodeAdminImpl(dockerMock, nodeAgentFactory, maintenanceSchedulerMock, 100); + MetricReceiverWrapper mr = new MetricReceiverWrapper(MetricReceiver.nullImplementation); + Function<String, NodeAgent> nodeAgentFactory = (hostName) -> + new NodeAgentImpl(hostName, nodeRepositoryMock, orchestratorMock, new DockerOperationsImpl(dockerMock, environment), maintenanceSchedulerMock, mr); + 
NodeAdmin nodeAdmin = new NodeAdminImpl(dockerMock, nodeAgentFactory, maintenanceSchedulerMock, 100, mr); initialContainerNodeSpec = new ContainerNodeSpec( - new HostName("host1"), + "host1", Optional.of(new DockerImage("dockerImage")), new ContainerName("container"), - NodeState.ACTIVE, + Node.State.active, + "tenant", + "docker", + Optional.empty(), + Optional.empty(), + Optional.empty(), Optional.of(1L), Optional.of(1L), Optional.of(1d), @@ -79,7 +85,7 @@ public class NodeStateTest { Thread.sleep(10); } - assert callOrder.verifyInOrder(5000, + callOrderVerifier.assertInOrder( "createContainerCommand with DockerImage: DockerImage { imageId=dockerImage }, HostName: host1, ContainerName: ContainerName { name=container }", "executeInContainer with ContainerName: ContainerName { name=container }, args: [/usr/bin/env, test, -x, /opt/yahoo/vespa/bin/vespa-nodectl]", "executeInContainer with ContainerName: ContainerName { name=container }, args: [/opt/yahoo/vespa/bin/vespa-nodectl, resume]"); @@ -98,7 +104,7 @@ public class NodeStateTest { initialContainerNodeSpec.hostname, initialContainerNodeSpec.wantedDockerImage, initialContainerNodeSpec.containerName, - NodeState.DIRTY, + Node.State.dirty, initialContainerNodeSpec.wantedRestartGeneration, initialContainerNodeSpec.currentRestartGeneration, initialContainerNodeSpec.minCpuCores, @@ -108,15 +114,15 @@ public class NodeStateTest { // Wait until it is marked ready Optional<ContainerNodeSpec> containerNodeSpec; while ((containerNodeSpec = nodeRepositoryMock.getContainerNodeSpec(initialContainerNodeSpec.hostname)).isPresent() - && containerNodeSpec.get().nodeState != NodeState.READY) { + && containerNodeSpec.get().nodeState != Node.State.ready) { Thread.sleep(10); } - assertThat(nodeRepositoryMock.getContainerNodeSpec(initialContainerNodeSpec.hostname).get().nodeState, is(NodeState.READY)); + assertThat(nodeRepositoryMock.getContainerNodeSpec(initialContainerNodeSpec.hostname).get().nodeState, is(Node.State.ready)); - 
assertTrue("Node set to dirty, but no stop/delete call received", callOrder.verifyInOrder(1000, - "stopContainer with ContainerName: ContainerName { name=container }", - "deleteContainer with ContainerName: ContainerName { name=container }")); + callOrderVerifier.assertInOrderWithAssertMessage("Node set to dirty, but no stop/delete call received", + "stopContainer with ContainerName: ContainerName { name=container }", + "deleteContainer with ContainerName: ContainerName { name=container }"); } @Test @@ -128,16 +134,16 @@ public class NodeStateTest { initialContainerNodeSpec.hostname, newDockerImage, initialContainerNodeSpec.containerName, - NodeState.INACTIVE, + Node.State.inactive, initialContainerNodeSpec.wantedRestartGeneration, initialContainerNodeSpec.currentRestartGeneration, initialContainerNodeSpec.minCpuCores, initialContainerNodeSpec.minMainMemoryAvailableGb, initialContainerNodeSpec.minDiskAvailableGb); - assertTrue("Node set to inactive, but no stop/delete call received", callOrder.verifyInOrder(1000, - "stopContainer with ContainerName: ContainerName { name=container }", - "deleteContainer with ContainerName: ContainerName { name=container }")); + callOrderVerifier.assertInOrderWithAssertMessage("Node set to inactive, but no stop/delete call received", + "stopContainer with ContainerName: ContainerName { name=container }", + "deleteContainer with ContainerName: ContainerName { name=container }"); // Change node state to active @@ -145,7 +151,7 @@ public class NodeStateTest { initialContainerNodeSpec.hostname, newDockerImage, initialContainerNodeSpec.containerName, - NodeState.ACTIVE, + Node.State.active, initialContainerNodeSpec.wantedRestartGeneration, initialContainerNodeSpec.currentRestartGeneration, initialContainerNodeSpec.minCpuCores, @@ -153,10 +159,10 @@ public class NodeStateTest { initialContainerNodeSpec.minDiskAvailableGb); // Check that the container is started again after the delete call - assertTrue("Node not started again after being 
put to active state", callOrder.verifyInOrder(1000, - "deleteContainer with ContainerName: ContainerName { name=container }", - "createContainerCommand with DockerImage: DockerImage { imageId=newDockerImage }, HostName: host1, ContainerName: ContainerName { name=container }", - "executeInContainer with ContainerName: ContainerName { name=container }, args: [/usr/bin/env, test, -x, /opt/yahoo/vespa/bin/vespa-nodectl]", - "executeInContainer with ContainerName: ContainerName { name=container }, args: [/opt/yahoo/vespa/bin/vespa-nodectl, resume]")); + callOrderVerifier.assertInOrderWithAssertMessage("Node not started again after being put to active state", + "deleteContainer with ContainerName: ContainerName { name=container }", + "createContainerCommand with DockerImage: DockerImage { imageId=newDockerImage }, HostName: host1, ContainerName: ContainerName { name=container }", + "executeInContainer with ContainerName: ContainerName { name=container }, args: [/usr/bin/env, test, -x, /opt/yahoo/vespa/bin/vespa-nodectl]", + "executeInContainer with ContainerName: ContainerName { name=container }, args: [/opt/yahoo/vespa/bin/vespa-nodectl, resume]"); } } diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/OrchestratorMock.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/OrchestratorMock.java index 1f1d96b4357..8796a38a451 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/OrchestratorMock.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/OrchestratorMock.java @@ -1,7 +1,6 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.hosted.node.admin.integrationTests; -import com.yahoo.vespa.applicationmodel.HostName; import com.yahoo.vespa.hosted.node.admin.orchestrator.Orchestrator; import java.util.List; @@ -13,7 +12,7 @@ import java.util.Optional; * @author dybis */ public class OrchestratorMock implements Orchestrator { - private final CallOrderVerifier callOrder; + private final CallOrderVerifier callOrderVerifier; private boolean forceSingleSuspendResponse = true; private boolean forceSingleResumeResponse = true; @@ -21,21 +20,22 @@ public class OrchestratorMock implements Orchestrator { private static final Object monitor = new Object(); - public OrchestratorMock(CallOrderVerifier callOrder) { - this.callOrder = callOrder; + public OrchestratorMock(CallOrderVerifier callOrderVerifier) { + this.callOrderVerifier = callOrderVerifier; } @Override - public boolean suspend(HostName hostName) { + public boolean suspend(String hostName) { synchronized (monitor) { + callOrderVerifier.add("Suspend for " + hostName); return forceSingleSuspendResponse; } } @Override - public boolean resume(HostName hostName) { + public boolean resume(String hostName) { synchronized (monitor) { - callOrder.add("Resume for " + hostName); + callOrderVerifier.add("Resume for " + hostName); return forceSingleResumeResponse; } } @@ -43,8 +43,8 @@ public class OrchestratorMock implements Orchestrator { @Override public Optional<String> suspend(String parentHostName, List<String> hostNames) { synchronized (monitor) { - callOrder.add("Suspend with parent: " + parentHostName + " and hostnames: " + hostNames + - " - Forced response: " + forceGroupSuspendResponse); + callOrderVerifier.add("Suspend with parent: " + parentHostName + " and hostnames: " + hostNames + + " - Forced response: " + forceGroupSuspendResponse); return forceGroupSuspendResponse; } } diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RestartTest.java 
b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RestartTest.java new file mode 100644 index 00000000000..21078ba61d7 --- /dev/null +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RestartTest.java @@ -0,0 +1,93 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.hosted.node.admin.integrationTests; + +import com.yahoo.metrics.simple.MetricReceiver; +import com.yahoo.vespa.hosted.dockerapi.ContainerName; +import com.yahoo.vespa.hosted.dockerapi.DockerImage; +import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper; +import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec; +import com.yahoo.vespa.hosted.node.admin.docker.DockerOperationsImpl; +import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdmin; +import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminImpl; +import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminStateUpdater; +import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgent; +import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentImpl; +import com.yahoo.vespa.hosted.node.admin.util.Environment; +import com.yahoo.vespa.hosted.provision.Node; +import org.junit.Test; + +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.Collections; +import java.util.Optional; +import java.util.function.Function; + +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Tests that different wanted and current restart generation leads to execution of restart command + * + * @author musum + */ +public class RestartTest { + + @Test + public void test() throws InterruptedException, UnknownHostException { + CallOrderVerifier callOrderVerifier = new CallOrderVerifier(); + NodeRepoMock nodeRepositoryMock = new NodeRepoMock(callOrderVerifier); + StorageMaintainerMock 
maintenanceSchedulerMock = new StorageMaintainerMock(callOrderVerifier); + OrchestratorMock orchestratorMock = new OrchestratorMock(callOrderVerifier); + DockerMock dockerMock = new DockerMock(callOrderVerifier); + + Environment environment = mock(Environment.class); + when(environment.getConfigServerHosts()).thenReturn(Collections.emptySet()); + when(environment.getInetAddressForHost(any(String.class))).thenReturn(InetAddress.getByName("1.1.1.1")); + + MetricReceiverWrapper mr = new MetricReceiverWrapper(MetricReceiver.nullImplementation); + Function<String, NodeAgent> nodeAgentFactory = (hostName) -> + new NodeAgentImpl(hostName, nodeRepositoryMock, orchestratorMock, new DockerOperationsImpl(dockerMock, environment), maintenanceSchedulerMock, mr); + NodeAdmin nodeAdmin = new NodeAdminImpl(dockerMock, nodeAgentFactory, maintenanceSchedulerMock, 100, mr); + + long wantedRestartGeneration = 1; + long currentRestartGeneration = wantedRestartGeneration; + nodeRepositoryMock.addContainerNodeSpec(createContainerNodeSpec(wantedRestartGeneration, currentRestartGeneration)); + + NodeAdminStateUpdater updater = new NodeAdminStateUpdater(nodeRepositoryMock, nodeAdmin, 1, 1, orchestratorMock, "basehostname"); + + // Wait for node admin to be notified with node repo state and the docker container has been started + while (nodeAdmin.getListOfHosts().size() == 0) { + Thread.sleep(10); + } + + // Check that the container is started and NodeRepo has received the PATCH update + callOrderVerifier.assertInOrder("createContainerCommand with DockerImage: DockerImage { imageId=dockerImage }, HostName: host1, ContainerName: ContainerName { name=container }", + "updateNodeAttributes with HostName: host1, NodeAttributes: NodeAttributes{restartGeneration=1, dockerImage=DockerImage { imageId=dockerImage }, vespaVersion='null'}"); + + wantedRestartGeneration = 2; + currentRestartGeneration = 1; + nodeRepositoryMock.updateContainerNodeSpec(createContainerNodeSpec(wantedRestartGeneration, 
currentRestartGeneration)); + + callOrderVerifier.assertInOrder("Suspend for host1", + "executeInContainer with ContainerName: ContainerName { name=container }, args: [/opt/yahoo/vespa/bin/vespa-nodectl, restart]"); + updater.deconstruct(); + } + + private ContainerNodeSpec createContainerNodeSpec(long wantedRestartGeneration, long currentRestartGeneration) { + return new ContainerNodeSpec("host1", + Optional.of(new DockerImage("dockerImage")), + new ContainerName("container"), + Node.State.active, + "tenant", + "docker", + Optional.empty(), + Optional.empty(), + Optional.empty(), + Optional.of(wantedRestartGeneration), + Optional.of(currentRestartGeneration), + Optional.of(1d), + Optional.of(1d), + Optional.of(1d)); + } +} diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/ResumeTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/ResumeTest.java index a8e6971e51e..e7397e36b75 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/ResumeTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/ResumeTest.java @@ -1,7 +1,8 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.hosted.node.admin.integrationTests; -import com.yahoo.vespa.applicationmodel.HostName; +import com.yahoo.metrics.simple.MetricReceiver; +import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper; import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec; import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdmin; import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminImpl; @@ -11,8 +12,8 @@ import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentImpl; import com.yahoo.vespa.hosted.dockerapi.ContainerName; import com.yahoo.vespa.hosted.dockerapi.DockerImage; import com.yahoo.vespa.hosted.node.admin.docker.DockerOperationsImpl; -import com.yahoo.vespa.hosted.node.admin.noderepository.NodeState; import com.yahoo.vespa.hosted.node.admin.util.Environment; +import com.yahoo.vespa.hosted.provision.Node; import org.junit.Test; import java.net.InetAddress; @@ -36,25 +37,31 @@ import static org.mockito.Mockito.when; public class ResumeTest { @Test public void test() throws InterruptedException, UnknownHostException { - CallOrderVerifier callOrder = new CallOrderVerifier(); - NodeRepoMock nodeRepositoryMock = new NodeRepoMock(callOrder); - MaintenanceSchedulerMock maintenanceSchedulerMock = new MaintenanceSchedulerMock(callOrder); - OrchestratorMock orchestratorMock = new OrchestratorMock(callOrder); - DockerMock dockerMock = new DockerMock(callOrder); + CallOrderVerifier callOrderVerifier = new CallOrderVerifier(); + NodeRepoMock nodeRepositoryMock = new NodeRepoMock(callOrderVerifier); + StorageMaintainerMock maintenanceSchedulerMock = new StorageMaintainerMock(callOrderVerifier); + OrchestratorMock orchestratorMock = new OrchestratorMock(callOrderVerifier); + DockerMock dockerMock = new DockerMock(callOrderVerifier); Environment environment = mock(Environment.class); when(environment.getConfigServerHosts()).thenReturn(Collections.emptySet()); 
when(environment.getInetAddressForHost(any(String.class))).thenReturn(InetAddress.getByName("1.1.1.1")); - Function<HostName, NodeAgent> nodeAgentFactory = (hostName) -> - new NodeAgentImpl(hostName, nodeRepositoryMock, orchestratorMock, new DockerOperationsImpl(dockerMock, environment), maintenanceSchedulerMock); - NodeAdmin nodeAdmin = new NodeAdminImpl(dockerMock, nodeAgentFactory, maintenanceSchedulerMock, 100); + MetricReceiverWrapper mr = new MetricReceiverWrapper(MetricReceiver.nullImplementation); + Function<String, NodeAgent> nodeAgentFactory = (hostName) -> + new NodeAgentImpl(hostName, nodeRepositoryMock, orchestratorMock, new DockerOperationsImpl(dockerMock, environment), maintenanceSchedulerMock, mr); + NodeAdmin nodeAdmin = new NodeAdminImpl(dockerMock, nodeAgentFactory, maintenanceSchedulerMock, 100, mr); nodeRepositoryMock.addContainerNodeSpec(new ContainerNodeSpec( - new HostName("host1"), + "host1", Optional.of(new DockerImage("dockerImage")), new ContainerName("container"), - NodeState.ACTIVE, + Node.State.active, + "tenant", + "docker", + Optional.empty(), + Optional.empty(), + Optional.empty(), Optional.of(1L), Optional.of(1L), Optional.of(1d), @@ -69,9 +76,8 @@ public class ResumeTest { } // Check that the container is started and NodeRepo has received the PATCH update - assertTrue(callOrder.verifyInOrder(1000, - "createContainerCommand with DockerImage: DockerImage { imageId=dockerImage }, HostName: host1, ContainerName: ContainerName { name=container }", - "updateNodeAttributes with HostName: host1, NodeAttributes: NodeAttributes{restartGeneration=1, dockerImage=DockerImage { imageId=dockerImage }, vespaVersion='null'}")); + callOrderVerifier.assertInOrder("createContainerCommand with DockerImage: DockerImage { imageId=dockerImage }, HostName: host1, ContainerName: ContainerName { name=container }", + "updateNodeAttributes with HostName: host1, NodeAttributes: NodeAttributes{restartGeneration=1, dockerImage=DockerImage { imageId=dockerImage 
}, vespaVersion='null'}"); // Force orchestrator to reject the suspend orchestratorMock.setForceGroupSuspendResponse(Optional.of("Orchestrator reject suspend")); @@ -83,7 +89,7 @@ public class ResumeTest { } assertThat(updater.setResumeStateAndCheckIfResumed(NodeAdminStateUpdater.State.SUSPENDED), is(Optional.of("Orchestrator reject suspend"))); - //Make orchestrator allow suspend callOrder + //Make orchestrator allow suspend requests orchestratorMock.setForceGroupSuspendResponse(Optional.empty()); assertThat(updater.setResumeStateAndCheckIfResumed(NodeAdminStateUpdater.State.SUSPENDED), is(Optional.empty())); @@ -102,10 +108,9 @@ public class ResumeTest { Thread.sleep(10); } - assertTrue(callOrder.verifyInOrder(1000, - "Resume for host1", - "Suspend with parent: basehostname and hostnames: [host1] - Forced response: Optional[Orchestrator reject suspend]", - "Suspend with parent: basehostname and hostnames: [host1] - Forced response: Optional.empty")); + callOrderVerifier.assertInOrder("Resume for host1", + "Suspend with parent: basehostname and hostnames: [host1] - Forced response: Optional[Orchestrator reject suspend]", + "Suspend with parent: basehostname and hostnames: [host1] - Forced response: Optional.empty"); updater.deconstruct(); } diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RunInContainerTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RunInContainerTest.java index db27e8b98a8..452e5bb58dc 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RunInContainerTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RunInContainerTest.java @@ -3,6 +3,10 @@ package com.yahoo.vespa.hosted.node.admin.integrationTests; import com.yahoo.application.Networking; import com.yahoo.application.container.JDisc; +import com.yahoo.vespa.hosted.dockerapi.ContainerName; +import 
com.yahoo.vespa.hosted.dockerapi.DockerImage; +import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec; +import com.yahoo.vespa.hosted.provision.Node; import org.apache.commons.io.IOUtils; import org.apache.http.HttpEntity; import org.apache.http.HttpHost; @@ -24,7 +28,6 @@ import java.util.Optional; import static org.hamcrest.Matchers.is; import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; /** * @author dybis @@ -103,10 +106,28 @@ public class RunInContainerTest { waitForJdiscContainerToServe(); assertThat(doPutCall("resume"), is(true)); + // No nodes to suspend, always successful + assertThat(doPutCall("suspend"), is(true)); + + ComponentsProviderWithMocks.nodeRepositoryMock.addContainerNodeSpec(new ContainerNodeSpec( + "hostName", + Optional.of(new DockerImage("dockerImage")), + new ContainerName("container"), + Node.State.active, + "tenant", + "docker", + Optional.empty(), + Optional.empty(), + Optional.empty(), + Optional.of(1L), + Optional.of(1L), + Optional.of(1d), + Optional.of(1d), + Optional.of(1d))); ComponentsProviderWithMocks.orchestratorMock.setForceGroupSuspendResponse(Optional.of("Denied")); assertThat(doPutCall("suspend"), is(false)); - assertTrue(ComponentsProviderWithMocks.callOrder.verifyInOrder(1000, - "Suspend with parent: localhost and hostnames: [] - Forced response: Optional[Denied]")); + ComponentsProviderWithMocks.callOrderVerifier + .assertInOrder("Suspend with parent: localhost and hostnames: [hostName] - Forced response: Optional[Denied]"); assertThat(doGetInfoCall(), is("{\"dockerHostHostName\":\"localhost\",\"NodeAdmin\":{\"isFrozen\":true,\"NodeAgents\":[]}}")); } diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/StorageMaintainerMock.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/StorageMaintainerMock.java new file mode 100644 index 00000000000..064f2d653bd --- /dev/null +++ 
b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/StorageMaintainerMock.java @@ -0,0 +1,38 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.hosted.node.admin.integrationTests; + +import com.yahoo.vespa.hosted.dockerapi.ContainerName; +import com.yahoo.vespa.hosted.node.admin.maintenance.StorageMaintainer; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +/** + * @author valerijf + */ +public class StorageMaintainerMock extends StorageMaintainer { + private final CallOrderVerifier callOrderVerifier; + + public StorageMaintainerMock(CallOrderVerifier callOrderVerifier) { + this.callOrderVerifier = callOrderVerifier; + } + + @Override + public Map<String, Number> updateIfNeededAndGetDiskMetricsFor(ContainerName containerName) { + return new HashMap<>(); + } + + @Override + public void removeOldFilesFromNode(ContainerName containerName) { + } + + @Override + public void cleanNodeAdmin() { + } + + @Override + public void deleteContainerStorage(ContainerName containerName) throws IOException { + callOrderVerifier.add("DeleteContainerStorage with ContainerName: " + containerName); + } +} diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainerTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainerTest.java new file mode 100644 index 00000000000..c1603a7535e --- /dev/null +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainerTest.java @@ -0,0 +1,30 @@ +package com.yahoo.vespa.hosted.node.admin.maintenance; + +import com.yahoo.vespa.hosted.node.maintenance.DeleteOldAppDataTest; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.io.IOException; + +import static org.junit.Assert.*; + +/** + * @author dybis + */ +public class 
StorageMaintainerTest { + + @Rule + public TemporaryFolder folder = new TemporaryFolder(); + + @Test + public void testDiskUsed() throws IOException, InterruptedException { + int writeSize = 10000; + DeleteOldAppDataTest.writeNBytesToFile(folder.newFile(), writeSize); + + StorageMaintainer storageMaintainer = new StorageMaintainer(); + long usedBytes = storageMaintainer.getDiscUsedInBytes(folder.getRoot()); + if (usedBytes * 4 < writeSize || usedBytes > writeSize * 4) + fail("Used bytes is " + usedBytes + ", but wrote " + writeSize + " bytes, not even close."); + } +}
\ No newline at end of file diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImplTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImplTest.java index e8e52af5e33..5af24e71c3d 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImplTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImplTest.java @@ -2,16 +2,17 @@ package com.yahoo.vespa.hosted.node.admin.nodeadmin; import com.yahoo.collections.Pair; -import com.yahoo.vespa.applicationmodel.HostName; +import com.yahoo.metrics.simple.MetricReceiver; +import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper; import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec; import com.yahoo.vespa.hosted.dockerapi.Container; import com.yahoo.vespa.hosted.dockerapi.ContainerName; import com.yahoo.vespa.hosted.dockerapi.Docker; import com.yahoo.vespa.hosted.dockerapi.DockerImage; -import com.yahoo.vespa.hosted.node.admin.maintenance.MaintenanceScheduler; +import com.yahoo.vespa.hosted.node.admin.maintenance.StorageMaintainer; import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgent; import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentImpl; -import com.yahoo.vespa.hosted.node.admin.noderepository.NodeState; +import com.yahoo.vespa.hosted.provision.Node; import org.junit.Test; import org.mockito.InOrder; @@ -43,21 +44,22 @@ public class NodeAdminImplTest { private static final Optional<Double> MIN_DISK_AVAILABLE_GB = Optional.of(1.0); // Trick to allow mocking of typed interface without casts/warnings. 
- private interface NodeAgentFactory extends Function<HostName, NodeAgent> {} + private interface NodeAgentFactory extends Function<String, NodeAgent> {} @Test public void nodeAgentsAreProperlyLifeCycleManaged() throws Exception { final Docker docker = mock(Docker.class); - final Function<HostName, NodeAgent> nodeAgentFactory = mock(NodeAgentFactory.class); - final MaintenanceScheduler maintenanceScheduler = mock(MaintenanceScheduler.class); + final Function<String, NodeAgent> nodeAgentFactory = mock(NodeAgentFactory.class); + final StorageMaintainer storageMaintainer = mock(StorageMaintainer.class); - final NodeAdminImpl nodeAdmin = new NodeAdminImpl(docker, nodeAgentFactory, maintenanceScheduler, 100); + final NodeAdminImpl nodeAdmin = new NodeAdminImpl(docker, nodeAgentFactory, storageMaintainer, 100, + new MetricReceiverWrapper(MetricReceiver.nullImplementation)); final NodeAgent nodeAgent1 = mock(NodeAgentImpl.class); final NodeAgent nodeAgent2 = mock(NodeAgentImpl.class); - when(nodeAgentFactory.apply(any(HostName.class))).thenReturn(nodeAgent1).thenReturn(nodeAgent2); + when(nodeAgentFactory.apply(any(String.class))).thenReturn(nodeAgent1).thenReturn(nodeAgent2); - final HostName hostName = new HostName("host"); + final String hostName = "host"; final DockerImage dockerImage = new DockerImage("image"); final ContainerName containerName = new ContainerName("container"); final boolean isRunning = true; @@ -66,7 +68,12 @@ public class NodeAdminImplTest { hostName, Optional.of(dockerImage), containerName, - NodeState.ACTIVE, + Node.State.active, + "tenant", + "docker", + Optional.empty(), + Optional.empty(), + Optional.empty(), Optional.of(1L), Optional.of(1L), MIN_CPU_CORES, @@ -84,12 +91,12 @@ public class NodeAdminImplTest { inOrder.verify(nodeAgent1, never()).stop(); nodeAdmin.synchronizeNodeSpecsToNodeAgents(asList(nodeSpec), asList(existingContainer)); - inOrder.verify(nodeAgentFactory, never()).apply(any(HostName.class)); + 
inOrder.verify(nodeAgentFactory, never()).apply(any(String.class)); inOrder.verify(nodeAgent1, never()).start(1); // inOrder.verify(nodeAgent1).execute(NodeAgent.Command.RUN_ITERATION_NOW); inOrder.verify(nodeAgent1, never()).stop(); nodeAdmin.synchronizeNodeSpecsToNodeAgents(Collections.emptyList(), asList(existingContainer)); - inOrder.verify(nodeAgentFactory, never()).apply(any(HostName.class)); + inOrder.verify(nodeAgentFactory, never()).apply(any(String.class)); verify(nodeAgent1).stop(); nodeAdmin.synchronizeNodeSpecsToNodeAgents(asList(nodeSpec), asList(existingContainer)); @@ -98,7 +105,7 @@ public class NodeAdminImplTest { inOrder.verify(nodeAgent2, never()).stop(); nodeAdmin.synchronizeNodeSpecsToNodeAgents(Collections.emptyList(), Collections.emptyList()); - inOrder.verify(nodeAgentFactory, never()).apply(any(HostName.class)); + inOrder.verify(nodeAgentFactory, never()).apply(any(String.class)); inOrder.verify(nodeAgent2, never()).start(1); // inOrder.verify(nodeAgent2).execute(NodeAgent.Command.RUN_ITERATION_NOW); inOrder.verify(nodeAgent2).stop(); diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java index 65fbdada986..a11d22a9712 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java @@ -1,24 +1,21 @@ package com.yahoo.vespa.hosted.node.admin.nodeadmin; // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ import com.yahoo.prelude.semantics.RuleBaseException; -import com.yahoo.vespa.applicationmodel.HostName; import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec; import com.yahoo.vespa.hosted.dockerapi.ContainerName; import com.yahoo.vespa.hosted.node.admin.integrationTests.CallOrderVerifier; import com.yahoo.vespa.hosted.node.admin.integrationTests.OrchestratorMock; import com.yahoo.vespa.hosted.node.admin.noderepository.NodeRepository; -import com.yahoo.vespa.hosted.node.admin.noderepository.NodeState; +import com.yahoo.vespa.hosted.provision.Node; import org.junit.Test; -import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Optional; -import java.util.concurrent.CountDownLatch; -import static junit.framework.TestCase.assertTrue; import static org.hamcrest.core.Is.is; import static org.hamcrest.junit.MatcherAssert.assertThat; import static org.mockito.Matchers.anyList; @@ -52,8 +49,8 @@ public class NodeAdminStateUpdaterTest { containersToRun.add(createSample()); when(nodeRepository.getContainersToRun()).thenReturn(containersToRun); - CallOrderVerifier callOrder = new CallOrderVerifier(); - OrchestratorMock orchestratorMock = new OrchestratorMock(callOrder); + CallOrderVerifier callOrderVerifier = new CallOrderVerifier(); + OrchestratorMock orchestratorMock = new OrchestratorMock(callOrderVerifier); NodeAdminStateUpdater refresher = new NodeAdminStateUpdater( nodeRepository, nodeAdmin, Long.MAX_VALUE, Long.MAX_VALUE, orchestratorMock, "basehostname"); @@ -86,10 +83,15 @@ public class NodeAdminStateUpdaterTest { private ContainerNodeSpec createSample() { return new ContainerNodeSpec( - new HostName("hostname"), + "hostname", Optional.empty(), new ContainerName("containername"), - NodeState.ACTIVE, + Node.State.active, + "tenant", + "docker", + Optional.empty(), + Optional.empty(), + Optional.empty(), Optional.empty(), 
Optional.empty(), Optional.empty(), diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java index 367a6281ac8..bac1e24ba3f 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java @@ -1,24 +1,32 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.node.admin.nodeagent; -import com.yahoo.vespa.applicationmodel.HostName; -import com.yahoo.vespa.hosted.dockerapi.Container; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.yahoo.metrics.simple.MetricReceiver; import com.yahoo.vespa.hosted.dockerapi.ContainerName; +import com.yahoo.vespa.hosted.dockerapi.ContainerStatsImpl; import com.yahoo.vespa.hosted.dockerapi.Docker; import com.yahoo.vespa.hosted.dockerapi.DockerImage; -import com.yahoo.vespa.hosted.dockerapi.ProcessResult; +import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper; import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec; import com.yahoo.vespa.hosted.node.admin.docker.DockerOperations; -import com.yahoo.vespa.hosted.node.admin.maintenance.MaintenanceScheduler; +import com.yahoo.vespa.hosted.node.admin.maintenance.StorageMaintainer; import com.yahoo.vespa.hosted.node.admin.noderepository.NodeRepository; -import com.yahoo.vespa.hosted.node.admin.noderepository.NodeState; import com.yahoo.vespa.hosted.node.admin.orchestrator.Orchestrator; import com.yahoo.vespa.hosted.node.admin.orchestrator.OrchestratorException; +import com.yahoo.vespa.hosted.provision.Node; import org.junit.Test; import org.mockito.InOrder; +import java.io.File; +import java.io.IOException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; import 
java.util.Optional; +import java.util.Set; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; import static org.mockito.Matchers.any; import static org.mockito.Matchers.eq; @@ -38,18 +46,15 @@ public class NodeAgentImplTest { private static final Optional<Double> MIN_MAIN_MEMORY_AVAILABLE_GB = Optional.of(1.0); private static final Optional<Double> MIN_DISK_AVAILABLE_GB = Optional.of(1.0); - private static final Optional<Container> NO_CONTAINER = Optional.empty(); - - private static final ProcessResult NODE_PROGRAM_DOESNT_EXIST = new ProcessResult(1, "", ""); - - private final HostName hostName = new HostName("hostname"); - private final Docker docker = mock(Docker.class); // TODO: Remove: Use dockerOperations only + private final String hostName = "hostname"; private final DockerOperations dockerOperations = mock(DockerOperations.class); private final NodeRepository nodeRepository = mock(NodeRepository.class); private final Orchestrator orchestrator = mock(Orchestrator.class); - private final MaintenanceScheduler maintenanceScheduler = mock(MaintenanceScheduler.class); + private final StorageMaintainer storageMaintainer = mock(StorageMaintainer.class); + private final MetricReceiverWrapper metricReceiver = new MetricReceiverWrapper(MetricReceiver.nullImplementation); - private final NodeAgentImpl nodeAgent = new NodeAgentImpl(hostName, nodeRepository, orchestrator, dockerOperations, maintenanceScheduler); + private final NodeAgentImpl nodeAgent = new NodeAgentImpl(hostName, nodeRepository, orchestrator, dockerOperations, + storageMaintainer, metricReceiver); @Test public void upToDateContainerIsUntouched() throws Exception { @@ -60,15 +65,21 @@ public class NodeAgentImplTest { hostName, Optional.of(dockerImage), containerName, - NodeState.ACTIVE, + Node.State.active, + "tenant", + "docker", + Optional.empty(), + Optional.empty(), + Optional.empty(), Optional.of(restartGeneration), Optional.of(restartGeneration), MIN_CPU_CORES, 
MIN_MAIN_MEMORY_AVAILABLE_GB, MIN_DISK_AVAILABLE_GB); - final boolean isRunning = true; final String vespaVersion = "7.8.9"; + Docker.ContainerStats containerStats = new ContainerStatsImpl(new HashMap<>(), new HashMap<>(), new HashMap<>(), new HashMap<>()); + when(dockerOperations.getContainerStats(any())).thenReturn(containerStats); when(dockerOperations.shouldScheduleDownloadOfImage(any())).thenReturn(false); when(dockerOperations.removeContainerIfNeeded(eq(nodeSpec), eq(hostName), any())).thenReturn(false); when(dockerOperations.startContainerIfNeeded(eq(nodeSpec))).thenReturn(false); @@ -77,7 +88,7 @@ public class NodeAgentImplTest { nodeAgent.tick(); - verify(orchestrator, never()).suspend(any(HostName.class)); + verify(orchestrator, never()).suspend(any(String.class)); verify(dockerOperations, never()).scheduleDownloadOfImage(any(), any()); final InOrder inOrder = inOrder(dockerOperations, orchestrator, nodeRepository); @@ -102,15 +113,21 @@ public class NodeAgentImplTest { hostName, Optional.of(dockerImage), containerName, - NodeState.ACTIVE, + Node.State.active, + "tenant", + "docker", + Optional.empty(), + Optional.empty(), + Optional.empty(), Optional.of(restartGeneration), Optional.of(restartGeneration), MIN_CPU_CORES, MIN_MAIN_MEMORY_AVAILABLE_GB, MIN_DISK_AVAILABLE_GB); - final boolean isRunning = true; final String vespaVersion = "7.8.9"; + Docker.ContainerStats containerStats = new ContainerStatsImpl(new HashMap<>(), new HashMap<>(), new HashMap<>(), new HashMap<>()); + when(dockerOperations.getContainerStats(any())).thenReturn(containerStats); when(dockerOperations.shouldScheduleDownloadOfImage(any())).thenReturn(false); when(dockerOperations.removeContainerIfNeeded(eq(nodeSpec), eq(hostName), any())).thenReturn(true); when(dockerOperations.startContainerIfNeeded(eq(nodeSpec))).thenReturn(true); @@ -119,7 +136,7 @@ public class NodeAgentImplTest { nodeAgent.tick(); - verify(orchestrator, never()).suspend(any(HostName.class)); + verify(orchestrator, 
never()).suspend(any(String.class)); verify(dockerOperations, never()).scheduleDownloadOfImage(any(), any()); final InOrder inOrder = inOrder(dockerOperations, orchestrator, nodeRepository); @@ -143,21 +160,25 @@ public class NodeAgentImplTest { hostName, Optional.of(newDockerImage), containerName, - NodeState.ACTIVE, + Node.State.active, + "tenant", + "docker", + Optional.empty(), + Optional.empty(), + Optional.empty(), Optional.of(wantedRestartGeneration), Optional.of(currentRestartGeneration), MIN_CPU_CORES, MIN_MAIN_MEMORY_AVAILABLE_GB, MIN_DISK_AVAILABLE_GB); - final String vespaVersion = "7.8.9"; when(dockerOperations.shouldScheduleDownloadOfImage(any())).thenReturn(true); when(nodeRepository.getContainerNodeSpec(hostName)).thenReturn(Optional.of(nodeSpec)); nodeAgent.tick(); - verify(orchestrator, never()).suspend(any(HostName.class)); - verify(orchestrator, never()).resume(any(HostName.class)); + verify(orchestrator, never()).suspend(any(String.class)); + verify(orchestrator, never()).resume(any(String.class)); verify(dockerOperations, never()).removeContainerIfNeeded(eq(nodeSpec), eq(hostName), any()); final InOrder inOrder = inOrder(dockerOperations); @@ -175,7 +196,12 @@ public class NodeAgentImplTest { hostName, Optional.of(dockerImage), containerName, - NodeState.ACTIVE, + Node.State.active, + "tenant", + "docker", + Optional.empty(), + Optional.empty(), + Optional.empty(), Optional.of(wantedRestartGeneration), Optional.of(currentRestartGeneration), MIN_CPU_CORES, @@ -189,12 +215,11 @@ public class NodeAgentImplTest { try { nodeAgent.tick(); fail("Expected to throw an exception"); - } catch (Exception e) { - } + } catch (Exception ignored) { } verify(dockerOperations, never()).startContainerIfNeeded(eq(nodeSpec)); - verify(orchestrator, never()).resume(any(HostName.class)); - verify(nodeRepository, never()).updateNodeAttributes(any(HostName.class), any(NodeAttributes.class)); + verify(orchestrator, never()).resume(any(String.class)); + 
verify(nodeRepository, never()).updateNodeAttributes(any(String.class), any(NodeAttributes.class)); } @Test @@ -206,7 +231,12 @@ public class NodeAgentImplTest { hostName, Optional.of(dockerImage), containerName, - NodeState.FAILED, + Node.State.failed, + "tenant", + "docker", + Optional.empty(), + Optional.empty(), + Optional.empty(), Optional.of(restartGeneration), Optional.of(restartGeneration), MIN_CPU_CORES, @@ -218,8 +248,8 @@ public class NodeAgentImplTest { nodeAgent.tick(); verify(dockerOperations, times(1)).removeContainerIfNeeded(eq(nodeSpec), eq(hostName), any()); - verify(orchestrator, never()).resume(any(HostName.class)); - verify(nodeRepository, never()).updateNodeAttributes(any(HostName.class), any(NodeAttributes.class)); + verify(orchestrator, never()).resume(any(String.class)); + verify(nodeRepository, never()).updateNodeAttributes(any(String.class), any(NodeAttributes.class)); } @Test @@ -231,7 +261,12 @@ public class NodeAgentImplTest { hostName, Optional.of(dockerImage), containerName, - NodeState.INACTIVE, + Node.State.inactive, + "tenant", + "docker", + Optional.empty(), + Optional.empty(), + Optional.empty(), Optional.of(restartGeneration), Optional.of(restartGeneration), MIN_CPU_CORES, @@ -242,15 +277,15 @@ public class NodeAgentImplTest { nodeAgent.tick(); - final InOrder inOrder = inOrder(maintenanceScheduler, dockerOperations); - inOrder.verify(maintenanceScheduler, times(1)).removeOldFilesFromNode(eq(containerName)); + final InOrder inOrder = inOrder(storageMaintainer, dockerOperations); + inOrder.verify(storageMaintainer, times(1)).removeOldFilesFromNode(eq(containerName)); inOrder.verify(dockerOperations, times(1)).removeContainerIfNeeded(eq(nodeSpec), eq(hostName), any()); - verify(orchestrator, never()).resume(any(HostName.class)); - verify(nodeRepository, never()).updateNodeAttributes(any(HostName.class), any(NodeAttributes.class)); + verify(orchestrator, never()).resume(any(String.class)); + verify(nodeRepository, 
never()).updateNodeAttributes(any(String.class), any(NodeAttributes.class)); } - private void nodeRunningContainerIsTakenDownAndCleanedAndRecycled(NodeState nodeState, Optional<Long> wantedRestartGeneration) + private void nodeRunningContainerIsTakenDownAndCleanedAndRecycled(Node.State nodeState, Optional<Long> wantedRestartGeneration) throws Exception { final DockerImage dockerImage = new DockerImage("dockerImage"); final ContainerName containerName = new ContainerName("container-name"); @@ -259,6 +294,11 @@ public class NodeAgentImplTest { Optional.of(dockerImage), containerName, nodeState, + "tenant", + "docker", + Optional.empty(), + Optional.empty(), + Optional.empty(), wantedRestartGeneration, wantedRestartGeneration, //currentRestartGeneration MIN_CPU_CORES, @@ -269,45 +309,50 @@ public class NodeAgentImplTest { nodeAgent.tick(); - final InOrder inOrder = inOrder(maintenanceScheduler, dockerOperations, nodeRepository); - inOrder.verify(maintenanceScheduler, times(1)).removeOldFilesFromNode(eq(containerName)); + final InOrder inOrder = inOrder(storageMaintainer, dockerOperations, nodeRepository); + inOrder.verify(storageMaintainer, times(1)).removeOldFilesFromNode(eq(containerName)); inOrder.verify(dockerOperations, times(1)).removeContainerIfNeeded(eq(nodeSpec), eq(hostName), any()); - inOrder.verify(maintenanceScheduler, times(1)).deleteContainerStorage(eq(containerName)); + inOrder.verify(storageMaintainer, times(1)).deleteContainerStorage(eq(containerName)); inOrder.verify(nodeRepository, times(1)).markAsReady(eq(hostName)); verify(dockerOperations, never()).startContainerIfNeeded(any()); - verify(orchestrator, never()).resume(any(HostName.class)); + verify(orchestrator, never()).resume(any(String.class)); // current Docker image and vespa version should be cleared verify(nodeRepository, times(1)).updateNodeAttributes( - any(HostName.class), eq(new NodeAttributes().withDockerImage(new DockerImage("")).withVespaVersion(""))); + any(String.class), eq(new 
NodeAttributes().withDockerImage(new DockerImage("")).withVespaVersion(""))); } @Test public void dirtyNodeRunningContainerIsTakenDownAndCleanedAndRecycled() throws Exception { - nodeRunningContainerIsTakenDownAndCleanedAndRecycled(NodeState.DIRTY, Optional.of(1L)); + nodeRunningContainerIsTakenDownAndCleanedAndRecycled(Node.State.dirty, Optional.of(1L)); } @Test public void dirtyNodeRunningContainerIsTakenDownAndCleanedAndRecycledNoRestartGeneration() throws Exception { - nodeRunningContainerIsTakenDownAndCleanedAndRecycled(NodeState.DIRTY, Optional.empty()); + nodeRunningContainerIsTakenDownAndCleanedAndRecycled(Node.State.dirty, Optional.empty()); } @Test public void provisionedNodeWithNoContainerIsCleanedAndRecycled() throws Exception { - nodeRunningContainerIsTakenDownAndCleanedAndRecycled(NodeState.PROVISIONED, Optional.of(1L)); + nodeRunningContainerIsTakenDownAndCleanedAndRecycled(Node.State.provisioned, Optional.of(1L)); } @Test public void resumeProgramRunsUntilSuccess() throws Exception { final long restartGeneration = 1; - final HostName hostName = new HostName("hostname"); + final String hostName = "hostname"; final DockerImage wantedDockerImage = new DockerImage("wantedDockerImage"); final ContainerName containerName = new ContainerName("container-name"); final ContainerNodeSpec nodeSpec = new ContainerNodeSpec( hostName, Optional.of(wantedDockerImage), containerName, - NodeState.ACTIVE, + Node.State.active, + "tenant", + "docker", + Optional.empty(), + Optional.empty(), + Optional.empty(), Optional.of(restartGeneration), Optional.of(restartGeneration), MIN_CPU_CORES, @@ -315,6 +360,8 @@ public class NodeAgentImplTest { MIN_DISK_AVAILABLE_GB); final String vespaVersion = "7.8.9"; + Docker.ContainerStats containerStats = new ContainerStatsImpl(new HashMap<>(), new HashMap<>(), new HashMap<>(), new HashMap<>()); + when(dockerOperations.getContainerStats(any())).thenReturn(containerStats); 
when(nodeRepository.getContainerNodeSpec(eq(hostName))).thenReturn(Optional.of(nodeSpec)); when(dockerOperations.shouldScheduleDownloadOfImage(eq(wantedDockerImage))).thenReturn(false); when(dockerOperations.removeContainerIfNeeded(eq(nodeSpec), eq(hostName), any())).thenReturn(true); @@ -330,8 +377,7 @@ public class NodeAgentImplTest { try { nodeAgent.tick(); fail("Expected to throw an exception"); - } catch (RuntimeException e) { - } + } catch (RuntimeException ignored) { } inOrder.verify(dockerOperations, times(1)).executeResume(any()); inOrder.verifyNoMoreInteractions(); @@ -343,4 +389,42 @@ public class NodeAgentImplTest { inOrder.verify(orchestrator).resume(hostName); inOrder.verifyNoMoreInteractions(); } + + @Test + @SuppressWarnings("unchecked") + public void testGetRelevantMetrics() throws IOException { + final ObjectMapper objectMapper = new ObjectMapper(); + ClassLoader classLoader = getClass().getClassLoader(); + File statsFile = new File(classLoader.getResource("docker.stats.json").getFile()); + Map<String, Object> dockerStats = objectMapper.readValue(statsFile, Map.class); + + Map<String, Object> networks = (Map<String, Object>) dockerStats.get("networks"); + Map<String, Object> cpu_stats = (Map<String, Object>) dockerStats.get("cpu_stats"); + Map<String, Object> memory_stats = (Map<String, Object>) dockerStats.get("memory_stats"); + Map<String, Object> blkio_stats = (Map<String, Object>) dockerStats.get("blkio_stats"); + Docker.ContainerStats stats = new ContainerStatsImpl(networks, cpu_stats, memory_stats, blkio_stats); + + final ContainerName containerName = new ContainerName("cont-name"); + when(dockerOperations.getContainerStats(eq(containerName))).thenReturn(stats); + + Optional<String> version = Optional.of("1.2.3"); + ContainerNodeSpec.Owner owner = new ContainerNodeSpec.Owner("tester", "testapp", "testinstance"); + ContainerNodeSpec.Membership membership = new ContainerNodeSpec.Membership("clustType", "clustId", "grp", 3, false); + 
nodeAgent.lastNodeSpec = new ContainerNodeSpec(hostName, null, containerName, Node.State.active, "tenants", + "docker", version, Optional.of(owner), Optional.of(membership), null, null, null, null, null); + + nodeAgent.updateContainerNodeMetrics(); + + Set<Map<String, Object>> actualMetrics = new HashSet<>(); + for (MetricReceiverWrapper.DimensionMetrics dimensionMetrics : metricReceiver) { + Map<String, Object> metrics = objectMapper.readValue(dimensionMetrics.toSecretAgentReport(), Map.class); + metrics.remove("timestamp"); // Remove timestamp so we can test against expected map + actualMetrics.add(metrics); + } + + File expectedMetricsFile = new File(classLoader.getResource("docker.stats.metrics.expected.json").getFile()); + Set<Map<String, Object>> expectedMetrics = objectMapper.readValue(expectedMetricsFile, Set.class); + + assertEquals(expectedMetrics, actualMetrics); + } } diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/noderepository/NodeRepositoryImplTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/noderepository/NodeRepositoryImplTest.java index 544a6a5fbb2..fedacfa9e13 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/noderepository/NodeRepositoryImplTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/noderepository/NodeRepositoryImplTest.java @@ -5,11 +5,11 @@ package com.yahoo.vespa.hosted.node.admin.noderepository; import com.google.common.collect.Sets; import com.yahoo.application.Networking; import com.yahoo.application.container.JDisc; -import com.yahoo.vespa.applicationmodel.HostName; import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec; import com.yahoo.vespa.hosted.dockerapi.ContainerName; import com.yahoo.vespa.hosted.dockerapi.DockerImage; import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAttributes; +import com.yahoo.vespa.hosted.provision.Node; import com.yahoo.vespa.hosted.provision.testutils.ContainerConfig; import org.junit.After; @@ 
-36,7 +36,7 @@ import static org.junit.Assert.fail; public class NodeRepositoryImplTest { private JDisc container; private int port; - private final Set<HostName> configServerHosts = Sets.newHashSet(new HostName("127.0.0.1")); + private final Set<String> configServerHosts = Sets.newHashSet("127.0.0.1"); private int findRandomOpenPort() throws IOException { @@ -61,7 +61,7 @@ public class NodeRepositoryImplTest { private void waitForJdiscContainerToServe() throws InterruptedException { Instant start = Instant.now(); - NodeRepository nodeRepositoryApi = new NodeRepositoryImpl(Sets.newHashSet(new HostName("127.0.0.1")), port, "foobar"); + NodeRepository nodeRepositoryApi = new NodeRepositoryImpl(Sets.newHashSet("127.0.0.1"), port, "foobar"); while (Instant.now().minusSeconds(120).isBefore(start)) { try { nodeRepositoryApi.getContainersToRun(); @@ -87,10 +87,10 @@ public class NodeRepositoryImplTest { final List<ContainerNodeSpec> containersToRun = nodeRepositoryApi.getContainersToRun(); assertThat(containersToRun.size(), is(1)); final ContainerNodeSpec nodeSpec = containersToRun.get(0); - assertThat(nodeSpec.hostname, is(new HostName("host4.yahoo.com"))); + assertThat(nodeSpec.hostname, is("host4.yahoo.com")); assertThat(nodeSpec.wantedDockerImage.get(), is(new DockerImage("image-123"))); assertThat(nodeSpec.containerName, is(new ContainerName("host4"))); - assertThat(nodeSpec.nodeState, is(NodeState.RESERVED)); + assertThat(nodeSpec.nodeState, is(Node.State.reserved)); assertThat(nodeSpec.wantedRestartGeneration.get(), is(0L)); assertThat(nodeSpec.currentRestartGeneration.get(), is(0L)); assertThat(nodeSpec.minCpuCores.get(), is(2.0)); @@ -102,7 +102,7 @@ public class NodeRepositoryImplTest { public void testGetContainers() throws InterruptedException, IOException { waitForJdiscContainerToServe(); NodeRepository nodeRepositoryApi = new NodeRepositoryImpl(configServerHosts, port, "dockerhost4"); - HostName hostname = new HostName("host4.yahoo.com"); + String hostname = 
"host4.yahoo.com"; Optional<ContainerNodeSpec> nodeSpec = nodeRepositoryApi.getContainerNodeSpec(hostname); assertThat(nodeSpec.isPresent(), is(true)); assertThat(nodeSpec.get().hostname, is(hostname)); @@ -113,7 +113,7 @@ public class NodeRepositoryImplTest { public void testUpdateNodeAttributes() throws InterruptedException, IOException { waitForJdiscContainerToServe(); NodeRepository nodeRepositoryApi = new NodeRepositoryImpl(configServerHosts, port, "dockerhost4"); - HostName hostname = new HostName("host4.yahoo.com"); + String hostname = "host4.yahoo.com"; nodeRepositoryApi.updateNodeAttributes( hostname, new NodeAttributes() @@ -126,7 +126,7 @@ public class NodeRepositoryImplTest { public void testUpdateNodeAttributesWithBadValue() throws InterruptedException, IOException { waitForJdiscContainerToServe(); NodeRepository nodeRepositoryApi = new NodeRepositoryImpl(configServerHosts, port, "dockerhost4"); - HostName hostname = new HostName("host4.yahoo.com"); + String hostname = "host4.yahoo.com"; nodeRepositoryApi.updateNodeAttributes( hostname, new NodeAttributes() @@ -140,17 +140,17 @@ public class NodeRepositoryImplTest { NodeRepository nodeRepositoryApi = new NodeRepositoryImpl(configServerHosts, port, "dockerhost4"); waitForJdiscContainerToServe(); - nodeRepositoryApi.markAsReady(new HostName("host55.yahoo.com")); + nodeRepositoryApi.markAsReady("host55.yahoo.com"); try { - nodeRepositoryApi.markAsReady(new HostName("host1.yahoo.com")); + nodeRepositoryApi.markAsReady("host1.yahoo.com"); fail("Expected failure because host1 is not registered as provisioned, dirty, failed or parked"); } catch (RuntimeException ignored) { // expected } try { - nodeRepositoryApi.markAsReady(new HostName("host101.yahoo.com")); + nodeRepositoryApi.markAsReady("host101.yahoo.com"); fail("Expected failure because host101 does not exist"); } catch (RuntimeException ignored) { // expected diff --git 
a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/orchestrator/OrchestratorImplTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/orchestrator/OrchestratorImplTest.java index 3ced0eaeac5..39af637a45a 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/orchestrator/OrchestratorImplTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/orchestrator/OrchestratorImplTest.java @@ -1,7 +1,6 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.node.admin.orchestrator; -import com.yahoo.vespa.applicationmodel.HostName; import com.yahoo.vespa.hosted.node.admin.util.ConfigServerHttpRequestExecutor; import com.yahoo.vespa.orchestrator.restapi.wire.BatchHostSuspendRequest; import com.yahoo.vespa.orchestrator.restapi.wire.BatchOperationResult; @@ -21,7 +20,7 @@ import static org.mockito.Mockito.*; * @author valerijf */ public class OrchestratorImplTest { - private static final HostName hostName = new HostName("host123.yahoo.com"); + private static final String hostName = "host123.yahoo.com"; private ConfigServerHttpRequestExecutor requestExecutor; private OrchestratorImpl orchestrator; @@ -35,10 +34,10 @@ public class OrchestratorImplTest { public void testSuspendCall() { when(requestExecutor.put( OrchestratorImpl.ORCHESTRATOR_PATH_PREFIX_HOST_API + "/" + hostName+ "/suspended", - OrchestratorImpl.HARDCODED_ORCHESTRATOR_PORT, + OrchestratorImpl.WEB_SERVICE_PORT, Optional.empty(), UpdateHostResponse.class - )).thenReturn(new UpdateHostResponse(hostName.s(), null)); + )).thenReturn(new UpdateHostResponse(hostName, null)); boolean response = orchestrator.suspend(hostName); assertTrue("Expected Orchestrator to approve", response); @@ -48,10 +47,10 @@ public class OrchestratorImplTest { public void testSuspendCallWithFailureReason() { when(requestExecutor.put( OrchestratorImpl.ORCHESTRATOR_PATH_PREFIX_HOST_API + "/" + 
hostName+ "/suspended", - OrchestratorImpl.HARDCODED_ORCHESTRATOR_PORT, + OrchestratorImpl.WEB_SERVICE_PORT, Optional.empty(), UpdateHostResponse.class - )).thenReturn(new UpdateHostResponse(hostName.s(), new HostStateChangeDenialReason("hostname", "service", "fail"))); + )).thenReturn(new UpdateHostResponse(hostName, new HostStateChangeDenialReason("hostname", "service", "fail"))); boolean response = orchestrator.suspend(hostName); assertFalse("Expected Orchestrator to deny when presented with HostChangeDenialReason", response); @@ -88,9 +87,9 @@ public class OrchestratorImplTest { public void testResumeCall() { when(requestExecutor.delete( OrchestratorImpl.ORCHESTRATOR_PATH_PREFIX_HOST_API + "/" + hostName+ "/suspended", - OrchestratorImpl.HARDCODED_ORCHESTRATOR_PORT, + OrchestratorImpl.WEB_SERVICE_PORT, UpdateHostResponse.class - )).thenReturn(new UpdateHostResponse(hostName.s(), null)); + )).thenReturn(new UpdateHostResponse(hostName, null)); boolean response = orchestrator.resume(hostName); assertTrue("Expected Orchestrator to approve", response); @@ -100,9 +99,9 @@ public class OrchestratorImplTest { public void testResumeCallWithFailureReason() { when(requestExecutor.delete( OrchestratorImpl.ORCHESTRATOR_PATH_PREFIX_HOST_API + "/" + hostName+ "/suspended", - OrchestratorImpl.HARDCODED_ORCHESTRATOR_PORT, + OrchestratorImpl.WEB_SERVICE_PORT, UpdateHostResponse.class - )).thenReturn(new UpdateHostResponse(hostName.s(), new HostStateChangeDenialReason("hostname", "service", "fail"))); + )).thenReturn(new UpdateHostResponse(hostName, new HostStateChangeDenialReason("hostname", "service", "fail"))); boolean response = orchestrator.resume(hostName); assertFalse("Expected Orchestrator to deny when presented with HostChangeDenialReason", response); @@ -141,7 +140,7 @@ public class OrchestratorImplTest { when(requestExecutor.put( OrchestratorImpl.ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API, - OrchestratorImpl.HARDCODED_ORCHESTRATOR_PORT, + 
OrchestratorImpl.WEB_SERVICE_PORT, Optional.of(new BatchHostSuspendRequest(parentHostName, hostNames)), BatchOperationResult.class )).thenReturn(BatchOperationResult.successResult()); @@ -158,7 +157,7 @@ public class OrchestratorImplTest { when(requestExecutor.put( OrchestratorImpl.ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API, - OrchestratorImpl.HARDCODED_ORCHESTRATOR_PORT, + OrchestratorImpl.WEB_SERVICE_PORT, Optional.of(new BatchHostSuspendRequest(parentHostName, hostNames)), BatchOperationResult.class )).thenReturn(new BatchOperationResult(failureReason)); @@ -175,7 +174,7 @@ public class OrchestratorImplTest { when(requestExecutor.put( OrchestratorImpl.ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API, - OrchestratorImpl.HARDCODED_ORCHESTRATOR_PORT, + OrchestratorImpl.WEB_SERVICE_PORT, Optional.of(new BatchHostSuspendRequest(parentHostName, hostNames)), BatchOperationResult.class )).thenThrow(new RuntimeException(exceptionMessage)); diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/util/ConfigServerHttpRequestExecutorTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/util/ConfigServerHttpRequestExecutorTest.java index b544a6cc0d4..eb8cd4edeb4 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/util/ConfigServerHttpRequestExecutorTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/util/ConfigServerHttpRequestExecutorTest.java @@ -4,7 +4,6 @@ package com.yahoo.vespa.hosted.node.admin.util; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonProperty; import com.yahoo.collections.ArraySet; -import com.yahoo.vespa.applicationmodel.HostName; import org.apache.http.HttpEntity; import org.apache.http.HttpResponse; import org.apache.http.StatusLine; @@ -63,9 +62,9 @@ public class ConfigServerHttpRequestExecutorTest { @Test public void testBasicParsingSingleServer() throws Exception { - Set<HostName> configServers = new ArraySet<>(2); - 
configServers.add(HostName.apply("host1")); - configServers.add(HostName.apply("host2")); + Set<String> configServers = new ArraySet<>(2); + configServers.add("host1"); + configServers.add("host2"); ConfigServerHttpRequestExecutor executor = new ConfigServerHttpRequestExecutor(configServers, createClientMock()); TestPojo answer = executor.get("/path", 666, TestPojo.class); assertThat(answer.foo, is("bar")); @@ -74,9 +73,9 @@ public class ConfigServerHttpRequestExecutorTest { @Test public void testBasicFailureWithNoRetries() throws Exception { - Set<HostName> configServers = new ArraySet<>(2); - configServers.add(HostName.apply("host1")); - configServers.add(HostName.apply("host2")); + Set<String> configServers = new ArraySet<>(2); + configServers.add("host1"); + configServers.add("host2"); // Server is returning 400, no retries. mockReturnCode = 400; ConfigServerHttpRequestExecutor executor = new ConfigServerHttpRequestExecutor(configServers, createClientMock()); @@ -91,9 +90,9 @@ public class ConfigServerHttpRequestExecutorTest { @Test public void testRetries() throws Exception { - Set<HostName> configServers = new ArraySet<>(2); - configServers.add(HostName.apply("host1")); - configServers.add(HostName.apply("host2")); + Set<String> configServers = new ArraySet<>(2); + configServers.add("host1"); + configServers.add("host2"); // Client is throwing exception, should be retries. mockReturnCode = 100000; ConfigServerHttpRequestExecutor executor = new ConfigServerHttpRequestExecutor(configServers, createClientMock()); @@ -109,9 +108,9 @@ public class ConfigServerHttpRequestExecutorTest { @Test public void testNotFound() throws Exception { - Set<HostName> configServers = new ArraySet<>(2); - configServers.add(HostName.apply("host1")); - configServers.add(HostName.apply("host2")); + Set<String> configServers = new ArraySet<>(2); + configServers.add("host1"); + configServers.add("host2"); // Server is returning 404, special exception is thrown. 
mockReturnCode = 404; ConfigServerHttpRequestExecutor executor = new ConfigServerHttpRequestExecutor(configServers, createClientMock()); diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/util/SecretAgentScheduleMakerTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/util/SecretAgentScheduleMakerTest.java new file mode 100644 index 00000000000..471101a85dc --- /dev/null +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/util/SecretAgentScheduleMakerTest.java @@ -0,0 +1,77 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.hosted.node.admin.util; + +import org.junit.Test; + +import java.nio.file.Paths; + +import static org.junit.Assert.assertEquals; + +/** + * @author valerijf + */ +public class SecretAgentScheduleMakerTest { + + @Test + public void generateFullSecretAgentScheduleTest() { + SecretAgentScheduleMaker scheduleMaker = new SecretAgentScheduleMaker("system-checks", 60, + Paths.get("/some/test"), "arg1", "arg2 with space") + .withTag("tenantName", "vespa") + .withTag("app", "canary-docker.default") + .withTag("clustertype", "container") + .withTag("clusterid", "canary") + .withTag("vespaVersion", "6.13.37") + .withTag("role", "tenants") + .withTag("flavor", "docker") + .withTag("state", "active") + .withTag("zone", "test.us-west-5"); + + assertEquals( + "- id: system-checks\n" + + " interval: 60\n" + + " user: nobody\n" + + " check: /some/test\n" + + " args: \n" + + " - arg1\n" + + " - arg2 with space\n" + + " tags:\n" + + " namespace: Vespa\n" + + " tenantName: vespa\n" + + " app: canary-docker.default\n" + + " clustertype: container\n" + + " clusterid: canary\n" + + " vespaVersion: 6.13.37\n" + + " role: tenants\n" + + " flavor: docker\n" + + " state: active\n" + + " zone: test.us-west-5\n", scheduleMaker.toString()); + } + + @Test + public void generateMinimalSecretAgentScheduleTest() { + 
SecretAgentScheduleMaker scheduleMaker = new SecretAgentScheduleMaker("system-checks", 60, + Paths.get("/some/test")); + + assertEquals( + "- id: system-checks\n" + + " interval: 60\n" + + " user: nobody\n" + + " check: /some/test\n" + + " tags:\n" + + " namespace: Vespa\n", scheduleMaker.toString()); + } + + @Test + public void generateSecretAgentScheduleWithDifferentUserTest() { + SecretAgentScheduleMaker scheduleMaker = new SecretAgentScheduleMaker("system-checks", 60, + Paths.get("/some/test")).withRunAsUser("yahoo"); + + assertEquals( + "- id: system-checks\n" + + " interval: 60\n" + + " user: yahoo\n" + + " check: /some/test\n" + + " tags:\n" + + " namespace: Vespa\n", scheduleMaker.toString()); + } +} diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/maintenance/DeleteOldAppDataTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/maintenance/DeleteOldAppDataTest.java index 84922852365..462216ea827 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/maintenance/DeleteOldAppDataTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/maintenance/DeleteOldAppDataTest.java @@ -1,15 +1,14 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.hosted.node.maintenance; -import org.apache.commons.lang3.StringUtils; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; import java.io.File; -import java.io.FileWriter; import java.io.IOException; +import java.nio.file.Files; import java.time.Duration; import java.util.Arrays; @@ -157,13 +156,13 @@ public class DeleteOldAppDataTest { initSubDirectories(); File temp1 = new File(folder.getRoot(), "small_file"); - writeNBytesToFiles(temp1, 50); + writeNBytesToFile(temp1, 50); File temp2 = new File(folder.getRoot(), "some_file"); - writeNBytesToFiles(temp2, 20); + writeNBytesToFile(temp2, 20); File temp3 = new File(folder.getRoot(), "test_folder1/some_other_file"); - writeNBytesToFiles(temp3, 75); + writeNBytesToFile(temp3, 75); DeleteOldAppData.deleteFilesLargerThan(folder.getRoot(), 10); @@ -238,9 +237,7 @@ public class DeleteOldAppDataTest { return total; } - private static void writeNBytesToFiles(File file, int nBytes) throws IOException { - try (FileWriter writer = new FileWriter(file)) { - writer.write(StringUtils.repeat("0", nBytes)); - } + public static void writeNBytesToFile(File file, int nBytes) throws IOException { + Files.write(file.toPath(), new byte[nBytes]); } } diff --git a/node-admin/src/test/resources/docker.stats.json b/node-admin/src/test/resources/docker.stats.json new file mode 100644 index 00000000000..1691736f11a --- /dev/null +++ b/node-admin/src/test/resources/docker.stats.json @@ -0,0 +1,376 @@ +{ + "read":"2016-10-05T07:28:17.228361751Z", + "precpu_stats":{ + "cpu_usage":{ + "total_usage":332026268601, + "percpu_usage":[ + 46767331191, + 46637593621, + 36196010351, + 38846420953, + 44237804850, + 35751912062, + 44546685143, + 39042510430 + ], + "usage_in_kernelmode":44040000000, + "usage_in_usermode":158940000000 + }, + "system_cpu_usage":5876874910000000, + "throttling_data":{ + "periods":3212, + "throttled_periods":322, + "throttled_time":4490 + } + }, + 
"cpu_stats":{ + "cpu_usage":{ + "total_usage":332131205198, + "percpu_usage":[ + 46774047576, + 46639554407, + 36204346956, + 38879143616, + 44256258947, + 35760086876, + 44567865660, + 39049901160 + ], + "usage_in_kernelmode":44050000000, + "usage_in_usermode":158950000000 + }, + "system_cpu_usage":5876882680000000, + "throttling_data":{ + "periods":3242, + "throttled_periods":332, + "throttled_time":4523 + } + }, + "memory_stats":{ + "usage":1752707072, + "max_usage":1818116096, + "stats":{ + "active_anon":1326051328, + "active_file":188919808, + "cache":426680320, + "hierarchical_memory_limit":4294967296, + "hierarchical_memsw_limit":8589934592, + "inactive_anon":0, + "inactive_file":237735936, + "mapped_file":62976000, + "pgfault":3102812, + "pgmajfault":1403, + "pgpgin":1691151, + "pgpgout":1263244, + "rss":1326026752, + "rss_huge":0, + "swap":0, + "total_active_anon":1326051328, + "total_active_file":188919808, + "total_cache":426680320, + "total_inactive_anon":0, + "total_inactive_file":237735936, + "total_mapped_file":62976000, + "total_pgfault":3102812, + "total_pgmajfault":1403, + "total_pgpgin":1691151, + "total_pgpgout":1263244, + "total_rss":1326026752, + "total_rss_huge":0, + "total_swap":0, + "total_unevictable":0, + "unevictable":0 + }, + "failcnt":0, + "limit":4294967296 + }, + "blkio_stats":{ + "io_service_bytes_recursive":[ + { + "major":252, + "minor":0, + "op":"Read", + "value":53248 + }, + { + "major":252, + "minor":0, + "op":"Write", + "value":602112 + }, + { + "major":252, + "minor":0, + "op":"Sync", + "value":0 + }, + { + "major":252, + "minor":0, + "op":"Async", + "value":655360 + }, + { + "major":252, + "minor":0, + "op":"Total", + "value":655360 + }, + { + "major":7, + "minor":0, + "op":"Read", + "value":308224 + }, + { + "major":7, + "minor":0, + "op":"Write", + "value":573440 + }, + { + "major":7, + "minor":0, + "op":"Sync", + "value":0 + }, + { + "major":7, + "minor":0, + "op":"Async", + "value":881664 + }, + { + "major":7, + 
"minor":0, + "op":"Total", + "value":881664 + }, + { + "major":253, + "minor":0, + "op":"Read", + "value":308224 + }, + { + "major":253, + "minor":0, + "op":"Write", + "value":573440 + }, + { + "major":253, + "minor":0, + "op":"Sync", + "value":0 + }, + { + "major":253, + "minor":0, + "op":"Async", + "value":881664 + }, + { + "major":253, + "minor":0, + "op":"Total", + "value":881664 + }, + { + "major":253, + "minor":3, + "op":"Read", + "value":343847936 + }, + { + "major":253, + "minor":3, + "op":"Write", + "value":786432 + }, + { + "major":253, + "minor":3, + "op":"Sync", + "value":131072 + }, + { + "major":253, + "minor":3, + "op":"Async", + "value":344503296 + }, + { + "major":253, + "minor":3, + "op":"Total", + "value":344634368 + } + ], + "io_serviced_recursive":[ + { + "major":252, + "minor":0, + "op":"Read", + "value":13 + }, + { + "major":252, + "minor":0, + "op":"Write", + "value":147 + }, + { + "major":252, + "minor":0, + "op":"Sync", + "value":0 + }, + { + "major":252, + "minor":0, + "op":"Async", + "value":160 + }, + { + "major":252, + "minor":0, + "op":"Total", + "value":160 + }, + { + "major":7, + "minor":0, + "op":"Read", + "value":37 + }, + { + "major":7, + "minor":0, + "op":"Write", + "value":124 + }, + { + "major":7, + "minor":0, + "op":"Sync", + "value":0 + }, + { + "major":7, + "minor":0, + "op":"Async", + "value":161 + }, + { + "major":7, + "minor":0, + "op":"Total", + "value":161 + }, + { + "major":253, + "minor":0, + "op":"Read", + "value":37 + }, + { + "major":253, + "minor":0, + "op":"Write", + "value":124 + }, + { + "major":253, + "minor":0, + "op":"Sync", + "value":0 + }, + { + "major":253, + "minor":0, + "op":"Async", + "value":161 + }, + { + "major":253, + "minor":0, + "op":"Total", + "value":161 + }, + { + "major":253, + "minor":3, + "op":"Read", + "value":11812 + }, + { + "major":253, + "minor":3, + "op":"Write", + "value":142 + }, + { + "major":253, + "minor":3, + "op":"Sync", + "value":2 + }, + { + "major":253, + "minor":3, + 
"op":"Async", + "value":11952 + }, + { + "major":253, + "minor":3, + "op":"Total", + "value":11954 + } + ], + "io_queue_recursive":[ + + ], + "io_service_time_recursive":[ + + ], + "io_wait_time_recursive":[ + + ], + "io_merged_recursive":[ + + ], + "io_time_recursive":[ + + ], + "sectors_recursive":[ + + ] + }, + "pids_stats":{ + + }, + "networks":{ + "eth0":{ + "rx_bytes":19499270, + "rx_packets":58913, + "rx_errors":0, + "rx_dropped":0, + "tx_bytes":20303455, + "tx_packets":62319, + "tx_errors":0, + "tx_dropped":0 + }, + "eth1":{ + "rx_bytes":3245766, + "rx_packets":23462, + "rx_errors":0, + "rx_dropped":0, + "tx_bytes":54246745, + "tx_packets":34562, + "tx_errors":0, + "tx_dropped":0 + } + } +}
\ No newline at end of file diff --git a/node-admin/src/test/resources/docker.stats.metrics.expected.json b/node-admin/src/test/resources/docker.stats.metrics.expected.json new file mode 100644 index 00000000000..a11a44f8fbd --- /dev/null +++ b/node-admin/src/test/resources/docker.stats.metrics.expected.json @@ -0,0 +1,61 @@ +[ + { + "application":"docker", + "dimensions":{ + "flavor":"docker", + "app":"testapp", + "clustertype":"clustType", + "role":"tenants", + "tenantName":"tester", + "host":"hostname", + "vespaVersion":"1.2.3", + "state":"active", + "clusterid":"clustId", + "interface":"eth1" + }, + "metrics":{ + "node.network.bytes_sent":5.4246745E7, + "node.network.bytes_rcvd":3245766.0 + } + }, + { + "application":"docker", + "dimensions":{ + "flavor":"docker", + "app":"testapp", + "clustertype":"clustType", + "role":"tenants", + "tenantName":"tester", + "host":"hostname", + "vespaVersion":"1.2.3", + "state":"active", + "clusterid":"clustId" + }, + "metrics":{ + "node.cpu.system_cpu_usage":5.87688268E15, + "node.cpu.total_usage":3.32131205198E11, + "node.cpu.throttled_time": 4523.0, + "node.memory.usage":1.752707072E9, + "node.memory.limit":4.294967296E9 + } + }, + { + "application":"docker", + "dimensions":{ + "flavor":"docker", + "app":"testapp", + "clustertype":"clustType", + "role":"tenants", + "tenantName":"tester", + "host":"hostname", + "vespaVersion":"1.2.3", + "state":"active", + "clusterid":"clustId", + "interface":"eth0" + }, + "metrics":{ + "node.network.bytes_sent":2.0303455E7, + "node.network.bytes_rcvd":1.949927E7 + } + } +]
\ No newline at end of file diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java index 0f3a87ff585..80333dcd2d4 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java @@ -3,6 +3,7 @@ package com.yahoo.vespa.hosted.provision; import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.ClusterMembership; +import com.yahoo.config.provision.NodeType; import com.yahoo.vespa.hosted.provision.node.Allocation; import com.yahoo.vespa.hosted.provision.node.Flavor; import com.yahoo.vespa.hosted.provision.node.History; @@ -29,7 +30,7 @@ public final class Node { private final Flavor flavor; private final Status status; private final State state; - private final Type type; + private final NodeType type; /** Record of the last event of each type happening to this node */ private final History history; @@ -38,20 +39,20 @@ public final class Node { private Optional<Allocation> allocation; /** Creates a node in the initial state (provisioned) */ - public static Node create(String openStackId, String hostname, Optional<String> parentHostname, Flavor flavor, Type type) { + public static Node create(String openStackId, String hostname, Optional<String> parentHostname, Flavor flavor, NodeType type) { return new Node(openStackId, hostname, parentHostname, flavor, Status.initial(), State.provisioned, Optional.empty(), History.empty(), type); } /** Do not use. 
Construct nodes by calling {@link NodeRepository#createNode} */ public Node(String openStackId, String hostname, Optional<String> parentHostname, - Flavor flavor, Status status, State state, Allocation allocation, History history, Type type) { + Flavor flavor, Status status, State state, Allocation allocation, History history, NodeType type) { this(openStackId, hostname, parentHostname, flavor, status, state, Optional.of(allocation), history, type); } public Node(String openStackId, String hostname, Optional<String> parentHostname, Flavor flavor, Status status, State state, Optional<Allocation> allocation, - History history, Type type) { + History history, NodeType type) { Objects.requireNonNull(openStackId, "A node must have an openstack id"); Objects.requireNonNull(hostname, "A node must have a hostname"); Objects.requireNonNull(parentHostname, "A null parentHostname is not permitted."); @@ -93,14 +94,14 @@ public final class Node { /** Returns the flavor of this node */ public Flavor flavor() { return flavor; } - /** Returns the known information about the nodes ephemeral status */ + /** Returns the known information about the node's ephemeral status */ public Status status() { return status; } /** Returns the current state of this node (in the node state machine) */ public State state() { return state; } /** Returns the type of this node */ - public Type type() { return type; } + public NodeType type() { return type; } /** Returns the current allocation of this, if any */ public Optional<Allocation> allocation() { return allocation; } @@ -130,7 +131,7 @@ public final class Node { return with(allocation.get().unretire()); } - /** Returns a copy of this with the current generation set to generation */ + /** Returns a copy of this with the current restart generation set to generation */ public Node withRestart(Generation generation) { final Optional<Allocation> allocation = this.allocation; if ( ! 
allocation.isPresent()) @@ -145,7 +146,7 @@ public final class Node { } /** Returns a node with the type assigned to the given value */ - public Node with(Type type) { + public Node with(NodeType type) { return new Node(openStackId, hostname, parentHostname, flavor, status, state, allocation, history, type); } @@ -154,7 +155,7 @@ public final class Node { return new Node(openStackId, hostname, parentHostname, flavor, status, state, allocation, history, type); } - /** Returns a copy of this with the current generation set to generation */ + /** Returns a copy of this with the current reboot generation set to generation */ public Node withReboot(Generation generation) { return new Node(openStackId, hostname, parentHostname, flavor, status.withReboot(generation), state, allocation, history, type); @@ -217,7 +218,7 @@ public final class Node { public enum State { - /** This node has been requested (from OpenStack) but is not yet read for use */ + /** This node has been requested (from OpenStack) but is not yet ready for use */ provisioned, /** This node is free and ready for use */ @@ -251,17 +252,4 @@ public final class Node { } } - public enum Type { - - /** A host of a set of (docker) tenant nodes */ - host, - - /** Nodes running the shared proxy layer */ - proxy, - - /** A node to be assigned to a tenant to run application workloads */ - tenant - - } - } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java index 606a5914e11..eee0eae4e83 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java @@ -5,6 +5,7 @@ import com.google.inject.Inject; import com.yahoo.collections.ListMap; import com.yahoo.component.AbstractComponent; import com.yahoo.config.provision.ApplicationId; +import 
com.yahoo.config.provision.NodeType; import com.yahoo.transaction.Mutex; import com.yahoo.transaction.NestedTransaction; import com.yahoo.vespa.curator.Curator; @@ -91,13 +92,22 @@ public class NodeRepository extends AbstractComponent { } /** + * Returns all nodes in any of the given states. + * + * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned + * @return the node, or empty if it was not found in any of the given states + */ + public List<Node> getNodes(Node.State ... inState) { + return zkClient.getNodes(inState).stream().collect(Collectors.toList()); + } + /** * Finds and returns the nodes of the given type in any of the given states. * * @param type the node type to return * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned * @return the node, or empty if it was not found in any of the given states */ - public List<Node> getNodes(Node.Type type, Node.State ... inState) { + public List<Node> getNodes(NodeType type, Node.State ... inState) { return zkClient.getNodes(inState).stream().filter(node -> node.type().equals(type)).collect(Collectors.toList()); } public List<Node> getNodes(ApplicationId id, Node.State ... 
inState) { return zkClient.getNodes(id, inState); } @@ -114,7 +124,7 @@ public class NodeRepository extends AbstractComponent { /** Creates a new node object, without adding it to the node repo */ public Node createNode(String openStackId, String hostname, Optional<String> parentHostname, - Flavor flavor, Node.Type type) { + Flavor flavor, NodeType type) { return Node.create(openStackId, hostname, parentHostname, flavor, type); } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/assimilate/PopulateClient.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/assimilate/PopulateClient.java index 14305692664..422eee820db 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/assimilate/PopulateClient.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/assimilate/PopulateClient.java @@ -95,7 +95,7 @@ public class PopulateClient { Node.State.active, Optional.empty() /* Allocation */, History.empty(), - Node.Type.tenant) // History + NodeType.tenant) // History .allocate( ApplicationId.from( diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainer.java index ce7ae429c40..32478473111 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainer.java @@ -4,9 +4,9 @@ package com.yahoo.vespa.hosted.provision.maintenance; import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.Deployer; import com.yahoo.config.provision.Deployment; +import com.yahoo.config.provision.NodeType; import com.yahoo.vespa.hosted.provision.Node; import com.yahoo.vespa.hosted.provision.NodeRepository; -import com.yahoo.yolean.Exceptions; import java.time.Duration; import 
java.util.Optional; @@ -35,7 +35,7 @@ public class ApplicationMaintainer extends Maintainer { @Override protected void maintain() { Set<ApplicationId> applications = - nodeRepository().getNodes(Node.Type.tenant, Node.State.active).stream().map(node -> node.allocation().get().owner()).collect(Collectors.toSet()); + nodeRepository().getNodes(Node.State.active).stream().map(node -> node.allocation().get().owner()).collect(Collectors.toSet()); for (ApplicationId application : applications) { try { diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Expirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Expirer.java index 495ff3b756f..5b6cc3b11a3 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Expirer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Expirer.java @@ -1,6 +1,7 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.hosted.provision.maintenance; +import com.yahoo.config.provision.NodeType; import com.yahoo.vespa.hosted.provision.Node; import com.yahoo.vespa.hosted.provision.NodeRepository; import com.yahoo.vespa.hosted.provision.node.History; @@ -47,7 +48,7 @@ public abstract class Expirer extends Maintainer { @Override protected void maintain() { List<Node> expired = new ArrayList<>(); - for (Node node : nodeRepository().getNodes(Node.Type.tenant, fromState)) { + for (Node node : nodeRepository().getNodes(fromState)) { Optional<History.Event> event = node.history().event(eventType); if (event.isPresent() && event.get().at().plus(expiryTime).isBefore(clock.instant())) expired.add(node); diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java index 4b16c0947cb..f194d9d53fd 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java @@ -4,6 +4,7 @@ package com.yahoo.vespa.hosted.provision.maintenance; import com.yahoo.config.provision.Deployer; import com.yahoo.config.provision.Deployment; import com.yahoo.config.provision.HostLivenessTracker; +import com.yahoo.config.provision.NodeType; import com.yahoo.transaction.Mutex; import com.yahoo.vespa.applicationmodel.ApplicationInstance; import com.yahoo.vespa.applicationmodel.ServiceCluster; @@ -86,7 +87,7 @@ public class NodeFailer extends Maintainer { // Active nodes for (Node node : determineActiveNodeDownStatus()) { Instant graceTimeEnd = node.history().event(History.Event.Type.down).get().at().plus(downTimeLimit); - if (graceTimeEnd.isBefore(clock.instant()) && ! applicationSuspended(node)) + if (graceTimeEnd.isBefore(clock.instant()) && ! 
applicationSuspended(node) && failAllowedFor(node.type())) failActive(node); } } @@ -95,7 +96,7 @@ public class NodeFailer extends Maintainer { // Update node last request events through ZooKeeper to collect request to all config servers. // We do this here ("lazily") to avoid writing to zk for each config request. try (Mutex lock = nodeRepository().lockUnallocated()) { - for (Node node : nodeRepository().getNodes(Node.Type.tenant, Node.State.ready)) { + for (Node node : nodeRepository().getNodes(Node.State.ready)) { Optional<Instant> lastLocalRequest = hostLivenessTracker.lastRequestFrom(node.hostname()); if ( ! lastLocalRequest.isPresent()) continue; @@ -118,7 +119,7 @@ public class NodeFailer extends Maintainer { // Add 10 minutes to the down time limit to allow nodes to make a request that infrequently. Instant oldestAcceptableRequestTime = clock.instant().minus(downTimeLimit).minus(nodeRequestInterval); - return nodeRepository().getNodes(Node.Type.tenant, Node.State.ready).stream() + return nodeRepository().getNodes(Node.State.ready).stream() .filter(node -> wasMadeReadyBefore(oldestAcceptableRequestTime, node)) .filter(node -> ! hasRecordedRequestAfter(oldestAcceptableRequestTime, node)) .collect(Collectors.toList()); @@ -137,7 +138,7 @@ public class NodeFailer extends Maintainer { } private List<Node> readyNodesWithHardwareFailure() { - return nodeRepository().getNodes(Node.Type.tenant, Node.State.ready).stream() + return nodeRepository().getNodes(Node.State.ready).stream() .filter(node -> node.status().hardwareFailure().isPresent()) .collect(Collectors.toList()); } @@ -153,6 +154,17 @@ public class NodeFailer extends Maintainer { } /** + * We can attempt to fail any number of *tenant* nodes because the operation will not be effected unless + * the node is replaced. 
+ * However, nodes of other types are not replaced (because all of the type are used by a single application), + * so we only allow one to be in failed at any point in time to protect against runaway failing. + */ + private boolean failAllowedFor(NodeType nodeType) { + if (nodeType == NodeType.tenant) return true; + return nodeRepository().getNodes(nodeType, Node.State.failed).size() == 0; + } + + /** * If the node is positively DOWN, and there is no "down" history record, we add it. * If the node is positively UP we remove any "down" history record. * @@ -164,7 +176,7 @@ public class NodeFailer extends Maintainer { for (ServiceCluster<ServiceMonitorStatus> cluster : application.serviceClusters()) { for (ServiceInstance<ServiceMonitorStatus> service : cluster.serviceInstances()) { Optional<Node> node = nodeRepository().getNode(service.hostName().s(), Node.State.active); - if ( ! node.isPresent()) continue; // we also get status from infrastructure nodes, which are not in the repo + if ( ! node.isPresent()) continue; // we also get status from infrastructure nodes, which are not in the repo. 
TODO: remove when proxy nodes are in node repo everywhere if (service.serviceStatus().equals(ServiceMonitorStatus.DOWN)) downNodes.add(recordAsDown(node.get())); diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java index 3e6881b912b..fa3bdeea776 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java @@ -105,7 +105,7 @@ public class NodeRepositoryMaintenance extends AbstractComponent { redeployFrequency = Duration.ofMinutes(30); zooKeeperAccessMaintenanceInterval = Duration.ofSeconds(10); reservationExpiry = Duration.ofMinutes(10); // Need to be long enough for deployment to be finished for all config model versions - inactiveExpiry = Duration.ofMinutes(1); + inactiveExpiry = Duration.ofSeconds(2); // support interactive wipe start over retiredExpiry = Duration.ofMinutes(1); failedExpiry = Duration.ofMinutes(10); dirtyExpiry = Duration.ofMinutes(30); diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ZooKeeperAccessMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ZooKeeperAccessMaintainer.java index 085e3e4dac8..d558555512f 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ZooKeeperAccessMaintainer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ZooKeeperAccessMaintainer.java @@ -1,5 +1,6 @@ package com.yahoo.vespa.hosted.provision.maintenance; +import com.yahoo.config.provision.NodeType; import com.yahoo.vespa.curator.Curator; import com.yahoo.vespa.hosted.provision.Node; import com.yahoo.vespa.hosted.provision.NodeRepository; @@ -31,9 +32,9 @@ public class 
ZooKeeperAccessMaintainer extends Maintainer { protected void maintain() { StringBuilder hostList = new StringBuilder(); - for (Node node : nodeRepository().getNodes(Node.Type.tenant)) + for (Node node : nodeRepository().getNodes(NodeType.tenant)) hostList.append(node.hostname()).append(","); - for (Node node : nodeRepository().getNodes(Node.Type.proxy)) + for (Node node : nodeRepository().getNodes(NodeType.proxy)) hostList.append(node.hostname()).append(","); for (String hostPort : curator.connectionSpec().split(",")) hostList.append(hostPort.split(":")[0]).append(","); diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/monitoring/ProvisionMetrics.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/monitoring/ProvisionMetrics.java index b1e0df929f4..18f4765fa03 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/monitoring/ProvisionMetrics.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/monitoring/ProvisionMetrics.java @@ -2,6 +2,7 @@ package com.yahoo.vespa.hosted.provision.monitoring; import com.yahoo.component.AbstractComponent; +import com.yahoo.config.provision.NodeType; import com.yahoo.jdisc.Metric; import com.yahoo.log.LogLevel; import com.yahoo.vespa.hosted.provision.Node; @@ -50,10 +51,11 @@ public class ProvisionMetrics extends AbstractComponent { log.log(LogLevel.DEBUG, "Running provision metrics task"); try { for (Node.State state : Node.State.values()) - metric.set("hostedVespa." + state.name() + "Hosts", nodeRepository.getNodes(Node.Type.tenant, state).size(), null); + metric.set("hostedVespa." 
+ state.name() + "Hosts", nodeRepository.getNodes(NodeType.tenant, state).size(), null); } catch (RuntimeException e) { log.log(LogLevel.INFO, "Failed gathering metrics data: " + e.getMessage()); } } } + } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Flavor.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Flavor.java index 0893ca75f92..1039beea7c0 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Flavor.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Flavor.java @@ -17,6 +17,7 @@ public class Flavor { private final String name; private final int cost; + private final boolean isStock; private final Type type; private final double minCpuCores; private final double minMainMemoryAvailableGb; @@ -32,6 +33,7 @@ public class Flavor { this.name = flavorConfig.name(); this.replacesFlavors = new ArrayList<>(); this.cost = flavorConfig.cost(); + this.isStock = flavorConfig.stock(); this.type = Type.valueOf(flavorConfig.environment()); this.minCpuCores = flavorConfig.minCpuCores(); this.minMainMemoryAvailableGb = flavorConfig.minMainMemoryAvailableGb(); @@ -39,6 +41,7 @@ public class Flavor { this.description = flavorConfig.description(); } + /** Returns the unique identity of this flavor */ public String name() { return name; } /** @@ -47,26 +50,18 @@ public class Flavor { * @return Monthly cost in USD */ public int cost() { return cost; } + + public boolean isStock() { return isStock; } - public double getMinMainMemoryAvailableGb() { - return minMainMemoryAvailableGb; - } + public double getMinMainMemoryAvailableGb() { return minMainMemoryAvailableGb; } - public double getMinDiskAvailableGb() { - return minDiskAvailableGb; - } + public double getMinDiskAvailableGb() { return minDiskAvailableGb; } - public double getMinCpuCores() { - return minCpuCores; - } + public double getMinCpuCores() { return minCpuCores; } - public String getDescription() { - 
return description; - } + public String getDescription() { return description; } - public Type getType() { - return type; - } + public Type getType() { return type; } /** * Returns the canonical name of this flavor - which is the name which should be used as an interface to users. @@ -78,11 +73,16 @@ public class Flavor { * * The logic is that we can use this to capture the gritty details of configurations in exact flavor names * but also encourage users to refer to them by a common name by letting such flavor variants declare that they - * replace the canonical name we want. However, if a node replaces multiple names, it means that a former - * flavor distinction has become obsolete so this name becomes one of the canonical names users should refer to. + * replace the canonical name we want. However, if a node replaces multiple names, we have no basis for choosing one + * of them as the canonical, so we return the current as canonical. */ public String canonicalName() { - return replacesFlavors.size() == 1 ? replacesFlavors.get(0).canonicalName() : name; + return isCanonical() ? 
name : replacesFlavors.get(0).canonicalName(); + } + + /** Returns whether this is a canonical flavor */ + public boolean isCanonical() { + return replacesFlavors.size() != 1; } /** diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java index 7fb3abb5b8e..d85347847ae 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java @@ -6,6 +6,7 @@ import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.ApplicationName; import com.yahoo.config.provision.ClusterMembership; import com.yahoo.config.provision.InstanceName; +import com.yahoo.config.provision.NodeType; import com.yahoo.config.provision.TenantName; import com.yahoo.slime.ArrayTraverser; import com.yahoo.slime.Cursor; @@ -279,15 +280,15 @@ public class NodeSerializer { throw new IllegalArgumentException("Serialized form of '" + agent + "' not defined"); } - private Node.Type nodeTypeFromString(String typeString) { + private NodeType nodeTypeFromString(String typeString) { switch (typeString) { - case "tenant" : return Node.Type.tenant; - case "host" : return Node.Type.host; - case "proxy" : return Node.Type.proxy; + case "tenant" : return NodeType.tenant; + case "host" : return NodeType.host; + case "proxy" : return NodeType.proxy; default : throw new IllegalArgumentException("Unknown node type '" + typeString + "'"); } } - private String toString(Node.Type type) { + private String toString(NodeType type) { switch (type) { case tenant: return "tenant"; case host: return "host"; diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java index 
6665833c1a2..a759a8fca37 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java @@ -31,7 +31,7 @@ public class CapacityPolicies { switch(zone.environment()) { case dev : case test : return 1; - case perf : return Math.min(requestedCapacity.nodeCount(), 10); // TODO: Decrease to 3 when isRequired is implemented + case perf : return Math.min(requestedCapacity.nodeCount(), 3); case staging: return requestedNodes <= 1 ? requestedNodes : Math.max(2, requestedNodes / 10); case prod : return ensureRedundancy(requestedCapacity.nodeCount()); default : throw new IllegalArgumentException("Unsupported environment " + zone.environment()); @@ -53,7 +53,7 @@ public class CapacityPolicies { /** * Throw if the node count is 1 - + * * @return the argument node count * @throws IllegalArgumentException if only one node is requested */ diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java index 583111b9d65..ea205a15040 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java @@ -10,7 +10,6 @@ import com.yahoo.lang.MutableInteger; import com.yahoo.transaction.Mutex; import com.yahoo.vespa.hosted.provision.Node; import com.yahoo.vespa.hosted.provision.NodeRepository; -import com.yahoo.vespa.hosted.provision.node.Flavor; import java.time.Clock; import java.util.ArrayList; @@ -46,8 +45,7 @@ class GroupPreparer { * * @param application the application we are allocating to * @param cluster the cluster and group we are allocating to - * @param nodeCount the desired number of nodes to return - * @param flavor the desired flavor of those 
nodes + * @param requestedNodes a specification of the requested nodes * @param surplusActiveNodes currently active nodes which are available to be assigned to this group. * This method will remove from this list if it finds it needs additional nodes * @param highestIndex the current highest node index among all active nodes in this cluster. @@ -57,58 +55,71 @@ class GroupPreparer { // Note: This operation may make persisted changes to the set of reserved and inactive nodes, // but it may not change the set of active nodes, as the active nodes must stay in sync with the // active config model which is changed on activate - public List<Node> prepare(ApplicationId application, ClusterSpec cluster, int nodeCount, Flavor flavor, List<Node> surplusActiveNodes, MutableInteger highestIndex) { + public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, + List<Node> surplusActiveNodes, MutableInteger highestIndex) { try (Mutex lock = nodeRepository.lock(application)) { - NodeList nodeList = new NodeList(application, cluster, nodeCount, flavor, highestIndex); + NodeList nodeList = new NodeList(application, cluster, requestedNodes, highestIndex); // Use active nodes nodeList.offer(nodeRepository.getNodes(application, Node.State.active), !canChangeGroup); - if (nodeList.satisfied()) return nodeList.finalNodes(surplusActiveNodes); + if (nodeList.saturated()) return nodeList.finalNodes(surplusActiveNodes); // Use active nodes from other groups that will otherwise be retired - List<Node> accepted = nodeList.offer(sortNodeListByCost(surplusActiveNodes), canChangeGroup); + List<Node> accepted = nodeList.offer(prioritizeNodes(surplusActiveNodes, requestedNodes), canChangeGroup); surplusActiveNodes.removeAll(accepted); - if (nodeList.satisfied()) return nodeList.finalNodes(surplusActiveNodes); + if (nodeList.saturated()) return nodeList.finalNodes(surplusActiveNodes); // Use previously reserved nodes 
nodeList.offer(nodeRepository.getNodes(application, Node.State.reserved), !canChangeGroup); - if (nodeList.satisfied()) return nodeList.finalNodes(surplusActiveNodes); + if (nodeList.saturated()) return nodeList.finalNodes(surplusActiveNodes); // Use inactive nodes - accepted = nodeList.offer(sortNodeListByCost(nodeRepository.getNodes(application, Node.State.inactive)), !canChangeGroup); + accepted = nodeList.offer(prioritizeNodes(nodeRepository.getNodes(application, Node.State.inactive), requestedNodes), !canChangeGroup); nodeList.update(nodeRepository.reserve(accepted)); - if (nodeList.satisfied()) return nodeList.finalNodes(surplusActiveNodes); + if (nodeList.saturated()) return nodeList.finalNodes(surplusActiveNodes); // Use new, ready nodes. Lock ready pool to ensure that nodes are not grabbed by others. try (Mutex readyLock = nodeRepository.lockUnallocated()) { - List<Node> readyNodes = nodeRepository.getNodes(Node.Type.tenant, Node.State.ready); - accepted = nodeList.offer(stripeOverHosts(sortNodeListByCost(readyNodes)), !canChangeGroup); + List<Node> readyNodes = nodeRepository.getNodes(requestedNodes.type(), Node.State.ready); + accepted = nodeList.offer(stripeOverHosts(prioritizeNodes(readyNodes, requestedNodes)), !canChangeGroup); nodeList.update(nodeRepository.reserve(accepted)); } - if (nodeList.satisfied()) return nodeList.finalNodes(surplusActiveNodes); - if (nodeList.whatAboutUsingRetiredNodes()) { - throw new OutOfCapacityException("Could not satisfy request for " + nodeCount + - " nodes of " + flavor + " for " + cluster + + if (nodeList.fullfilled()) return nodeList.finalNodes(surplusActiveNodes); + + // Could not be fulfilled + if (nodeList.wouldBeFulfilledWithRetiredNodes()) + throw new OutOfCapacityException("Could not satisfy " + requestedNodes + " for " + cluster + " because we want to retire existing nodes."); - } - if (nodeList.whatAboutUsingVMs()) { - throw new OutOfCapacityException("Could not satisfy request for " + nodeCount + - " nodes 
of " + flavor + " for " + cluster + + else if (nodeList.wouldBeFulfilledWithClashingParentHost()) + throw new OutOfCapacityException("Could not satisfy " + requestedNodes + " for " + cluster + " because too many have same parentHost."); - } - throw new OutOfCapacityException("Could not satisfy request for " + nodeCount + - " nodes of " + flavor + " for " + cluster + "."); + else + throw new OutOfCapacityException("Could not satisfy " + requestedNodes + " for " + cluster + "."); } } - /** Sort nodes according to their cost, and if the cost is equal, sort by hostname (to get stable tests) */ - private List<Node> sortNodeListByCost(List<Node> nodeList) { - Collections.sort(nodeList, (n1, n2) -> ComparisonChain.start() - .compare(n1.flavor().cost(), n2.flavor().cost()) - .compare(n1.hostname(), n2.hostname()) - .result() - ); + /** + * Returns the node list in prioritized order, where the nodes we would most prefer the application + * to use comes first + */ + private List<Node> prioritizeNodes(List<Node> nodeList, NodeSpec nodeSpec) { + if ( nodeSpec.specifiesNonStockFlavor()) { // sort by exact before inexact flavor match, increasing cost, hostname + Collections.sort(nodeList, (n1, n2) -> ComparisonChain.start() + .compareTrueFirst(nodeSpec.matchesExactly(n1.flavor()), nodeSpec.matchesExactly(n2.flavor())) + .compare(n1.flavor().cost(), n2.flavor().cost()) + .compare(n1.hostname(), n2.hostname()) + .result() + ); + } + else { // sort by increasing cost, hostname + Collections.sort(nodeList, (n1, n2) -> ComparisonChain.start() + .compareTrueFirst(nodeSpec.matchesExactly(n1.flavor()), nodeSpec.matchesExactly(n1.flavor())) + .compare(n1.flavor().cost(), n2.flavor().cost()) + .compare(n1.hostname(), n2.hostname()) + .result() + ); + } return nodeList; } @@ -159,11 +170,8 @@ class GroupPreparer { /** The cluster this list is for */ private final ClusterSpec cluster; - /** The requested capacity of the list */ - private final int requestedNodes; - - /** The requested node 
flavor */ - private final Flavor requestedFlavor; + /** The requested nodes of this list */ + private final NodeSpec requestedNodes; /** The nodes this has accepted so far */ private final Set<Node> nodes = new LinkedHashSet<>(); @@ -183,11 +191,10 @@ class GroupPreparer { /** The next membership index to assign to a new node */ private MutableInteger highestIndex; - public NodeList(ApplicationId application, ClusterSpec cluster, int requestedNodes, Flavor requestedFlavor, MutableInteger highestIndex) { + public NodeList(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, MutableInteger highestIndex) { this.application = application; this.cluster = cluster; this.requestedNodes = requestedNodes; - this.requestedFlavor = requestedFlavor; this.highestIndex = highestIndex; } @@ -210,7 +217,7 @@ class GroupPreparer { ClusterMembership membership = offered.allocation().get().membership(); if ( ! offered.allocation().get().owner().equals(application)) continue; // wrong application if ( ! membership.cluster().equalsIgnoringGroup(cluster)) continue; // wrong cluster id/type - if ( (! canChangeGroup || satisfied()) && ! membership.cluster().group().equals(cluster.group())) continue; // wrong group and we can't or have no reason to change it + if ((! canChangeGroup || saturated()) && ! 
membership.cluster().group().equals(cluster.group())) continue; // wrong group and we can't or have no reason to change it if ( offered.allocation().get().isRemovable()) continue; // don't accept; causes removal if ( indexes.contains(membership.index())) continue; // duplicate index (just to be sure) @@ -218,10 +225,10 @@ class GroupPreparer { if ( offeredNodeHasParentHostnameAlreadyAccepted(this.nodes, offered)) wantToRetireNode = true; if ( !hasCompatibleFlavor(offered)) wantToRetireNode = true; - if ( ( !satisfied() && hasCompatibleFlavor(offered)) || acceptToRetire(offered) ) + if ((!saturated() && hasCompatibleFlavor(offered)) || acceptToRetire(offered) ) accepted.add(acceptNode(offered, wantToRetireNode)); } - else if (! satisfied() && hasCompatibleFlavor(offered)) { + else if (! saturated() && hasCompatibleFlavor(offered)) { if ( offeredNodeHasParentHostnameAlreadyAccepted(this.nodes, offered)) { ++rejectedWithClashingParentHost; continue; @@ -268,7 +275,7 @@ class GroupPreparer { } private boolean hasCompatibleFlavor(Node node) { - return node.flavor().satisfies(requestedFlavor); + return requestedNodes.isCompatible(node.flavor()); } /** Updates the state of some existing nodes in this list by replacing them by id with the given instances. 
*/ @@ -305,17 +312,22 @@ class GroupPreparer { return node.with(node.allocation().get().with(membership)); } - /** Returns true if we have accepted at least the requested number of nodes of the requested flavor */ - public boolean satisfied() { - return acceptedOfRequestedFlavor >= requestedNodes; + /** Returns true if no more nodes are needed in this list */ + public boolean saturated() { + return requestedNodes.saturatedBy(acceptedOfRequestedFlavor); + } + + /** Returns true if the content of this list is sufficient to meet the request */ + public boolean fullfilled() { + return requestedNodes.fulfilledBy(acceptedOfRequestedFlavor); } - public boolean whatAboutUsingRetiredNodes() { - return acceptedOfRequestedFlavor + wasRetiredJustNow >= requestedNodes; + public boolean wouldBeFulfilledWithRetiredNodes() { + return requestedNodes.fulfilledBy(acceptedOfRequestedFlavor + wasRetiredJustNow); } - public boolean whatAboutUsingVMs() { - return acceptedOfRequestedFlavor + rejectedWithClashingParentHost >= requestedNodes; + public boolean wouldBeFulfilledWithClashingParentHost() { + return requestedNodes.fulfilledBy(acceptedOfRequestedFlavor + rejectedWithClashingParentHost); } /** @@ -329,7 +341,7 @@ class GroupPreparer { */ public List<Node> finalNodes(List<Node> surplusNodes) { long currentRetired = nodes.stream().filter(node -> node.allocation().get().membership().retired()).count(); - long surplus = nodes.size() - requestedNodes - currentRetired; + long surplus = requestedNodes.surplusGiven(nodes.size()) - currentRetired; List<Node> changedNodes = new ArrayList<>(); if (surplus > 0) { // retire until surplus is 0, prefer to retire higher indexes to minimize redistribution diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java index 5924e3fcb18..8e6c9f0c4ee 100644 --- 
a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java @@ -7,6 +7,7 @@ import com.yahoo.config.provision.Capacity; import com.yahoo.config.provision.ClusterSpec; import com.yahoo.config.provision.HostFilter; import com.yahoo.config.provision.HostSpec; +import com.yahoo.config.provision.NodeType; import com.yahoo.config.provision.ProvisionLogger; import com.yahoo.config.provision.Provisioner; import com.yahoo.config.provision.Zone; @@ -61,19 +62,33 @@ public class NodeRepositoryProvisioner implements Provisioner { * The nodes are ordered by increasing index number. */ @Override - public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity requestedCapacity, int groups, ProvisionLogger logger) { - log.log(LogLevel.DEBUG, () -> "Received deploy prepare request for " + requestedCapacity + " in " + - groups + " groups for application " + application + ", cluster " + cluster); - - Flavor flavor = capacityPolicies.decideFlavor(requestedCapacity, cluster); - int nodeCount = capacityPolicies.decideSize(requestedCapacity); - int effectiveGroups = groups > nodeCount ? 
nodeCount : groups; // cannot have more groups than nodes + public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity requestedCapacity, + int wantedGroups, ProvisionLogger logger) { + if (cluster.group().isPresent()) throw new IllegalArgumentException("Node requests cannot specify a group"); + if (requestedCapacity.nodeCount() > 0 && requestedCapacity.nodeCount() % wantedGroups != 0) + throw new IllegalArgumentException("Requested " + requestedCapacity.nodeCount() + " nodes in " + wantedGroups + " groups, " + + "which doesn't allow the nodes to be divided evenly into groups"); - if (zone.environment().isManuallyDeployed() && nodeCount < requestedCapacity.nodeCount()) - logger.log(Level.WARNING, "Requested " + requestedCapacity.nodeCount() + " nodes for " + cluster + - ", downscaling to " + nodeCount + " nodes in " + zone.environment()); - - return asSortedHosts(preparer.prepare(application, cluster, nodeCount, flavor, effectiveGroups)); + log.log(LogLevel.DEBUG, () -> "Received deploy prepare request for " + requestedCapacity + " in " + + wantedGroups + " groups for application " + application + ", cluster " + cluster); + + int effectiveGroups; + NodeSpec requestedNodes; + if ( requestedCapacity.type() == NodeType.tenant) { + int nodeCount = capacityPolicies.decideSize(requestedCapacity); + if (zone.environment().isManuallyDeployed() && nodeCount < requestedCapacity.nodeCount()) + logger.log(Level.INFO, "Requested " + requestedCapacity.nodeCount() + " nodes for " + cluster + + ", downscaling to " + nodeCount + " nodes in " + zone.environment()); + Flavor flavor = capacityPolicies.decideFlavor(requestedCapacity, cluster); + effectiveGroups = wantedGroups > nodeCount ? 
nodeCount : wantedGroups; // cannot have more groups than nodes + requestedNodes = NodeSpec.from(nodeCount, flavor); + } + else { + requestedNodes = NodeSpec.from(requestedCapacity.type()); + effectiveGroups = 1; // type request with multiple groups is not supported + } + + return asSortedHosts(preparer.prepare(application, cluster, requestedNodes, effectiveGroups)); } @Override diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java new file mode 100644 index 00000000000..2ce364daa07 --- /dev/null +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java @@ -0,0 +1,128 @@ +package com.yahoo.vespa.hosted.provision.provisioning; + +import com.yahoo.config.provision.NodeType; +import com.yahoo.vespa.hosted.provision.node.Flavor; + +import java.util.Objects; + +/** + * A specification of a set of nodes. + * This reflects that nodes can be requested either by count and flavor or by type, + * and encapsulates the differences in logic between these two cases. 
+ * + * @author bratseth + */ +public interface NodeSpec { + + /** The node type this requests */ + NodeType type(); + + /** Returns whether the given flavor is compatible with this spec */ + boolean isCompatible(Flavor flavor); + + /** Returns whether the given flavor is exactly specified by this node spec */ + boolean matchesExactly(Flavor flavor); + + /** Returns whether this requests a non-stock flavor */ + boolean specifiesNonStockFlavor(); + + /** Returns whether the given node count is sufficient to consider this spec fulfilled to the maximum amount */ + boolean saturatedBy(int count); + + /** Returns whether the given node count is sufficient to fulfill this spec */ + boolean fulfilledBy(int count); + + /** Returns the amount the given count is above the minimum amount needed to fulfill this request */ + int surplusGiven(int count); + + /** Returns a specification of a fraction of all the nodes of this. It is assumed the argument is a valid divisor. */ + NodeSpec fraction(int divisor); + + static NodeSpec from(int nodeCount, Flavor flavor) { + return new CountNodeSpec(nodeCount, flavor); + } + + static NodeSpec from(NodeType type) { + return new TypeNodeSpec(type); + } + + /** A node spec specifying a node count and a flavor */ + class CountNodeSpec implements NodeSpec { + + private final int count; + private final Flavor flavor; + + public CountNodeSpec(int count, Flavor flavor) { + Objects.requireNonNull(flavor, "A flavor must be specified"); + this.count = count; + this.flavor = flavor; + } + + @Override + public NodeType type() { return NodeType.tenant; } + + @Override + public boolean isCompatible(Flavor flavor) { return flavor.satisfies(this.flavor); } + + @Override + public boolean matchesExactly(Flavor flavor) { return flavor.equals(this.flavor); } + + @Override + public boolean specifiesNonStockFlavor() { return ! 
flavor.isStock(); } + + @Override + public boolean fulfilledBy(int count) { return count >= this.count; } + + @Override + public boolean saturatedBy(int count) { return fulfilledBy(count); } // min=max for count specs + + @Override + public int surplusGiven(int count) { return count - this.count; } + + @Override + public NodeSpec fraction(int divisor) { return new CountNodeSpec(count/divisor, flavor); } + + @Override + public String toString() { return "request for " + count + " nodes of " + flavor; } + + } + + /** A node spec specifying a node type. This will accept all nodes of this type. */ + class TypeNodeSpec implements NodeSpec { + + private final NodeType type; + + public TypeNodeSpec(NodeType type) { + this.type = type; + } + + @Override + public NodeType type() { return type; } + + @Override + public boolean isCompatible(Flavor flavor) { return true; } + + @Override + public boolean matchesExactly(Flavor flavor) { return false; } + + @Override + public boolean specifiesNonStockFlavor() { return false; } + + @Override + public boolean fulfilledBy(int count) { return true; } + + @Override + public boolean saturatedBy(int count) { return false; } + + @Override + public int surplusGiven(int count) { return 0; } + + @Override + public NodeSpec fraction(int divisor) { return this; } + + @Override + public String toString() { return "request for all nodes of type '" + type + "'"; } + + } + +} diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java index dfb06321233..d34a91aec77 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java @@ -40,28 +40,15 @@ class Preparer { // Note: This operation may make persisted changes to the set of reserved and inactive nodes, // but it may not change 
the set of active nodes, as the active nodes must stay in sync with the // active config model which is changed on activate - public List<Node> prepare(ApplicationId application, ClusterSpec cluster, int nodes, Flavor flavor, int wantedGroups) { - // TODO: Encode actual assumptions as we have logic that depends on them: - // - Don't allow a cluster spec specifying an explicit group (and then remove the "targetgroup" parameter to moveToActiveGroup - // - Change group ids to be a 0-based integer index - if (cluster.group().isPresent() && wantedGroups > 1) - throw new IllegalArgumentException("Cannot specify both a particular group and request multiple groups"); - if (nodes > 0 && nodes % wantedGroups != 0) - throw new IllegalArgumentException("Requested " + nodes + " nodes in " + wantedGroups + " groups, " + - "which doesn't allow the nodes to be divided evenly into groups"); - - // no group -> this asks for the entire cluster -> we are free to remove groups we won't need - List<Node> surplusNodes = - cluster.group().isPresent() ? new ArrayList<>() : findNodesInRemovableGroups(application, cluster, wantedGroups); + public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) { + List<Node> surplusNodes = findNodesInRemovableGroups(application, cluster, wantedGroups); MutableInteger highestIndex = new MutableInteger(findHighestIndex(application, cluster)); List<Node> acceptedNodes = new ArrayList<>(); for (int groupIndex = 0; groupIndex < wantedGroups; groupIndex++) { - // Generated groups always have contiguous indexes starting from 0 - ClusterSpec clusterGroup = - cluster.group().isPresent() ? 
cluster : cluster.changeGroup(Optional.of(ClusterSpec.Group.from(groupIndex))); - - List<Node> accepted = groupPreparer.prepare(application, clusterGroup, nodes/wantedGroups, flavor, surplusNodes, highestIndex); + ClusterSpec clusterGroup = cluster.changeGroup(Optional.of(ClusterSpec.Group.from(groupIndex))); + List<Node> accepted = groupPreparer.prepare(application, clusterGroup, + requestedNodes.fraction(wantedGroups), surplusNodes, highestIndex); replace(acceptedNodes, accepted); } moveToActiveGroup(surplusNodes, wantedGroups, cluster.group()); diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/legacy/ContainersForHost.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/legacy/ContainersForHost.java deleted file mode 100644 index a501e7f5a0a..00000000000 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/legacy/ContainersForHost.java +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.vespa.hosted.provision.restapi.legacy; - -import java.util.List; - -/** - * Represents the JSON reply for getContainersForHost. - * Serialized by jackson, and therefore uses public fields to avoid writing cruft. 
- * - * @author tonytv - */ -public class ContainersForHost { - - public List<DockerContainer> dockerContainers; - - public static class DockerContainer { - public String containerHostname; - public String dockerImage; - public String nodeState; - public long wantedRestartGeneration; - public long currentRestartGeneration; - - public DockerContainer( - String containerHostname, - String dockerImage, - String nodeState, - long wantedRestartGeneration, - long currentRestartGeneration) { - this.containerHostname = containerHostname; - this.dockerImage = dockerImage; - this.nodeState = nodeState; - this.wantedRestartGeneration = wantedRestartGeneration; - this.currentRestartGeneration = currentRestartGeneration; - } - } - - public ContainersForHost(List<DockerContainer> dockerContainers) { - this.dockerContainers = dockerContainers; - } - -} diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/legacy/HostInfo.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/legacy/HostInfo.java deleted file mode 100644 index 81211f978f7..00000000000 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/legacy/HostInfo.java +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.vespa.hosted.provision.restapi.legacy; - -/** - * Value class used to automatically convert to/from JSON. 
- * - * @author Oyvind Gronnesby - */ -class HostInfo { - - public String hostname; - public String openStackId; - public String flavor; - - public static HostInfo createHostInfo(String hostname, String openStackId, String flavor) { - HostInfo hostInfo = new HostInfo(); - hostInfo.hostname = hostname; - hostInfo.openStackId = openStackId; - hostInfo.flavor = flavor; - return hostInfo; - } - - public String toString(){ - return String.format("%s/%s", openStackId, hostname); - } - -} diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/legacy/ProvisionEndpoint.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/legacy/ProvisionEndpoint.java deleted file mode 100644 index caef4630544..00000000000 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/legacy/ProvisionEndpoint.java +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.vespa.hosted.provision.restapi.legacy; - -import java.net.MalformedURLException; -import java.net.URI; -import java.net.URISyntaxException; -import java.net.URL; - -/** - * To avoid duplication of URI construction. - * This class should be deleted when there's a provision client configured in services xml. 
- * @author tonytv - */ -public class ProvisionEndpoint { - - public static final int configServerPort = 19071; - - public static URI provisionUri(String configServerHostName, int port) { - try { - return new URL("http", configServerHostName, port, "/hack/provision").toURI(); - } catch (URISyntaxException | MalformedURLException e) { - throw new IllegalArgumentException("Failed creating provisionUri from " + configServerHostName, e); - } - } -} diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/legacy/ProvisionResource.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/legacy/ProvisionResource.java deleted file mode 100644 index da55ef9a15d..00000000000 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/legacy/ProvisionResource.java +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.vespa.hosted.provision.restapi.legacy; - -import com.yahoo.config.provision.ApplicationId; -import com.yahoo.container.jaxrs.annotation.Component; -import com.yahoo.log.LogLevel; -import com.yahoo.vespa.hosted.provision.Node; -import com.yahoo.vespa.hosted.provision.Node.State; -import com.yahoo.vespa.hosted.provision.NodeRepository; -import com.yahoo.vespa.hosted.provision.node.NodeFlavors; -import com.yahoo.vespa.hosted.provision.restapi.NodeStateSerializer; -import com.yahoo.vespa.hosted.provision.restapi.legacy.ContainersForHost.DockerContainer; - -import javax.ws.rs.*; -import javax.ws.rs.core.MediaType; -import java.util.*; -import java.util.function.Predicate; -import java.util.logging.Logger; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -/** - * The provisioning web service used by the provisioning controller to provide nodes to a node repository. 
- * - * @author mortent - */ -@Path("/provision") -@Produces(MediaType.APPLICATION_JSON) -public class ProvisionResource { - - private static final Logger log = Logger.getLogger(ProvisionResource.class.getName()); - - private final NodeRepository nodeRepository; - - private final NodeFlavors nodeFlavors; - - public ProvisionResource(@Component NodeRepository nodeRepository, @Component NodeFlavors nodeFlavors) { - super(); - this.nodeRepository = nodeRepository; - this.nodeFlavors = nodeFlavors; - } - - - @POST - @Path("/node") - @Consumes(MediaType.APPLICATION_JSON) - public void addNodes(List<HostInfo> hostInfoList) { - List<Node> nodes = new ArrayList<>(); - for (HostInfo hostInfo : hostInfoList) - nodes.add(nodeRepository.createNode(hostInfo.openStackId, hostInfo.hostname, Optional.empty(), nodeFlavors.getFlavorOrThrow(hostInfo.flavor), Node.Type.tenant)); - nodeRepository.addNodes(nodes); - } - - @GET - @Path("/node/required") - public ProvisionStatus getStatus() { - ProvisionStatus provisionStatus = new ProvisionStatus(); - provisionStatus.requiredNodes = 0; // This concept has no meaning any more ... - provisionStatus.decomissionNodes = toHostInfo(nodeRepository.getInactive()); - provisionStatus.failedNodes = toHostInfo(nodeRepository.getFailed()); - - return provisionStatus; - } - - private List<HostInfo> toHostInfo(List<Node> nodes) { - List<HostInfo> hostInfoList = new ArrayList<>(nodes.size()); - for (Node node : nodes) - hostInfoList.add(HostInfo.createHostInfo(node.hostname(), node.openStackId(), "medium")); - return hostInfoList; - } - - - @PUT - @Path("/node/ready") - public void setReady(String hostName) { - if ( nodeRepository.getNode(hostName, Node.State.ready).isPresent()) return; // node already 'ready' - - Optional<Node> node = nodeRepository.getNode(hostName, Node.State.provisioned, Node.State.dirty); - if ( ! 
node.isPresent()) - throw new IllegalArgumentException("Could not set " + hostName + " ready: Not registered as provisioned or dirty"); - - nodeRepository.setReady(Collections.singletonList(node.get())); - } - - @GET - @Path("/node/usage/{tenantId}") - public TenantStatus getTenantUsage(@PathParam("tenantId") String tenantId) { - TenantStatus ts = new TenantStatus(); - ts.tenantId = tenantId; - ts.allocated = nodeRepository.getNodeCount(tenantId, Node.State.active); - ts.reserved = nodeRepository.getNodeCount(tenantId, Node.State.reserved); - - Map<String, TenantStatus.ApplicationUsage> appinstanceUsageMap = new HashMap<>(); - - nodeRepository.getNodes(Node.Type.tenant, Node.State.active).stream() - .filter(node -> { - return node.allocation().get().owner().tenant().value().equals(tenantId); - }) - .forEach(node -> { - ApplicationId owner = node.allocation().get().owner(); - appinstanceUsageMap.merge( - String.format("%s:%s", owner.application().value(), owner.instance().value()), - TenantStatus.ApplicationUsage.create(owner.application().value(), owner.instance().value(), 1), - (a, b) -> { - a.usage += b.usage; - return a; - } - ); - }); - - ts.applications = new ArrayList<>(appinstanceUsageMap.values()); - return ts; - } - - //TODO: move this to nodes/v2/ when the spec for this has been nailed. - //TODO: Change it to list host nodes, instead of hosts for tenant nodes. - @GET - @Path("/dockerhost/{hostname}") - public ContainersForHost getContainersForHost(@PathParam("hostname") String hostname) { - List<DockerContainer> dockerContainersForHost = - nodeRepository.getNodes(Node.Type.tenant, State.active, State.inactive).stream() - .filter(runsOnDockerHost(hostname)) - .flatMap(ProvisionResource::toDockerContainer) - .collect(Collectors.toList()); - - return new ContainersForHost(dockerContainersForHost); - } - - //returns stream since there is no conversion from optional to stream in java. 
- private static Stream<DockerContainer> toDockerContainer(Node node) { - try { - String dockerImage = node.allocation().get().membership().cluster().dockerImage().orElseThrow(() -> - new Exception("Docker image not set for node " + node)); - - return Stream.of(new DockerContainer( - node.hostname(), - dockerImage, - NodeStateSerializer.wireNameOf(node.state()), - node.allocation().get().restartGeneration().wanted(), - node.allocation().get().restartGeneration().current())); - } catch (Exception e) { - log.log(LogLevel.ERROR, "Ignoring docker container.", e); - return Stream.empty(); - } - } - - private static Predicate<Node> runsOnDockerHost(String hostname) { - return node -> node.parentHostname().map(hostname::equals).orElse(false); - } -} diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/legacy/ProvisionStatus.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/legacy/ProvisionStatus.java deleted file mode 100644 index 7e0eb41627f..00000000000 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/legacy/ProvisionStatus.java +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.vespa.hosted.provision.restapi.legacy; - -import java.util.List; - -/** - * Value class used to convert to/from JSON. 
- * - * @author mortent - */ -class ProvisionStatus { - - public int requiredNodes; - public List<HostInfo> decomissionNodes; - public List<HostInfo> failedNodes; - -} diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/legacy/TenantStatus.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/legacy/TenantStatus.java deleted file mode 100644 index 4f20670fa12..00000000000 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/legacy/TenantStatus.java +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.vespa.hosted.provision.restapi.legacy; - -import java.util.List; - -/** - * Value class used to convert to/from JSON. - * - * @author Oyvind Gronnesby - */ -class TenantStatus { - - public String tenantId; - public int allocated; - public int reserved; - public List<ApplicationUsage> applications; - - public static class ApplicationUsage { - public String application; - public String instance; - public int usage; - - public static ApplicationUsage create(String applicationId, String instanceId, int usage) { - ApplicationUsage appUsage = new ApplicationUsage(); - appUsage.application = applicationId; - appUsage.instance = instanceId; - appUsage.usage = usage; - return appUsage; - } - } -} diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/legacy/package-info.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/legacy/package-info.java deleted file mode 100644 index 75ffa3e240e..00000000000 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/legacy/package-info.java +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-@ExportPackage -package com.yahoo.vespa.hosted.provision.restapi.legacy; - -import com.yahoo.osgi.annotation.ExportPackage; - -/** - * Rest API which allows nodes to be added and removed from this node repository - * This API, aptly named "hack" will be removed once the dependencies are off it - Jon, March 2015 - */
\ No newline at end of file diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v1/NodesApiHandler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v1/NodesApiHandler.java index da70453c293..1df61f1c6f7 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v1/NodesApiHandler.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v1/NodesApiHandler.java @@ -3,6 +3,7 @@ package com.yahoo.vespa.hosted.provision.restapi.v1; import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.ClusterMembership; +import com.yahoo.config.provision.NodeType; import com.yahoo.container.jdisc.HttpRequest; import com.yahoo.container.jdisc.HttpResponse; import com.yahoo.container.jdisc.LoggingRequestHandler; @@ -22,7 +23,7 @@ import java.util.Optional; import java.util.concurrent.Executor; /** - * The implementation of the /state/v1 API. + * The implementation of the /nodes/v1 API. * This dumps the content of the node repository on request, possibly with a host filter to return just the single * matching node. 
* @@ -79,7 +80,7 @@ public class private void toSlime(Node.State state, Cursor object) { Cursor nodeArray = null; // create if there are nodes - for (Node.Type type : Node.Type.values()) { + for (NodeType type : NodeType.values()) { List<Node> nodes = nodeRepository.getNodes(type, state); for (Node node : nodes) { if (hostnameFilter.isPresent() && !node.hostname().equals(hostnameFilter.get())) continue; diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/NodesApiHandler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/NodesApiHandler.java index 9e240ba6055..1cea59ef79b 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/NodesApiHandler.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/NodesApiHandler.java @@ -2,6 +2,7 @@ package com.yahoo.vespa.hosted.provision.restapi.v2; import com.yahoo.config.provision.HostFilter; +import com.yahoo.config.provision.NodeType; import com.yahoo.container.jdisc.HttpRequest; import com.yahoo.container.jdisc.HttpResponse; import com.yahoo.container.jdisc.LoggingRequestHandler; @@ -34,7 +35,7 @@ import java.util.logging.Level; import static com.yahoo.vespa.config.SlimeUtils.optionalString; /** - * The implementation of the /state/v2 API. + * The implementation of the /nodes/v2 API. * See RestApiTest for documentation. * * @author bratseth @@ -198,12 +199,12 @@ public class NodesApiHandler extends LoggingRequestHandler { nodeTypeFromSlime(inspector.field(nodeTypeKey))); } - private Node.Type nodeTypeFromSlime(Inspector object) { - if (! object.valid()) return Node.Type.tenant; // default + private NodeType nodeTypeFromSlime(Inspector object) { + if (! 
object.valid()) return NodeType.tenant; // default switch (object.asString()) { - case "tenant" : return Node.Type.tenant; - case "host" : return Node.Type.host; - case "proxy" : return Node.Type.proxy; + case "tenant" : return NodeType.tenant; + case "host" : return NodeType.host; + case "proxy" : return NodeType.proxy; default: throw new IllegalArgumentException("Unknown node type '" + object.asString() + "'"); } } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/NodesResponse.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/NodesResponse.java index b81fe0c4417..c245230bfa3 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/NodesResponse.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/NodesResponse.java @@ -3,6 +3,7 @@ package com.yahoo.vespa.hosted.provision.restapi.v2; import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.ClusterMembership; +import com.yahoo.config.provision.NodeType; import com.yahoo.container.jdisc.HttpRequest; import com.yahoo.container.jdisc.HttpResponse; import com.yahoo.slime.Cursor; @@ -102,7 +103,7 @@ class NodesResponse extends HttpResponse { /** Outputs the nodes in the given state to a node array */ private void nodesToSlime(Node.State state, Cursor parentObject) { Cursor nodeArray = parentObject.setArray("nodes"); - for (Node.Type type : Node.Type.values()) + for (NodeType type : NodeType.values()) toSlime(nodeRepository.getNodes(type, state), nodeArray); } @@ -110,7 +111,7 @@ class NodesResponse extends HttpResponse { private void nodesToSlime(Cursor parentObject) { Cursor nodeArray = parentObject.setArray("nodes"); for (Node.State state : Node.State.values()) { - for (Node.Type type : Node.Type.values()) + for (NodeType type : NodeType.values()) toSlime(nodeRepository.getNodes(type, state), nodeArray); } } @@ -175,7 +176,7 @@ class NodesResponse extends 
HttpResponse { toSlime(node.history(), object.setArray("history")); } - private String toString(Node.Type type) { + private String toString(NodeType type) { switch(type) { case tenant: return "tenant"; case host: return "host"; diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/FlavorConfigBuilder.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/FlavorConfigBuilder.java index b312e7c85ca..748a5b6c558 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/FlavorConfigBuilder.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/FlavorConfigBuilder.java @@ -31,6 +31,19 @@ public class FlavorConfigBuilder { return flavor; } + public NodeRepositoryConfig.Flavor.Builder addNonStockFlavor(String flavorName, double cpu, double mem, double disk, Flavor.Type type) { + NodeRepositoryConfig.Flavor.Builder flavor = new NodeRepositoryConfig.Flavor.Builder(); + flavor.name(flavorName); + flavor.description("Flavor-name-is-" + flavorName); + flavor.minDiskAvailableGb(disk); + flavor.minCpuCores(cpu); + flavor.minMainMemoryAvailableGb(mem); + flavor.stock(false); + flavor.environment(type.name()); + builder.flavor(flavor); + return flavor; + } + public void addReplaces(String replaces, NodeRepositoryConfig.Flavor.Builder flavor) { NodeRepositoryConfig.Flavor.Replaces.Builder flavorReplaces = new NodeRepositoryConfig.Flavor.Replaces.Builder(); flavorReplaces.name(replaces); diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java index ea6f581413e..5e1fd2357cb 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java @@ -8,6 +8,7 @@ import 
com.yahoo.config.provision.Capacity; import com.yahoo.config.provision.ClusterSpec; import com.yahoo.config.provision.HostSpec; import com.yahoo.config.provision.InstanceName; +import com.yahoo.config.provision.NodeType; import com.yahoo.config.provision.TenantName; import com.yahoo.config.provision.Zone; import com.yahoo.transaction.NestedTransaction; @@ -47,22 +48,22 @@ public class MockNodeRepository extends NodeRepository { NodeRepositoryProvisioner provisioner = new NodeRepositoryProvisioner(this, flavors, Zone.defaultZone()); List<Node> nodes = new ArrayList<>(); - nodes.add(createNode("node1", "host1.yahoo.com", Optional.empty(), flavors.getFlavorOrThrow("default"), Node.Type.tenant)); - nodes.add(createNode("node2", "host2.yahoo.com", Optional.empty(), flavors.getFlavorOrThrow("default"), Node.Type.tenant)); - nodes.add(createNode("node3", "host3.yahoo.com", Optional.empty(), flavors.getFlavorOrThrow("expensive"), Node.Type.tenant)); + nodes.add(createNode("node1", "host1.yahoo.com", Optional.empty(), flavors.getFlavorOrThrow("default"), NodeType.tenant)); + nodes.add(createNode("node2", "host2.yahoo.com", Optional.empty(), flavors.getFlavorOrThrow("default"), NodeType.tenant)); + nodes.add(createNode("node3", "host3.yahoo.com", Optional.empty(), flavors.getFlavorOrThrow("expensive"), NodeType.tenant)); // TODO: Use docker flavor - Node node4 = createNode("node4", "host4.yahoo.com", Optional.of("dockerhost4"), flavors.getFlavorOrThrow("default"), Node.Type.tenant); + Node node4 = createNode("node4", "host4.yahoo.com", Optional.of("dockerhost4"), flavors.getFlavorOrThrow("default"), NodeType.tenant); node4 = node4.with(node4.status().withDockerImage("image-12")); nodes.add(node4); - Node node5 = createNode("node5", "host5.yahoo.com", Optional.of("dockerhost"), flavors.getFlavorOrThrow("default"), Node.Type.tenant); + Node node5 = createNode("node5", "host5.yahoo.com", Optional.of("dockerhost"), flavors.getFlavorOrThrow("default"), NodeType.tenant); 
nodes.add(node5.with(node5.status().withDockerImage("image-123").withVespaVersion(new Version("1.2.3")))); - nodes.add(createNode("node6", "host6.yahoo.com", Optional.empty(), flavors.getFlavorOrThrow("default"), Node.Type.tenant)); - nodes.add(createNode("node7", "host7.yahoo.com", Optional.empty(), flavors.getFlavorOrThrow("default"), Node.Type.tenant)); + nodes.add(createNode("node6", "host6.yahoo.com", Optional.empty(), flavors.getFlavorOrThrow("default"), NodeType.tenant)); + nodes.add(createNode("node7", "host7.yahoo.com", Optional.empty(), flavors.getFlavorOrThrow("default"), NodeType.tenant)); // 8 and 9 are added by web service calls - Node node10 = createNode("node10", "host10.yahoo.com", Optional.of("parent.yahoo.com"), flavors.getFlavorOrThrow("default"), Node.Type.tenant); + Node node10 = createNode("node10", "host10.yahoo.com", Optional.of("parent.yahoo.com"), flavors.getFlavorOrThrow("default"), NodeType.tenant); Status node10newStatus = node10.status(); node10newStatus = node10newStatus .withVespaVersion(Version.fromString("5.104.142")) @@ -71,8 +72,8 @@ public class MockNodeRepository extends NodeRepository { node10 = node10.with(node10newStatus); nodes.add(node10); - nodes.add(createNode("node55", "host55.yahoo.com", Optional.empty(), flavors.getFlavorOrThrow("default"), Node.Type.tenant)); - nodes.add(createNode("parent1", "parent1.yahoo.com", Optional.empty(), flavors.getFlavorOrThrow("default"), Node.Type.host)); + nodes.add(createNode("node55", "host55.yahoo.com", Optional.empty(), flavors.getFlavorOrThrow("default"), NodeType.tenant)); + nodes.add(createNode("parent1", "parent1.yahoo.com", Optional.empty(), flavors.getFlavorOrThrow("default"), NodeType.host)); nodes = addNodes(nodes); nodes.remove(6); diff --git a/node-repository/src/main/resources/configdefinitions/node-repository.def b/node-repository/src/main/resources/configdefinitions/node-repository.def index cd053adca61..f9b500594bd 100644 --- 
a/node-repository/src/main/resources/configdefinitions/node-repository.def +++ b/node-repository/src/main/resources/configdefinitions/node-repository.def @@ -18,6 +18,13 @@ flavor[].replaces[].name string # the expected lifetime of the node (usually three years). flavor[].cost int default=0 +# A stock flavor is any flavor which we expect to buy more of in the future. +# Stock flavors are assigned to applications by cost priority. +# +# Non-stock flavors are used for nodes for which a fixed amount has already been purchased +# for some historical reason. These nodes are assigned to applications by exact match and ignoring cost. +flavor[].stock bool default=true + # The type of node (e.g. bare metal, docker..). flavor[].environment string default="undefined" diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java index c63e51fc796..6a881fc7d7e 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java @@ -1,5 +1,6 @@ package com.yahoo.vespa.hosted.provision; +import com.yahoo.config.provision.NodeType; import org.junit.Test; import static org.junit.Assert.assertEquals; @@ -15,18 +16,18 @@ public class NodeRepositoryTest { @Test public void nodeRepositoryTest() { NodeRepositoryTester tester = new NodeRepositoryTester(); - assertEquals(0, tester.getNodes(Node.Type.tenant).size()); + assertEquals(0, tester.getNodes(NodeType.tenant).size()); - tester.addNode("id1", "host1", "default", Node.Type.tenant); - tester.addNode("id2", "host2", "default", Node.Type.tenant); - tester.addNode("id3", "host3", "default", Node.Type.tenant); + tester.addNode("id1", "host1", "default", NodeType.tenant); + tester.addNode("id2", "host2", "default", NodeType.tenant); + tester.addNode("id3", "host3", "default", NodeType.tenant); 
- assertEquals(3, tester.getNodes(Node.Type.tenant).size()); + assertEquals(3, tester.getNodes(NodeType.tenant).size()); tester.nodeRepository().move("host2", Node.State.parked); assertTrue(tester.nodeRepository().remove("host2")); - assertEquals(2, tester.getNodes(Node.Type.tenant).size()); + assertEquals(2, tester.getNodes(NodeType.tenant).size()); } } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTester.java index b66ddab357d..46bf29d887f 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTester.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTester.java @@ -1,5 +1,6 @@ package com.yahoo.vespa.hosted.provision; +import com.yahoo.config.provision.NodeType; import com.yahoo.test.ManualClock; import com.yahoo.vespa.config.nodes.NodeRepositoryConfig; import com.yahoo.vespa.curator.mock.MockCurator; @@ -34,11 +35,11 @@ public class NodeRepositoryTester { public NodeRepository nodeRepository() { return nodeRepository; } public MockCurator curator() { return curator; } - public List<Node> getNodes(Node.Type type, Node.State ... inState) { + public List<Node> getNodes(NodeType type, Node.State ... 
inState) { return nodeRepository.getNodes(type, inState); } - public Node addNode(String id, String hostname, String flavor, Node.Type type) { + public Node addNode(String id, String hostname, String flavor, NodeType type) { Node node = nodeRepository.createNode(id, hostname, Optional.empty(), nodeFlavors.getFlavorOrThrow(flavor), type); return nodeRepository.addNodes(Collections.singletonList(node)).get(0); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainerTest.java index 17cad78a843..bc902a4d910 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainerTest.java @@ -8,6 +8,7 @@ import com.yahoo.config.provision.ClusterSpec; import com.yahoo.config.provision.Environment; import com.yahoo.config.provision.HostSpec; import com.yahoo.config.provision.InstanceName; +import com.yahoo.config.provision.NodeType; import com.yahoo.config.provision.RegionName; import com.yahoo.config.provision.TenantName; import com.yahoo.config.provision.Zone; @@ -63,23 +64,23 @@ public class ApplicationMaintainerTest { int failedOrParkedInApp2 = 2; assertEquals(fixture.wantedNodesApp1 - failedInApp1, nodeRepository.getNodes(fixture.app1, Node.State.active).size()); assertEquals(fixture.wantedNodesApp2 - failedOrParkedInApp2, nodeRepository.getNodes(fixture.app2, Node.State.active).size()); - assertEquals(failedInApp1 + failedOrParkedInApp2, nodeRepository.getNodes(Node.Type.tenant, Node.State.failed, Node.State.parked).size()); - assertEquals(3, nodeRepository.getNodes(Node.Type.tenant, Node.State.ready).size()); - assertEquals(2, nodeRepository.getNodes(Node.Type.host, Node.State.ready).size()); + assertEquals(failedInApp1 + failedOrParkedInApp2, 
nodeRepository.getNodes(NodeType.tenant, Node.State.failed, Node.State.parked).size()); + assertEquals(3, nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size()); + assertEquals(2, nodeRepository.getNodes(NodeType.host, Node.State.ready).size()); // Cause maintenance deployment which will allocate replacement nodes fixture.runApplicationMaintainer(); assertEquals(fixture.wantedNodesApp1, nodeRepository.getNodes(fixture.app1, Node.State.active).size()); assertEquals(fixture.wantedNodesApp2, nodeRepository.getNodes(fixture.app2, Node.State.active).size()); - assertEquals(0, nodeRepository.getNodes(Node.Type.tenant, Node.State.ready).size()); + assertEquals(0, nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size()); // Reactivate the previously failed nodes - nodeRepository.reactivate(nodeRepository.getNodes(Node.Type.tenant, Node.State.failed).get(0).hostname()); - nodeRepository.reactivate(nodeRepository.getNodes(Node.Type.tenant, Node.State.failed).get(0).hostname()); - nodeRepository.reactivate(nodeRepository.getNodes(Node.Type.tenant, Node.State.parked).get(0).hostname()); + nodeRepository.reactivate(nodeRepository.getNodes(NodeType.tenant, Node.State.failed).get(0).hostname()); + nodeRepository.reactivate(nodeRepository.getNodes(NodeType.tenant, Node.State.failed).get(0).hostname()); + nodeRepository.reactivate(nodeRepository.getNodes(NodeType.tenant, Node.State.parked).get(0).hostname()); int reactivatedInApp1 = 1; int reactivatedInApp2 = 2; - assertEquals(0, nodeRepository.getNodes(Node.Type.tenant, Node.State.failed).size()); + assertEquals(0, nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size()); assertEquals(fixture.wantedNodesApp1 + reactivatedInApp1, nodeRepository.getNodes(fixture.app1, Node.State.active).size()); assertEquals(fixture.wantedNodesApp2 + reactivatedInApp2, nodeRepository.getNodes(fixture.app2, Node.State.active).size()); assertEquals("The reactivated nodes are now active but not part of the application", 
@@ -96,7 +97,7 @@ public class ApplicationMaintainerTest { private void createReadyNodes(int count, NodeRepository nodeRepository, NodeFlavors nodeFlavors) { List<Node> nodes = new ArrayList<>(count); for (int i = 0; i < count; i++) - nodes.add(nodeRepository.createNode("node" + i, "host" + i, Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), Node.Type.tenant)); + nodes.add(nodeRepository.createNode("node" + i, "host" + i, Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), NodeType.tenant)); nodes = nodeRepository.addNodes(nodes); nodeRepository.setReady(nodes); } @@ -104,7 +105,7 @@ public class ApplicationMaintainerTest { private void createHostNodes(int count, NodeRepository nodeRepository, NodeFlavors nodeFlavors) { List<Node> nodes = new ArrayList<>(count); for (int i = 0; i < count; i++) - nodes.add(nodeRepository.createNode("hostNode" + i, "realHost" + i, Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), Node.Type.host)); + nodes.add(nodeRepository.createNode("hostNode" + i, "realHost" + i, Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), NodeType.host)); nodes = nodeRepository.addNodes(nodes); nodeRepository.setReady(nodes); } @@ -142,14 +143,16 @@ public class ApplicationMaintainerTest { void runApplicationMaintainer() { Map<ApplicationId, MockDeployer.ApplicationContext> apps = new HashMap<>(); - apps.put(app1, new MockDeployer.ApplicationContext(app1, clusterApp1, wantedNodesApp1, Optional.of("default"), 1)); - apps.put(app2, new MockDeployer.ApplicationContext(app2, clusterApp2, wantedNodesApp2, Optional.of("default"), 1)); + apps.put(app1, new MockDeployer.ApplicationContext(app1, clusterApp1, + Capacity.fromNodeCount(wantedNodesApp1, Optional.of("default")), 1)); + apps.put(app2, new MockDeployer.ApplicationContext(app2, clusterApp2, + Capacity.fromNodeCount(wantedNodesApp2, Optional.of("default")), 1)); MockDeployer deployer = new MockDeployer(provisioner, apps); new ApplicationMaintainer(deployer, nodeRepository, 
Duration.ofMinutes(30)).run(); } NodeList getNodes(Node.State ... states) { - return new NodeList(nodeRepository.getNodes(Node.Type.tenant, states)); + return new NodeList(nodeRepository.getNodes(NodeType.tenant, states)); } } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java index e20cd1f2921..e1a3f115676 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java @@ -8,6 +8,7 @@ import com.yahoo.config.provision.ClusterSpec; import com.yahoo.config.provision.Environment; import com.yahoo.config.provision.HostSpec; import com.yahoo.config.provision.InstanceName; +import com.yahoo.config.provision.NodeType; import com.yahoo.config.provision.RegionName; import com.yahoo.config.provision.TenantName; import com.yahoo.config.provision.Zone; @@ -45,18 +46,18 @@ public class FailedExpirerTest { public void ensure_failed_nodes_are_deallocated_in_prod() throws InterruptedException { NodeRepository nodeRepository = failureScenarioIn(Environment.prod); - assertEquals(2, nodeRepository.getNodes(Node.Type.tenant, Node.State.failed).size()); - assertEquals(1, nodeRepository.getNodes(Node.Type.tenant, Node.State.dirty).size()); - assertEquals("node3", nodeRepository.getNodes(Node.Type.tenant, Node.State.dirty).get(0).hostname()); + assertEquals(2, nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size()); + assertEquals(1, nodeRepository.getNodes(NodeType.tenant, Node.State.dirty).size()); + assertEquals("node3", nodeRepository.getNodes(NodeType.tenant, Node.State.dirty).get(0).hostname()); } @Test public void ensure_failed_nodes_are_deallocated_in_dev() throws InterruptedException { NodeRepository nodeRepository = failureScenarioIn(Environment.dev); - 
assertEquals(1, nodeRepository.getNodes(Node.Type.tenant, Node.State.failed).size()); - assertEquals(2, nodeRepository.getNodes(Node.Type.tenant, Node.State.dirty).size()); - assertEquals("node2", nodeRepository.getNodes(Node.Type.tenant, Node.State.failed).get(0).hostname()); + assertEquals(1, nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size()); + assertEquals(2, nodeRepository.getNodes(NodeType.tenant, Node.State.dirty).size()); + assertEquals("node2", nodeRepository.getNodes(NodeType.tenant, Node.State.failed).get(0).hostname()); } private NodeRepository failureScenarioIn(Environment environment) { @@ -66,14 +67,14 @@ public class FailedExpirerTest { NodeRepositoryProvisioner provisioner = new NodeRepositoryProvisioner(nodeRepository, nodeFlavors, Zone.defaultZone(), clock); List<Node> nodes = new ArrayList<>(3); - nodes.add(nodeRepository.createNode("node1", "node1", Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), Node.Type.tenant)); - nodes.add(nodeRepository.createNode("node2", "node2", Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), Node.Type.tenant)); - nodes.add(nodeRepository.createNode("node3", "node3", Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), Node.Type.tenant)); + nodes.add(nodeRepository.createNode("node1", "node1", Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), NodeType.tenant)); + nodes.add(nodeRepository.createNode("node2", "node2", Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), NodeType.tenant)); + nodes.add(nodeRepository.createNode("node3", "node3", Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), NodeType.tenant)); nodeRepository.addNodes(nodes); List<Node> hostNodes = new ArrayList<>(1); - hostNodes.add(nodeRepository.createNode("parent1", "parent1", Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), Node.Type.host)); - hostNodes.add(nodeRepository.createNode("parent2", "parent2", Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), 
Node.Type.host)); + hostNodes.add(nodeRepository.createNode("parent1", "parent1", Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), NodeType.host)); + hostNodes.add(nodeRepository.createNode("parent2", "parent2", Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), NodeType.host)); nodeRepository.addNodes(hostNodes); @@ -91,20 +92,20 @@ public class FailedExpirerTest { nodeRepository.write(node2); // Allocate the nodes - nodeRepository.setReady(nodeRepository.getNodes(Node.Type.tenant, Node.State.provisioned)); + nodeRepository.setReady(nodeRepository.getNodes(NodeType.tenant, Node.State.provisioned)); ApplicationId applicationId = ApplicationId.from(TenantName.from("foo"), ApplicationName.from("bar"), InstanceName.from("fuz")); ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Optional.empty()); provisioner.prepare(applicationId, cluster, Capacity.fromNodeCount(3), 1, null); NestedTransaction transaction = new NestedTransaction().add(new CuratorTransaction(curator)); provisioner.activate(transaction, applicationId, asHosts(nodes)); transaction.commit(); - assertEquals(3, nodeRepository.getNodes(Node.Type.tenant, Node.State.active).size()); + assertEquals(3, nodeRepository.getNodes(NodeType.tenant, Node.State.active).size()); // Fail the nodes nodeRepository.fail("node1"); nodeRepository.fail("node2"); nodeRepository.fail("node3"); - assertEquals(3, nodeRepository.getNodes(Node.Type.tenant, Node.State.failed).size()); + assertEquals(3, nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size()); // Failure times out clock.advance(Duration.ofDays(5)); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java index 84d3ece5698..8f20b814b9b 100644 --- 
a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InactiveAndFailedExpirerTest.java @@ -7,6 +7,7 @@ import com.yahoo.config.provision.Capacity; import com.yahoo.config.provision.ClusterSpec; import com.yahoo.config.provision.HostSpec; import com.yahoo.config.provision.InstanceName; +import com.yahoo.config.provision.NodeType; import com.yahoo.config.provision.TenantName; import com.yahoo.config.provision.Zone; import com.yahoo.test.ManualClock; @@ -49,13 +50,13 @@ public class InactiveAndFailedExpirerTest { NodeRepositoryProvisioner provisioner = new NodeRepositoryProvisioner(nodeRepository, nodeFlavors, Zone.defaultZone(), clock); List<Node> nodes = new ArrayList<>(2); - nodes.add(nodeRepository.createNode(UUID.randomUUID().toString(), UUID.randomUUID().toString(), Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), Node.Type.tenant)); - nodes.add(nodeRepository.createNode(UUID.randomUUID().toString(), UUID.randomUUID().toString(), Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), Node.Type.tenant)); + nodes.add(nodeRepository.createNode(UUID.randomUUID().toString(), UUID.randomUUID().toString(), Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), NodeType.tenant)); + nodes.add(nodeRepository.createNode(UUID.randomUUID().toString(), UUID.randomUUID().toString(), Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), NodeType.tenant)); nodeRepository.addNodes(nodes); List<Node> hostNodes = new ArrayList<>(2); - hostNodes.add(nodeRepository.createNode(UUID.randomUUID().toString(), UUID.randomUUID().toString(), Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), Node.Type.host)); - hostNodes.add(nodeRepository.createNode(UUID.randomUUID().toString(), UUID.randomUUID().toString(), Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), Node.Type.host)); + 
hostNodes.add(nodeRepository.createNode(UUID.randomUUID().toString(), UUID.randomUUID().toString(), Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), NodeType.host)); + hostNodes.add(nodeRepository.createNode(UUID.randomUUID().toString(), UUID.randomUUID().toString(), Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), NodeType.host)); nodeRepository.addNodes(hostNodes); // Allocate then deallocate 2 nodes @@ -66,18 +67,18 @@ public class InactiveAndFailedExpirerTest { NestedTransaction transaction = new NestedTransaction().add(new CuratorTransaction(curator)); provisioner.activate(transaction, applicationId, asHosts(nodes)); transaction.commit(); - assertEquals(2, nodeRepository.getNodes(Node.Type.tenant, Node.State.active).size()); + assertEquals(2, nodeRepository.getNodes(NodeType.tenant, Node.State.active).size()); NestedTransaction deactivateTransaction = new NestedTransaction(); nodeRepository.deactivate(applicationId, deactivateTransaction); deactivateTransaction.commit(); - assertEquals(2, nodeRepository.getNodes(Node.Type.tenant, Node.State.inactive).size()); + assertEquals(2, nodeRepository.getNodes(NodeType.tenant, Node.State.inactive).size()); // Inactive times out clock.advance(Duration.ofMinutes(14)); new InactiveExpirer(nodeRepository, clock, Duration.ofMinutes(10)).run(); - assertEquals(0, nodeRepository.getNodes(Node.Type.tenant, Node.State.inactive).size()); - List<Node> dirty = nodeRepository.getNodes(Node.Type.tenant, Node.State.dirty); + assertEquals(0, nodeRepository.getNodes(NodeType.tenant, Node.State.inactive).size()); + List<Node> dirty = nodeRepository.getNodes(NodeType.tenant, Node.State.dirty); assertEquals(2, dirty.size()); assertFalse(dirty.get(0).allocation().isPresent()); assertFalse(dirty.get(1).allocation().isPresent()); @@ -90,8 +91,8 @@ public class InactiveAndFailedExpirerTest { // Dirty times out for the other one clock.advance(Duration.ofMinutes(14)); new DirtyExpirer(nodeRepository, clock, 
Duration.ofMinutes(10)).run(); - assertEquals(0, nodeRepository.getNodes(Node.Type.tenant, Node.State.dirty).size()); - List<Node> failed = nodeRepository.getNodes(Node.Type.tenant, Node.State.failed); + assertEquals(0, nodeRepository.getNodes(NodeType.tenant, Node.State.dirty).size()); + List<Node> failed = nodeRepository.getNodes(NodeType.tenant, Node.State.failed); assertEquals(1, failed.size()); assertEquals(1, failed.get(0).status().failCount()); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MockDeployer.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MockDeployer.java index 7e884b35e16..ea7ed099b4e 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MockDeployer.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MockDeployer.java @@ -28,7 +28,7 @@ public class MockDeployer implements Deployer { public int redeployments = 0; /** - * Create a mock deployer which contains a substitute for an application repository, sufficient to + * Create a mock deployer which contains a substitute for an application repository, fullfilled to * be able to call provision with the right parameters. 
*/ public MockDeployer(NodeRepositoryProvisioner provisioner, Map<ApplicationId, ApplicationContext> applications) { @@ -76,17 +76,15 @@ public class MockDeployer implements Deployer { /** An application context which substitutes for an application repository */ public static class ApplicationContext { - private ApplicationId id; - private ClusterSpec cluster; - private int wantedNodes; - private Optional<String> flavor; - private int groups; + private final ApplicationId id; + private final ClusterSpec cluster; + private final Capacity capacity; + private final int groups; - public ApplicationContext(ApplicationId id, ClusterSpec cluster, int wantedNodes, Optional<String> flavor, int groups) { + public ApplicationContext(ApplicationId id, ClusterSpec cluster, Capacity capacity, int groups) { this.id = id; this.cluster = cluster; - this.wantedNodes = wantedNodes; - this.flavor = flavor; + this.capacity = capacity; this.groups = groups; } @@ -96,7 +94,7 @@ public class MockDeployer implements Deployer { public ClusterSpec cluster() { return cluster; } private List<HostSpec> prepare(NodeRepositoryProvisioner provisioner) { - return provisioner.prepare(id, cluster, Capacity.fromNodeCount(wantedNodes, flavor), groups, null); + return provisioner.prepare(id, cluster, capacity, groups, null); } } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java new file mode 100644 index 00000000000..4e63e7a6203 --- /dev/null +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java @@ -0,0 +1,334 @@ +package com.yahoo.vespa.hosted.provision.maintenance; + +import com.yahoo.config.provision.ApplicationId; +import com.yahoo.config.provision.ApplicationName; +import com.yahoo.config.provision.Capacity; +import com.yahoo.config.provision.ClusterSpec; +import 
com.yahoo.config.provision.Environment; +import com.yahoo.config.provision.HostLivenessTracker; +import com.yahoo.config.provision.HostSpec; +import com.yahoo.config.provision.InstanceName; +import com.yahoo.config.provision.NodeType; +import com.yahoo.config.provision.RegionName; +import com.yahoo.config.provision.TenantName; +import com.yahoo.config.provision.Zone; +import com.yahoo.test.ManualClock; +import com.yahoo.transaction.NestedTransaction; +import com.yahoo.vespa.applicationmodel.ApplicationInstance; +import com.yahoo.vespa.applicationmodel.ApplicationInstanceId; +import com.yahoo.vespa.applicationmodel.ApplicationInstanceReference; +import com.yahoo.vespa.applicationmodel.ClusterId; +import com.yahoo.vespa.applicationmodel.ConfigId; +import com.yahoo.vespa.applicationmodel.HostName; +import com.yahoo.vespa.applicationmodel.ServiceCluster; +import com.yahoo.vespa.applicationmodel.ServiceInstance; +import com.yahoo.vespa.applicationmodel.ServiceType; +import com.yahoo.vespa.applicationmodel.TenantId; +import com.yahoo.vespa.curator.Curator; +import com.yahoo.vespa.curator.mock.MockCurator; +import com.yahoo.vespa.curator.transaction.CuratorTransaction; +import com.yahoo.vespa.hosted.provision.Node; +import com.yahoo.vespa.hosted.provision.NodeRepository; +import com.yahoo.vespa.hosted.provision.node.Flavor; +import com.yahoo.vespa.hosted.provision.node.NodeFlavors; +import com.yahoo.vespa.hosted.provision.provisioning.NodeRepositoryProvisioner; +import com.yahoo.vespa.hosted.provision.testutils.FlavorConfigBuilder; +import com.yahoo.vespa.orchestrator.ApplicationIdNotFoundException; +import com.yahoo.vespa.orchestrator.ApplicationStateChangeDeniedException; +import com.yahoo.vespa.orchestrator.BatchHostNameNotFoundException; +import com.yahoo.vespa.orchestrator.BatchInternalErrorException; +import com.yahoo.vespa.orchestrator.HostNameNotFoundException; +import com.yahoo.vespa.orchestrator.Orchestrator; +import 
com.yahoo.vespa.orchestrator.policy.BatchHostStateChangeDeniedException; +import com.yahoo.vespa.orchestrator.policy.HostStateChangeDeniedException; +import com.yahoo.vespa.orchestrator.status.ApplicationInstanceStatus; +import com.yahoo.vespa.orchestrator.status.HostStatus; +import com.yahoo.vespa.service.monitor.ServiceMonitor; +import com.yahoo.vespa.service.monitor.ServiceMonitorStatus; + +import java.time.Clock; +import java.time.Duration; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; + +import static org.junit.Assert.assertEquals; + +/** + * @author bratseth + */ +public class NodeFailTester { + + // Immutable components + public static final ApplicationId app1 = ApplicationId.from(TenantName.from("foo1"), ApplicationName.from("bar"), InstanceName.from("fuz")); + public static final ApplicationId app2 = ApplicationId.from(TenantName.from("foo2"), ApplicationName.from("bar"), InstanceName.from("fuz")); + public static final NodeFlavors nodeFlavors = FlavorConfigBuilder.createDummies("default", "docker"); + private static final Zone zone = new Zone(Environment.prod, RegionName.from("us-east")); + private static final Duration downtimeLimitOneHour = Duration.ofMinutes(60); + + // Components with state + public final ManualClock clock; + public final NodeRepository nodeRepository; + public NodeFailer failer; + public ServiceMonitorStub serviceMonitor; + public MockDeployer deployer; + private final TestHostLivenessTracker hostLivenessTracker; + private final Orchestrator orchestrator; + private final NodeRepositoryProvisioner provisioner; + private final Curator curator; + + public NodeFailTester() { + clock = new ManualClock(); + curator = new MockCurator(); + nodeRepository = new NodeRepository(nodeFlavors, curator, clock); + provisioner = new 
NodeRepositoryProvisioner(nodeRepository, nodeFlavors, zone); + hostLivenessTracker = new TestHostLivenessTracker(clock); + orchestrator = new OrchestratorMock(); + } + + public static NodeFailTester withTwoApplications() { + NodeFailTester tester = new NodeFailTester(); + + tester.createReadyNodes(16); + tester.createHostNodes(3); + + // Create applications + ClusterSpec clusterApp1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test"), Optional.empty()); + ClusterSpec clusterApp2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Optional.empty()); + int wantedNodesApp1 = 5; + int wantedNodesApp2 = 7; + tester.activate(app1, clusterApp1, wantedNodesApp1); + tester.activate(app2, clusterApp2, wantedNodesApp2); + assertEquals(wantedNodesApp1, tester.nodeRepository.getNodes(app1, Node.State.active).size()); + assertEquals(wantedNodesApp2, tester.nodeRepository.getNodes(app2, Node.State.active).size()); + + Map<ApplicationId, MockDeployer.ApplicationContext> apps = new HashMap<>(); + apps.put(app1, new MockDeployer.ApplicationContext(app1, clusterApp1, Capacity.fromNodeCount(wantedNodesApp1, Optional.of("default")), 1)); + apps.put(app2, new MockDeployer.ApplicationContext(app2, clusterApp2, Capacity.fromNodeCount(wantedNodesApp2, Optional.of("default")), 1)); + tester.deployer = new MockDeployer(tester.provisioner, apps); + tester.serviceMonitor = new ServiceMonitorStub(apps, tester.nodeRepository); + tester.failer = tester.createFailer(); + return tester; + } + + public static NodeFailTester withProxyApplication() { + NodeFailTester tester = new NodeFailTester(); + + tester.createReadyNodes(16, NodeType.proxy); + + // Create application + Capacity allProxies = Capacity.fromRequiredNodeType(NodeType.proxy); + ClusterSpec clusterApp1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test"), Optional.empty()); + tester.activate(app1, clusterApp1, allProxies); + assertEquals(16, 
tester.nodeRepository.getNodes(NodeType.proxy, Node.State.active).size()); + + Map<ApplicationId, MockDeployer.ApplicationContext> apps = new HashMap<>(); + apps.put(app1, new MockDeployer.ApplicationContext(app1, clusterApp1, allProxies, 1)); + tester.deployer = new MockDeployer(tester.provisioner, apps); + tester.serviceMonitor = new ServiceMonitorStub(apps, tester.nodeRepository); + tester.failer = tester.createFailer(); + return tester; + } + + public void suspend(ApplicationId app) { + try { + orchestrator.suspend(app); + } + catch (Exception e) { + throw new RuntimeException(e); + } + } + + public NodeFailer createFailer() { + return new NodeFailer(deployer, hostLivenessTracker, serviceMonitor, nodeRepository, downtimeLimitOneHour, clock, orchestrator); + } + + public void allNodesMakeAConfigRequestExcept(Node ... deadNodeArray) { + Set<Node> deadNodes = new HashSet<>(Arrays.asList(deadNodeArray)); + for (Node node : nodeRepository.getNodes(NodeType.tenant)) { + if ( ! deadNodes.contains(node) && node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER) + hostLivenessTracker.receivedRequestFrom(node.hostname()); + } + } + + public void createReadyNodes(int count) { + createReadyNodes(count, 0); + } + + public void createReadyNodes(int count, NodeType nodeType) { + createReadyNodes(count, 0, nodeFlavors.getFlavorOrThrow("default"), nodeType); + } + + public void createReadyNodes(int count, int startIndex) { + createReadyNodes(count, startIndex, "default"); + } + + public void createReadyNodes(int count, int startIndex, String flavor) { + createReadyNodes(count, startIndex, nodeFlavors.getFlavorOrThrow(flavor), NodeType.tenant); + } + + private void createReadyNodes(int count, int startIndex, Flavor flavor, NodeType nodeType) { + List<Node> nodes = new ArrayList<>(count); + for (int i = startIndex; i < startIndex + count; i++) + nodes.add(nodeRepository.createNode("node" + i, "host" + i, Optional.empty(), flavor, nodeType)); + nodes = 
nodeRepository.addNodes(nodes); + nodeRepository.setReady(nodes); + } + + private void createHostNodes(int count) { + List<Node> nodes = new ArrayList<>(count); + for (int i = 0; i < count; i++) + nodes.add(nodeRepository.createNode("parent" + i, "parent" + i, Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), NodeType.host)); + nodes = nodeRepository.addNodes(nodes); + nodeRepository.setReady(nodes); + } + + private void activate(ApplicationId applicationId, ClusterSpec cluster, int nodeCount) { + activate(applicationId, cluster, Capacity.fromNodeCount(nodeCount)); + } + private void activate(ApplicationId applicationId, ClusterSpec cluster, Capacity capacity) { + List<HostSpec> hosts = provisioner.prepare(applicationId, cluster, capacity, 1, null); + NestedTransaction transaction = new NestedTransaction().add(new CuratorTransaction(curator)); + provisioner.activate(transaction, applicationId, hosts); + transaction.commit(); + } + + /** Returns the node with the highest membership index from the given set of allocated nodes */ + public Node highestIndex(List<Node> nodes) { + Node highestIndex = null; + for (Node node : nodes) { + if (highestIndex == null || node.allocation().get().membership().index() > + highestIndex.allocation().get().membership().index()) + highestIndex = node; + } + return highestIndex; + } + + /** This is a fully functional implementation */ + private static class TestHostLivenessTracker implements HostLivenessTracker { + + private final Clock clock; + private final Map<String, Instant> lastRequestFromHost = new HashMap<>(); + + public TestHostLivenessTracker(Clock clock) { + this.clock = clock; + } + + @Override + public void receivedRequestFrom(String hostname) { + lastRequestFromHost.put(hostname, clock.instant()); + } + + @Override + public Optional<Instant> lastRequestFrom(String hostname) { + return Optional.ofNullable(lastRequestFromHost.get(hostname)); + } + + } + + public static class ServiceMonitorStub implements 
ServiceMonitor { + + private final Map<ApplicationId, MockDeployer.ApplicationContext> apps; + private final NodeRepository nodeRepository; + + private Set<String> downHosts = new HashSet<>(); + private boolean statusIsKnown = true; + + /** Create a service monitor where all nodes are initially up */ + public ServiceMonitorStub(Map<ApplicationId, MockDeployer.ApplicationContext> apps, NodeRepository nodeRepository) { + this.apps = apps; + this.nodeRepository = nodeRepository; + } + + public void setHostDown(String hostname) { + downHosts.add(hostname); + } + + public void setHostUp(String hostname) { + downHosts.remove(hostname); + } + + public void setStatusIsKnown(boolean statusIsKnown) { + this.statusIsKnown = statusIsKnown; + } + + private ServiceMonitorStatus getHostStatus(String hostname) { + if ( ! statusIsKnown) return ServiceMonitorStatus.NOT_CHECKED; + if (downHosts.contains(hostname)) return ServiceMonitorStatus.DOWN; + return ServiceMonitorStatus.UP; + } + + @Override + public Map<ApplicationInstanceReference, ApplicationInstance<ServiceMonitorStatus>> queryStatusOfAllApplicationInstances() { + // Convert apps information to the response payload to return + Map<ApplicationInstanceReference, ApplicationInstance<ServiceMonitorStatus>> status = new HashMap<>(); + for (Map.Entry<ApplicationId, MockDeployer.ApplicationContext> app : apps.entrySet()) { + Set<ServiceInstance<ServiceMonitorStatus>> serviceInstances = new HashSet<>(); + for (Node node : nodeRepository.getNodes(app.getValue().id(), Node.State.active)) { + serviceInstances.add(new ServiceInstance<>(new ConfigId("configid"), + new HostName(node.hostname()), + getHostStatus(node.hostname()))); + } + Set<ServiceCluster<ServiceMonitorStatus>> serviceClusters = new HashSet<>(); + serviceClusters.add(new ServiceCluster<>(new ClusterId(app.getValue().cluster().id().value()), + new ServiceType("serviceType"), + serviceInstances)); + TenantId tenantId = new TenantId(app.getKey().tenant().value()); + 
ApplicationInstanceId applicationInstanceId = new ApplicationInstanceId(app.getKey().application().value()); + status.put(new ApplicationInstanceReference(tenantId, applicationInstanceId), + new ApplicationInstance<>(tenantId, applicationInstanceId, serviceClusters)); + } + return status; + } + + } + + class OrchestratorMock implements Orchestrator { + + Set<ApplicationId> suspendedApplications = new HashSet<>(); + + @Override + public HostStatus getNodeStatus(HostName hostName) throws HostNameNotFoundException { + return null; + } + + @Override + public void resume(HostName hostName) throws HostStateChangeDeniedException, HostNameNotFoundException {} + + @Override + public void suspend(HostName hostName) throws HostStateChangeDeniedException, HostNameNotFoundException {} + + @Override + public ApplicationInstanceStatus getApplicationInstanceStatus(ApplicationId appId) throws ApplicationIdNotFoundException { + return suspendedApplications.contains(appId) + ? ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN : ApplicationInstanceStatus.NO_REMARKS; + } + + @Override + public Set<ApplicationId> getAllSuspendedApplications() { + return null; + } + + @Override + public void resume(ApplicationId appId) throws ApplicationStateChangeDeniedException, ApplicationIdNotFoundException { + suspendedApplications.remove(appId); + } + + @Override + public void suspend(ApplicationId appId) throws ApplicationStateChangeDeniedException, ApplicationIdNotFoundException { + suspendedApplications.add(appId); + } + + @Override + public void suspendAll(HostName parentHostname, List<HostName> hostNames) throws BatchInternalErrorException, BatchHostStateChangeDeniedException, BatchHostNameNotFoundException { + throw new RuntimeException("Not implemented"); + } + } + +} diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java index 
2d1c441d283..d8d0bc99063 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java @@ -1,69 +1,23 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.provision.maintenance; -import com.yahoo.config.provision.ApplicationId; -import com.yahoo.config.provision.ApplicationName; -import com.yahoo.config.provision.Capacity; -import com.yahoo.config.provision.ClusterSpec; -import com.yahoo.config.provision.Environment; -import com.yahoo.config.provision.HostLivenessTracker; -import com.yahoo.config.provision.HostSpec; -import com.yahoo.config.provision.InstanceName; -import com.yahoo.config.provision.RegionName; -import com.yahoo.config.provision.TenantName; -import com.yahoo.config.provision.Zone; -import com.yahoo.test.ManualClock; -import com.yahoo.transaction.NestedTransaction; -import com.yahoo.vespa.applicationmodel.ApplicationInstance; -import com.yahoo.vespa.applicationmodel.ApplicationInstanceId; -import com.yahoo.vespa.applicationmodel.ApplicationInstanceReference; -import com.yahoo.vespa.applicationmodel.ClusterId; -import com.yahoo.vespa.applicationmodel.ConfigId; -import com.yahoo.vespa.applicationmodel.HostName; -import com.yahoo.vespa.applicationmodel.ServiceCluster; -import com.yahoo.vespa.applicationmodel.ServiceInstance; -import com.yahoo.vespa.applicationmodel.ServiceType; -import com.yahoo.vespa.applicationmodel.TenantId; -import com.yahoo.vespa.curator.Curator; -import com.yahoo.vespa.curator.mock.MockCurator; -import com.yahoo.vespa.curator.transaction.CuratorTransaction; +import com.yahoo.config.provision.NodeType; import com.yahoo.vespa.hosted.provision.Node; -import com.yahoo.vespa.hosted.provision.NodeRepository; -import com.yahoo.vespa.hosted.provision.node.Flavor; -import 
com.yahoo.vespa.hosted.provision.node.NodeFlavors; import com.yahoo.vespa.hosted.provision.node.Status; -import com.yahoo.vespa.hosted.provision.provisioning.NodeRepositoryProvisioner; -import com.yahoo.vespa.hosted.provision.testutils.FlavorConfigBuilder; import com.yahoo.vespa.orchestrator.ApplicationIdNotFoundException; import com.yahoo.vespa.orchestrator.ApplicationStateChangeDeniedException; -import com.yahoo.vespa.orchestrator.BatchHostNameNotFoundException; -import com.yahoo.vespa.orchestrator.BatchInternalErrorException; -import com.yahoo.vespa.orchestrator.HostNameNotFoundException; -import com.yahoo.vespa.orchestrator.Orchestrator; -import com.yahoo.vespa.orchestrator.policy.BatchHostStateChangeDeniedException; -import com.yahoo.vespa.orchestrator.policy.HostStateChangeDeniedException; -import com.yahoo.vespa.orchestrator.status.ApplicationInstanceStatus; -import com.yahoo.vespa.orchestrator.status.HostStatus; -import com.yahoo.vespa.service.monitor.ServiceMonitor; -import com.yahoo.vespa.service.monitor.ServiceMonitorStatus; -import org.junit.Before; import org.junit.Test; -import java.time.Clock; import java.time.Duration; -import java.time.Instant; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertFalse; /** * Tests automatic failing of nodes. 
@@ -72,388 +26,223 @@ import static org.junit.Assert.assertTrue; */ public class NodeFailerTest { - // Immutable components - private static final Zone ZONE = new Zone(Environment.prod, RegionName.from("us-east")); - private static final NodeFlavors NODE_FLAVORS = FlavorConfigBuilder.createDummies("default", "docker"); - private static final ApplicationId APP_1 = ApplicationId.from(TenantName.from("foo1"), ApplicationName.from("bar"), InstanceName.from("fuz")); - private static final ApplicationId APP_2 = ApplicationId.from(TenantName.from("foo2"), ApplicationName.from("bar"), InstanceName.from("fuz")); - private static final Duration DOWNTIME_LIMIT_ONE_HOUR = Duration.ofMinutes(60); - - // Components with state - private ManualClock clock; - private Curator curator; - private TestHostLivenessTracker hostLivenessTracker; - private ServiceMonitorStub serviceMonitor; - private MockDeployer deployer; - private NodeRepository nodeRepository; - private Orchestrator orchestrator; - private NodeFailer failer; - - @Before - public void setup() { - clock = new ManualClock(); - curator = new MockCurator(); - nodeRepository = new NodeRepository(NODE_FLAVORS, curator, clock); - NodeRepositoryProvisioner provisioner = new NodeRepositoryProvisioner(nodeRepository, NODE_FLAVORS, ZONE); - - createReadyNodes(16, nodeRepository, NODE_FLAVORS); - createHostNodes(3, nodeRepository, NODE_FLAVORS); - - // Create applications - ClusterSpec clusterApp1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test"), Optional.empty()); - ClusterSpec clusterApp2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Optional.empty()); - int wantedNodesApp1 = 5; - int wantedNodesApp2 = 7; - activate(APP_1, clusterApp1, wantedNodesApp1, provisioner); - activate(APP_2, clusterApp2, wantedNodesApp2, provisioner); - assertEquals(wantedNodesApp1, nodeRepository.getNodes(APP_1, Node.State.active).size()); - assertEquals(wantedNodesApp2, 
nodeRepository.getNodes(APP_2, Node.State.active).size()); - - // Create a deployer ... - Map<ApplicationId, MockDeployer.ApplicationContext> apps = new HashMap<>(); - apps.put(APP_1, new MockDeployer.ApplicationContext(APP_1, clusterApp1, wantedNodesApp1, Optional.of("default"), 1)); - apps.put(APP_2, new MockDeployer.ApplicationContext(APP_2, clusterApp2, wantedNodesApp2, Optional.of("default"), 1)); - deployer = new MockDeployer(provisioner, apps); - // ... and the other services - hostLivenessTracker = new TestHostLivenessTracker(clock); - serviceMonitor = new ServiceMonitorStub(apps, nodeRepository); - orchestrator = new OrchestratorMock(); - - failer = createFailer(); - } - - private NodeFailer createFailer() { - return new NodeFailer(deployer, hostLivenessTracker, serviceMonitor, nodeRepository, DOWNTIME_LIMIT_ONE_HOUR, clock, orchestrator); - } - @Test public void nodes_for_suspended_applications_are_not_failed() throws ApplicationStateChangeDeniedException, ApplicationIdNotFoundException { - orchestrator.suspend(APP_1); + NodeFailTester tester = NodeFailTester.withTwoApplications(); + tester.suspend(NodeFailTester.app1); // Set two nodes down (one for each application) and wait 65 minutes - String host_from_suspended_app = nodeRepository.getNodes(APP_1, Node.State.active).get(1).hostname(); - String host_from_normal_app = nodeRepository.getNodes(APP_2, Node.State.active).get(3).hostname(); - serviceMonitor.setHostDown(host_from_suspended_app); - serviceMonitor.setHostDown(host_from_normal_app); - failer.run(); - clock.advance(Duration.ofMinutes(65)); - failer.run(); - - assertEquals(Node.State.failed, nodeRepository.getNode(host_from_normal_app).get().state()); - assertEquals(Node.State.active, nodeRepository.getNode(host_from_suspended_app).get().state()); + String host_from_suspended_app = tester.nodeRepository.getNodes(NodeFailTester.app1, Node.State.active).get(1).hostname(); + String host_from_normal_app = 
tester.nodeRepository.getNodes(NodeFailTester.app2, Node.State.active).get(3).hostname(); + tester.serviceMonitor.setHostDown(host_from_suspended_app); + tester.serviceMonitor.setHostDown(host_from_normal_app); + tester.failer.run(); + tester.clock.advance(Duration.ofMinutes(65)); + tester.failer.run(); + + assertEquals(Node.State.failed, tester.nodeRepository.getNode(host_from_normal_app).get().state()); + assertEquals(Node.State.active, tester.nodeRepository.getNode(host_from_suspended_app).get().state()); } @Test public void test_node_failing() throws InterruptedException { + NodeFailTester tester = NodeFailTester.withTwoApplications(); + // For a day all nodes work so nothing happens for (int minutes = 0; minutes < 24 * 60; minutes +=5 ) { - failer.run(); - clock.advance(Duration.ofMinutes(5)); - allNodesMakeAConfigRequestExcept(); + tester.failer.run(); + tester.clock.advance(Duration.ofMinutes(5)); + tester.allNodesMakeAConfigRequestExcept(); - assertEquals( 0, deployer.redeployments); - assertEquals(12, nodeRepository.getNodes(Node.Type.tenant, Node.State.active).size()); - assertEquals( 0, nodeRepository.getNodes(Node.Type.tenant, Node.State.failed).size()); - assertEquals( 4, nodeRepository.getNodes(Node.Type.tenant, Node.State.ready).size()); + assertEquals( 0, tester.deployer.redeployments); + assertEquals(12, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.active).size()); + assertEquals( 0, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size()); + assertEquals( 4, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size()); } // Failures are detected on two ready nodes, which are then failed - Node readyFail1 = nodeRepository.getNodes(Node.Type.tenant, Node.State.ready).get(2); - Node readyFail2 = nodeRepository.getNodes(Node.Type.tenant, Node.State.ready).get(3); - nodeRepository.write(readyFail1.with(readyFail1.status().withHardwareFailure(Optional.of(Status.HardwareFailureType.memory_mcelog)))); - 
nodeRepository.write(readyFail2.with(readyFail2.status().withHardwareFailure(Optional.of(Status.HardwareFailureType.disk_smart)))); - assertEquals(4, nodeRepository.getNodes(Node.Type.tenant, Node.State.ready).size()); - failer.run(); - assertEquals(2, nodeRepository.getNodes(Node.Type.tenant, Node.State.ready).size()); - assertEquals(Node.State.failed, nodeRepository.getNode(readyFail1.hostname()).get().state()); - assertEquals(Node.State.failed, nodeRepository.getNode(readyFail2.hostname()).get().state()); + Node readyFail1 = tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).get(2); + Node readyFail2 = tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).get(3); + tester.nodeRepository.write(readyFail1.with(readyFail1.status().withHardwareFailure(Optional.of(Status.HardwareFailureType.memory_mcelog)))); + tester.nodeRepository.write(readyFail2.with(readyFail2.status().withHardwareFailure(Optional.of(Status.HardwareFailureType.disk_smart)))); + assertEquals(4, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size()); + tester.failer.run(); + assertEquals(2, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size()); + assertEquals(Node.State.failed, tester.nodeRepository.getNode(readyFail1.hostname()).get().state()); + assertEquals(Node.State.failed, tester.nodeRepository.getNode(readyFail2.hostname()).get().state()); - String downHost1 = nodeRepository.getNodes(APP_1, Node.State.active).get(1).hostname(); - String downHost2 = nodeRepository.getNodes(APP_2, Node.State.active).get(3).hostname(); - serviceMonitor.setHostDown(downHost1); - serviceMonitor.setHostDown(downHost2); + String downHost1 = tester.nodeRepository.getNodes(NodeFailTester.app1, Node.State.active).get(1).hostname(); + String downHost2 = tester.nodeRepository.getNodes(NodeFailTester.app2, Node.State.active).get(3).hostname(); + tester.serviceMonitor.setHostDown(downHost1); + tester.serviceMonitor.setHostDown(downHost2); // nothing 
happens the first 45 minutes for (int minutes = 0; minutes < 45; minutes +=5 ) { - failer.run(); - clock.advance(Duration.ofMinutes(5)); - allNodesMakeAConfigRequestExcept(); - assertEquals( 0, deployer.redeployments); - assertEquals(12, nodeRepository.getNodes(Node.Type.tenant, Node.State.active).size()); - assertEquals( 2, nodeRepository.getNodes(Node.Type.tenant, Node.State.failed).size()); - assertEquals( 2, nodeRepository.getNodes(Node.Type.tenant, Node.State.ready).size()); - } - serviceMonitor.setHostUp(downHost1); + tester.failer.run(); + tester.clock.advance(Duration.ofMinutes(5)); + tester.allNodesMakeAConfigRequestExcept(); + assertEquals( 0, tester.deployer.redeployments); + assertEquals(12, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.active).size()); + assertEquals( 2, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size()); + assertEquals( 2, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size()); + } + tester.serviceMonitor.setHostUp(downHost1); for (int minutes = 0; minutes < 30; minutes +=5 ) { - failer.run(); - clock.advance(Duration.ofMinutes(5)); - allNodesMakeAConfigRequestExcept(); + tester.failer.run(); + tester.clock.advance(Duration.ofMinutes(5)); + tester.allNodesMakeAConfigRequestExcept(); } // downHost2 should now be failed and replaced, but not downHost1 - assertEquals( 1, deployer.redeployments); - assertEquals(12, nodeRepository.getNodes(Node.Type.tenant, Node.State.active).size()); - assertEquals( 3, nodeRepository.getNodes(Node.Type.tenant, Node.State.failed).size()); - assertEquals( 1, nodeRepository.getNodes(Node.Type.tenant, Node.State.ready).size()); - assertEquals(downHost2, nodeRepository.getNodes(Node.Type.tenant, Node.State.failed).get(0).hostname()); + assertEquals( 1, tester.deployer.redeployments); + assertEquals(12, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.active).size()); + assertEquals( 3, tester.nodeRepository.getNodes(NodeType.tenant, 
Node.State.failed).size()); + assertEquals( 1, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size()); + assertEquals(downHost2, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.failed).get(0).hostname()); // downHost1 fails again - serviceMonitor.setHostDown(downHost1); - failer.run(); - clock.advance(Duration.ofMinutes(5)); - allNodesMakeAConfigRequestExcept(); + tester.serviceMonitor.setHostDown(downHost1); + tester.failer.run(); + tester.clock.advance(Duration.ofMinutes(5)); + tester.allNodesMakeAConfigRequestExcept(); // the system goes down and do not have updated information when coming back - clock.advance(Duration.ofMinutes(120)); - failer = createFailer(); - serviceMonitor.setStatusIsKnown(false); - failer.run(); + tester.clock.advance(Duration.ofMinutes(120)); + tester.failer = tester.createFailer(); + tester.serviceMonitor.setStatusIsKnown(false); + tester.failer.run(); // due to this, nothing is failed - assertEquals( 1, deployer.redeployments); - assertEquals(12, nodeRepository.getNodes(Node.Type.tenant, Node.State.active).size()); - assertEquals( 3, nodeRepository.getNodes(Node.Type.tenant, Node.State.failed).size()); - assertEquals( 1, nodeRepository.getNodes(Node.Type.tenant, Node.State.ready).size()); + assertEquals( 1, tester.deployer.redeployments); + assertEquals(12, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.active).size()); + assertEquals( 3, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size()); + assertEquals( 1, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size()); // when status becomes known, and the host is still down, it is failed - clock.advance(Duration.ofMinutes(5)); - allNodesMakeAConfigRequestExcept(); - serviceMonitor.setStatusIsKnown(true); - failer.run(); - assertEquals( 2, deployer.redeployments); - assertEquals(12, nodeRepository.getNodes(Node.Type.tenant, Node.State.active).size()); - assertEquals( 4, 
nodeRepository.getNodes(Node.Type.tenant, Node.State.failed).size()); - assertEquals( 0, nodeRepository.getNodes(Node.Type.tenant, Node.State.ready).size()); + tester.clock.advance(Duration.ofMinutes(5)); + tester.allNodesMakeAConfigRequestExcept(); + tester.serviceMonitor.setStatusIsKnown(true); + tester.failer.run(); + assertEquals( 2, tester.deployer.redeployments); + assertEquals(12, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.active).size()); + assertEquals( 4, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size()); + assertEquals( 0, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size()); // the last host goes down - Node lastNode = highestIndex(nodeRepository.getNodes(APP_1, Node.State.active)); - serviceMonitor.setHostDown(lastNode.hostname()); + Node lastNode = tester.highestIndex(tester.nodeRepository.getNodes(NodeFailTester.app1, Node.State.active)); + tester.serviceMonitor.setHostDown(lastNode.hostname()); // it is not failed because there are no ready nodes to replace it for (int minutes = 0; minutes < 75; minutes +=5 ) { - failer.run(); - clock.advance(Duration.ofMinutes(5)); - allNodesMakeAConfigRequestExcept(); - assertEquals( 2, deployer.redeployments); - assertEquals(12, nodeRepository.getNodes(Node.Type.tenant, Node.State.active).size()); - assertEquals( 4, nodeRepository.getNodes(Node.Type.tenant, Node.State.failed).size()); - assertEquals( 0, nodeRepository.getNodes(Node.Type.tenant, Node.State.ready).size()); + tester.failer.run(); + tester.clock.advance(Duration.ofMinutes(5)); + tester.allNodesMakeAConfigRequestExcept(); + assertEquals( 2, tester.deployer.redeployments); + assertEquals(12, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.active).size()); + assertEquals( 4, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size()); + assertEquals( 0, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size()); } // A new node is available - 
createReadyNodes(1, 16, nodeRepository, NODE_FLAVORS); - failer.run(); + tester.createReadyNodes(1, 16); + tester.failer.run(); // The node is now failed - assertEquals( 3, deployer.redeployments); - assertEquals(12, nodeRepository.getNodes(Node.Type.tenant, Node.State.active).size()); - assertEquals( 5, nodeRepository.getNodes(Node.Type.tenant, Node.State.failed).size()); - assertEquals( 0, nodeRepository.getNodes(Node.Type.tenant, Node.State.ready).size()); + assertEquals( 3, tester.deployer.redeployments); + assertEquals(12, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.active).size()); + assertEquals( 5, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size()); + assertEquals( 0, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size()); assertTrue("The index of the last failed node is not reused", - highestIndex(nodeRepository.getNodes(APP_1, Node.State.active)).allocation().get().membership().index() + tester.highestIndex(tester.nodeRepository.getNodes(NodeFailTester.app1, Node.State.active)).allocation().get().membership().index() > lastNode.allocation().get().membership().index()); } @Test public void testFailingReadyNodes() { + NodeFailTester tester = NodeFailTester.withTwoApplications(); + // Add ready docker node - createReadyNodes(1, 16, nodeRepository, NODE_FLAVORS.getFlavorOrThrow("docker")); + tester.createReadyNodes(1, 16, "docker"); // For a day all nodes work so nothing happens for (int minutes = 0; minutes < 24 * 60; minutes +=5 ) { - clock.advance(Duration.ofMinutes(5)); - allNodesMakeAConfigRequestExcept(); - failer.run(); - assertEquals( 5, nodeRepository.getNodes(Node.Type.tenant, Node.State.ready).size()); + tester.clock.advance(Duration.ofMinutes(5)); + tester.allNodesMakeAConfigRequestExcept(); + tester.failer.run(); + assertEquals( 5, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size()); } - List<Node> ready = nodeRepository.getNodes(Node.Type.tenant, Node.State.ready); + 
List<Node> ready = tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready); - // Two ready nodes die and a ready docker node "dies" (Vespa does not run when in ready state for docker node, so - // it does not mae config requests) - clock.advance(Duration.ofMinutes(180)); - Node dockerNode = ready.stream().filter(node -> node.flavor() == NODE_FLAVORS.getFlavorOrThrow("docker")).findFirst().get(); + // Two ready nodes die and a ready docker node "dies" + // (Vespa does not run when in ready state for docker node, so it does not make config requests) + tester.clock.advance(Duration.ofMinutes(180)); + Node dockerNode = ready.stream().filter(node -> node.flavor() == NodeFailTester.nodeFlavors.getFlavorOrThrow("docker")).findFirst().get(); List<Node> otherNodes = ready.stream() - .filter(node -> node.flavor() != NODE_FLAVORS.getFlavorOrThrow("docker")) + .filter(node -> node.flavor() != NodeFailTester.nodeFlavors.getFlavorOrThrow("docker")) .collect(Collectors.toList()); - allNodesMakeAConfigRequestExcept(otherNodes.get(0), otherNodes.get(2), dockerNode); - failer.run(); - assertEquals( 3, nodeRepository.getNodes(Node.Type.tenant, Node.State.ready).size()); - assertEquals( 2, nodeRepository.getNodes(Node.Type.tenant, Node.State.failed).size()); + tester.allNodesMakeAConfigRequestExcept(otherNodes.get(0), otherNodes.get(2), dockerNode); + tester.failer.run(); + assertEquals( 3, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size()); + assertEquals( 2, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size()); // Another ready node die - clock.advance(Duration.ofMinutes(180)); - allNodesMakeAConfigRequestExcept(otherNodes.get(0), otherNodes.get(2), dockerNode, otherNodes.get(3)); - failer.run(); - assertEquals( 2, nodeRepository.getNodes(Node.Type.tenant, Node.State.ready).size()); - assertEquals(ready.get(1), nodeRepository.getNodes(Node.Type.tenant, Node.State.ready).get(0)); - assertEquals( 3, 
nodeRepository.getNodes(Node.Type.tenant, Node.State.failed).size()); - } - - private void allNodesMakeAConfigRequestExcept(Node ... deadNodeArray) { - Set<Node> deadNodes = new HashSet<>(Arrays.asList(deadNodeArray)); - for (Node node : nodeRepository.getNodes(Node.Type.tenant)) { - if ( ! deadNodes.contains(node) && node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER) - hostLivenessTracker.receivedRequestFrom(node.hostname()); - } - } - - private void createReadyNodes(int count, NodeRepository nodeRepository, NodeFlavors nodeFlavors) { - createReadyNodes(count, 0, nodeRepository, nodeFlavors); - } - - private void createReadyNodes(int count, int startIndex, NodeRepository nodeRepository, NodeFlavors nodeFlavors) { - createReadyNodes(count, startIndex, nodeRepository, nodeFlavors.getFlavorOrThrow("default")); - } - - private void createReadyNodes(int count, int startIndex, NodeRepository nodeRepository, Flavor flavor) { - List<Node> nodes = new ArrayList<>(count); - for (int i = startIndex; i < startIndex + count; i++) - nodes.add(nodeRepository.createNode("node" + i, "host" + i, Optional.empty(), flavor, Node.Type.tenant)); - nodes = nodeRepository.addNodes(nodes); - nodeRepository.setReady(nodes); - } - - private void createHostNodes(int count, NodeRepository nodeRepository, NodeFlavors nodeFlavors) { - List<Node> nodes = new ArrayList<>(count); - for (int i = 0; i < count; i++) - nodes.add(nodeRepository.createNode("parent" + i, "parent" + i, Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), Node.Type.host)); - nodes = nodeRepository.addNodes(nodes); - nodeRepository.setReady(nodes); - } - - private void activate(ApplicationId applicationId, ClusterSpec cluster, int nodeCount, NodeRepositoryProvisioner provisioner) { - List<HostSpec> hosts = provisioner.prepare(applicationId, cluster, Capacity.fromNodeCount(nodeCount), 1, null); - NestedTransaction transaction = new NestedTransaction().add(new CuratorTransaction(curator)); - 
provisioner.activate(transaction, applicationId, hosts); - transaction.commit(); - } - - /** Returns the node with the highest membership index from the given set of allocated nodes */ - private Node highestIndex(List<Node> nodes) { - Node highestIndex = null; - for (Node node : nodes) { - if (highestIndex == null || node.allocation().get().membership().index() > - highestIndex.allocation().get().membership().index()) - highestIndex = node; - } - return highestIndex; - } - - /** This is a fully functional implementation */ - private static class TestHostLivenessTracker implements HostLivenessTracker { - - private final Clock clock; - private final Map<String, Instant> lastRequestFromHost = new HashMap<>(); - - public TestHostLivenessTracker(Clock clock) { - this.clock = clock; - } - - @Override - public void receivedRequestFrom(String hostname) { - lastRequestFromHost.put(hostname, clock.instant()); - } - - @Override - public Optional<Instant> lastRequestFrom(String hostname) { - return Optional.ofNullable(lastRequestFromHost.get(hostname)); - } - + tester.clock.advance(Duration.ofMinutes(180)); + tester.allNodesMakeAConfigRequestExcept(otherNodes.get(0), otherNodes.get(2), dockerNode, otherNodes.get(3)); + tester.failer.run(); + assertEquals( 2, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size()); + assertEquals(ready.get(1), tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).get(0)); + assertEquals( 3, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size()); } - private static class ServiceMonitorStub implements ServiceMonitor { - - private final Map<ApplicationId, MockDeployer.ApplicationContext> apps; - private final NodeRepository nodeRepository; - - private Set<String> downHosts = new HashSet<>(); - private boolean statusIsKnown = true; - - /** Create a service monitor where all nodes are initially up */ - public ServiceMonitorStub(Map<ApplicationId, MockDeployer.ApplicationContext> apps, NodeRepository 
nodeRepository) { - this.apps = apps; - this.nodeRepository = nodeRepository; - } - - public void setHostDown(String hostname) { - downHosts.add(hostname); - } - - public void setHostUp(String hostname) { - downHosts.remove(hostname); - } - - public void setStatusIsKnown(boolean statusIsKnown) { - this.statusIsKnown = statusIsKnown; - } - - private ServiceMonitorStatus getHostStatus(String hostname) { - if ( ! statusIsKnown) return ServiceMonitorStatus.NOT_CHECKED; - if (downHosts.contains(hostname)) return ServiceMonitorStatus.DOWN; - return ServiceMonitorStatus.UP; - } - - @Override - public Map<ApplicationInstanceReference, ApplicationInstance<ServiceMonitorStatus>> queryStatusOfAllApplicationInstances() { - // Convert apps information to the response payload to return - Map<ApplicationInstanceReference, ApplicationInstance<ServiceMonitorStatus>> status = new HashMap<>(); - for (Map.Entry<ApplicationId, MockDeployer.ApplicationContext> app : apps.entrySet()) { - Set<ServiceInstance<ServiceMonitorStatus>> serviceInstances = new HashSet<>(); - for (Node node : nodeRepository.getNodes(app.getValue().id(), Node.State.active)) { - serviceInstances.add(new ServiceInstance<>(new ConfigId("configid"), - new HostName(node.hostname()), - getHostStatus(node.hostname()))); - } - Set<ServiceCluster<ServiceMonitorStatus>> serviceClusters = new HashSet<>(); - serviceClusters.add(new ServiceCluster<>(new ClusterId(app.getValue().cluster().id().value()), - new ServiceType("serviceType"), - serviceInstances)); - TenantId tenantId = new TenantId(app.getKey().tenant().value()); - ApplicationInstanceId applicationInstanceId = new ApplicationInstanceId(app.getKey().application().value()); - status.put(new ApplicationInstanceReference(tenantId, applicationInstanceId), - new ApplicationInstance<>(tenantId, applicationInstanceId, serviceClusters)); - } - return status; - } - - } - - class OrchestratorMock implements Orchestrator { - - Set<ApplicationId> suspendedApplications = new 
HashSet<>(); - - @Override - public HostStatus getNodeStatus(HostName hostName) throws HostNameNotFoundException { - return null; - } - - @Override - public void resume(HostName hostName) throws HostStateChangeDeniedException, HostNameNotFoundException {} - - @Override - public void suspend(HostName hostName) throws HostStateChangeDeniedException, HostNameNotFoundException {} - - @Override - public ApplicationInstanceStatus getApplicationInstanceStatus(ApplicationId appId) throws ApplicationIdNotFoundException { - return suspendedApplications.contains(appId) - ? ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN : ApplicationInstanceStatus.NO_REMARKS; - } + @Test + public void testFailingProxyNodes() { + NodeFailTester tester = NodeFailTester.withProxyApplication(); - @Override - public Set<ApplicationId> getAllSuspendedApplications() { - return null; - } + // For a day all nodes work so nothing happens + for (int minutes = 0; minutes < 24 * 60; minutes +=5 ) { + tester.failer.run(); + tester.clock.advance(Duration.ofMinutes(5)); + tester.allNodesMakeAConfigRequestExcept(); - @Override - public void resume(ApplicationId appId) throws ApplicationStateChangeDeniedException, ApplicationIdNotFoundException { - suspendedApplications.remove(appId); + assertEquals(16, tester.nodeRepository.getNodes(NodeType.proxy, Node.State.active).size()); } - @Override - public void suspend(ApplicationId appId) throws ApplicationStateChangeDeniedException, ApplicationIdNotFoundException { - suspendedApplications.add(appId); - } + Set<String> downHosts = new HashSet<>(); + downHosts.add("host4"); + downHosts.add("host5"); - @Override - public void suspendAll(HostName parentHostname, List<HostName> hostNames) throws BatchInternalErrorException, BatchHostStateChangeDeniedException, BatchHostNameNotFoundException { - throw new RuntimeException("Not implemented"); - } + for (String downHost : downHosts) + tester.serviceMonitor.setHostDown(downHost); + // nothing happens the first 45 minutes + for 
(int minutes = 0; minutes < 45; minutes +=5 ) { + tester.failer.run(); + tester.clock.advance(Duration.ofMinutes(5)); + tester.allNodesMakeAConfigRequestExcept(); + assertEquals( 0, tester.deployer.redeployments); + assertEquals(16, tester.nodeRepository.getNodes(NodeType.proxy, Node.State.active).size()); + assertEquals( 0, tester.nodeRepository.getNodes(NodeType.proxy, Node.State.failed).size()); + } + + tester.clock.advance(Duration.ofMinutes(60)); + tester.failer.run(); + + // one down host should now be failed, but not two as we are only allowed to fail one proxy + assertEquals( 1, tester.deployer.redeployments); + assertEquals(15, tester.nodeRepository.getNodes(NodeType.proxy, Node.State.active).size()); + assertEquals( 1, tester.nodeRepository.getNodes(NodeType.proxy, Node.State.failed).size()); + String failedHost1 = tester.nodeRepository.getNodes(NodeType.proxy, Node.State.failed).get(0).hostname(); + assertTrue(downHosts.contains(failedHost1)); + + // trying to fail again will still not fail the other down host + tester.clock.advance(Duration.ofMinutes(60)); + tester.failer.run(); + assertEquals(15, tester.nodeRepository.getNodes(NodeType.proxy, Node.State.active).size()); + + // The first down host is removed, which causes the second one to be moved to failed + tester.nodeRepository.remove(failedHost1); + tester.failer.run(); + assertEquals( 2, tester.deployer.redeployments); + assertEquals(14, tester.nodeRepository.getNodes(NodeType.proxy, Node.State.active).size()); + assertEquals( 1, tester.nodeRepository.getNodes(NodeType.proxy, Node.State.failed).size()); + String failedHost2 = tester.nodeRepository.getNodes(NodeType.proxy, Node.State.failed).get(0).hostname(); + assertFalse(failedHost1.equals(failedHost2)); + assertTrue(downHosts.contains(failedHost2)); } - + } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ReservationExpirerTest.java 
b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ReservationExpirerTest.java index a9859244569..c936bf038ca 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ReservationExpirerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ReservationExpirerTest.java @@ -4,6 +4,7 @@ package com.yahoo.vespa.hosted.provision.maintenance; import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.Capacity; import com.yahoo.config.provision.ClusterSpec; +import com.yahoo.config.provision.NodeType; import com.yahoo.config.provision.Zone; import com.yahoo.test.ManualClock; import com.yahoo.vespa.curator.Curator; @@ -40,26 +41,26 @@ public class ReservationExpirerTest { NodeRepositoryProvisioner provisioner = new NodeRepositoryProvisioner(nodeRepository, flavors, Zone.defaultZone(), clock); List<Node> nodes = new ArrayList<>(2); - nodes.add(nodeRepository.createNode(UUID.randomUUID().toString(), UUID.randomUUID().toString(), Optional.empty(), flavors.getFlavorOrThrow("default"), Node.Type.tenant)); - nodes.add(nodeRepository.createNode(UUID.randomUUID().toString(), UUID.randomUUID().toString(), Optional.empty(), flavors.getFlavorOrThrow("default"), Node.Type.tenant)); - nodes.add(nodeRepository.createNode(UUID.randomUUID().toString(), UUID.randomUUID().toString(), Optional.empty(), flavors.getFlavorOrThrow("default"), Node.Type.host)); + nodes.add(nodeRepository.createNode(UUID.randomUUID().toString(), UUID.randomUUID().toString(), Optional.empty(), flavors.getFlavorOrThrow("default"), NodeType.tenant)); + nodes.add(nodeRepository.createNode(UUID.randomUUID().toString(), UUID.randomUUID().toString(), Optional.empty(), flavors.getFlavorOrThrow("default"), NodeType.tenant)); + nodes.add(nodeRepository.createNode(UUID.randomUUID().toString(), UUID.randomUUID().toString(), Optional.empty(), flavors.getFlavorOrThrow("default"), NodeType.host)); nodes = 
nodeRepository.addNodes(nodes); // Reserve 2 nodes - assertEquals(2, nodeRepository.getNodes(Node.Type.tenant, Node.State.provisioned).size()); + assertEquals(2, nodeRepository.getNodes(NodeType.tenant, Node.State.provisioned).size()); nodeRepository.setReady(nodes); ApplicationId applicationId = new ApplicationId.Builder().tenant("foo").applicationName("bar").instanceName("fuz").build(); ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Optional.empty()); provisioner.prepare(applicationId, cluster, Capacity.fromNodeCount(2), 1, null); - assertEquals(2, nodeRepository.getNodes(Node.Type.tenant, Node.State.reserved).size()); + assertEquals(2, nodeRepository.getNodes(NodeType.tenant, Node.State.reserved).size()); // Reservation times out clock.advance(Duration.ofMinutes(14)); // Reserved but not used time out new ReservationExpirer(nodeRepository, clock, Duration.ofMinutes(10)).run(); // Assert nothing is reserved - assertEquals(0, nodeRepository.getNodes(Node.Type.tenant, Node.State.reserved).size()); - List<Node> dirty = nodeRepository.getNodes(Node.Type.tenant, Node.State.dirty); + assertEquals(0, nodeRepository.getNodes(NodeType.tenant, Node.State.reserved).size()); + List<Node> dirty = nodeRepository.getNodes(NodeType.tenant, Node.State.dirty); assertEquals(2, dirty.size()); assertFalse(dirty.get(0).allocation().isPresent()); assertFalse(dirty.get(1).allocation().isPresent()); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java index 834c72aad61..3b9f4469b01 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java @@ -8,6 +8,7 @@ import com.yahoo.config.provision.ClusterSpec; import 
com.yahoo.config.provision.Environment; import com.yahoo.config.provision.HostSpec; import com.yahoo.config.provision.InstanceName; +import com.yahoo.config.provision.NodeType; import com.yahoo.config.provision.RegionName; import com.yahoo.config.provision.TenantName; import com.yahoo.config.provision.Zone; @@ -66,7 +67,7 @@ public class RetiredExpirerTest { clock.advance(Duration.ofHours(30)); // Retire period spent MockDeployer deployer = new MockDeployer(provisioner, - Collections.singletonMap(applicationId, new MockDeployer.ApplicationContext(applicationId, cluster, wantedNodes, Optional.of("default"), 1))); + Collections.singletonMap(applicationId, new MockDeployer.ApplicationContext(applicationId, cluster, Capacity.fromNodeCount(wantedNodes, Optional.of("default")), 1))); new RetiredExpirer(nodeRepository, deployer, clock, Duration.ofHours(12)).run(); assertEquals(3, nodeRepository.getNodes(applicationId, Node.State.active).size()); assertEquals(4, nodeRepository.getNodes(applicationId, Node.State.inactive).size()); @@ -100,7 +101,7 @@ public class RetiredExpirerTest { clock.advance(Duration.ofHours(30)); // Retire period spent MockDeployer deployer = new MockDeployer(provisioner, - Collections.singletonMap(applicationId, new MockDeployer.ApplicationContext(applicationId, cluster, 1, Optional.of("default"), 1))); + Collections.singletonMap(applicationId, new MockDeployer.ApplicationContext(applicationId, cluster, Capacity.fromNodeCount(1, Optional.of("default")), 1))); new RetiredExpirer(nodeRepository, deployer, clock, Duration.ofHours(12)).run(); assertEquals(1, nodeRepository.getNodes(applicationId, Node.State.active).size()); assertEquals(7, nodeRepository.getNodes(applicationId, Node.State.inactive).size()); @@ -121,7 +122,7 @@ public class RetiredExpirerTest { private void createReadyNodes(int count, NodeRepository nodeRepository, NodeFlavors nodeFlavors) { List<Node> nodes = new ArrayList<>(count); for (int i = 0; i < count; i++) - 
nodes.add(nodeRepository.createNode("node" + i, "node" + i, Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), Node.Type.tenant)); + nodes.add(nodeRepository.createNode("node" + i, "node" + i, Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), NodeType.tenant)); nodes = nodeRepository.addNodes(nodes); nodeRepository.setReady(nodes); } @@ -129,7 +130,7 @@ public class RetiredExpirerTest { private void createHostNodes(int count, NodeRepository nodeRepository, NodeFlavors nodeFlavors) { List<Node> nodes = new ArrayList<>(count); for (int i = 0; i < count; i++) - nodes.add(nodeRepository.createNode("parent" + i, "parent" + i, Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), Node.Type.host)); + nodes.add(nodeRepository.createNode("parent" + i, "parent" + i, Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), NodeType.host)); nodes = nodeRepository.addNodes(nodes); nodeRepository.setReady(nodes); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ZooKeeperAccessMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ZooKeeperAccessMaintainerTest.java index 4d57980fff1..89b4995e22f 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ZooKeeperAccessMaintainerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ZooKeeperAccessMaintainerTest.java @@ -1,5 +1,6 @@ package com.yahoo.vespa.hosted.provision.maintenance; +import com.yahoo.config.provision.NodeType; import com.yahoo.vespa.hosted.provision.Node; import com.yahoo.vespa.hosted.provision.NodeRepositoryTester; import com.yahoo.vespa.zookeeper.ZooKeeperServer; @@ -29,29 +30,29 @@ public class ZooKeeperAccessMaintainerTest { maintainer.maintain(); assertEquals(asSet("server1,server2"), asSet(System.getProperty(ZooKeeperServer.ZOOKEEPER_VESPA_CLIENTS_PROPERTY))); - tester.addNode("id1", "host1", "default", Node.Type.tenant); - 
tester.addNode("id2", "host2", "default", Node.Type.tenant); - tester.addNode("id3", "host3", "default", Node.Type.tenant); + tester.addNode("id1", "host1", "default", NodeType.tenant); + tester.addNode("id2", "host2", "default", NodeType.tenant); + tester.addNode("id3", "host3", "default", NodeType.tenant); maintainer.maintain(); - assertEquals(3, tester.getNodes(Node.Type.tenant).size()); - assertEquals(0, tester.getNodes(Node.Type.proxy).size()); + assertEquals(3, tester.getNodes(NodeType.tenant).size()); + assertEquals(0, tester.getNodes(NodeType.proxy).size()); assertEquals(asSet("host1,host2,host3,server1,server2"), asSet(System.getProperty(ZooKeeperServer.ZOOKEEPER_VESPA_CLIENTS_PROPERTY))); - tester.addNode("proxy1", "host4", "default", Node.Type.proxy); - tester.addNode("proxy2", "host5", "default", Node.Type.proxy); + tester.addNode("proxy1", "host4", "default", NodeType.proxy); + tester.addNode("proxy2", "host5", "default", NodeType.proxy); maintainer.maintain(); - assertEquals(3, tester.getNodes(Node.Type.tenant).size()); - assertEquals(2, tester.getNodes(Node.Type.proxy).size()); + assertEquals(3, tester.getNodes(NodeType.tenant).size()); + assertEquals(2, tester.getNodes(NodeType.proxy).size()); assertEquals(asSet("host1,host2,host3,host4,host5,server1,server2"), asSet(System.getProperty(ZooKeeperServer.ZOOKEEPER_VESPA_CLIENTS_PROPERTY))); tester.nodeRepository().move("host2", Node.State.parked); assertTrue(tester.nodeRepository().remove("host2")); maintainer.maintain(); - assertEquals(2, tester.getNodes(Node.Type.tenant).size()); - assertEquals(2, tester.getNodes(Node.Type.proxy).size()); + assertEquals(2, tester.getNodes(NodeType.tenant).size()); + assertEquals(2, tester.getNodes(NodeType.proxy).size()); assertEquals(asSet("host1,host3,host4,host5,server1,server2"), asSet(System.getProperty(ZooKeeperServer.ZOOKEEPER_VESPA_CLIENTS_PROPERTY))); } diff --git 
a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/monitoring/ProvisionMetricsTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/monitoring/ProvisionMetricsTest.java index a66d127a49c..2e6f7d1867f 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/monitoring/ProvisionMetricsTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/monitoring/ProvisionMetricsTest.java @@ -1,6 +1,7 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.provision.monitoring; +import com.yahoo.config.provision.NodeType; import com.yahoo.jdisc.Metric; import com.yahoo.vespa.curator.Curator; import com.yahoo.vespa.curator.mock.MockCurator; @@ -27,9 +28,9 @@ public class ProvisionMetricsTest { final NodeFlavors nodeFlavors = FlavorConfigBuilder.createDummies("default"); final Curator curator = new MockCurator(); final NodeRepository nodeRepository = new NodeRepository(nodeFlavors, curator); - final Node node = nodeRepository.createNode("openStackId", "hostname", Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), Node.Type.tenant); + final Node node = nodeRepository.createNode("openStackId", "hostname", Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), NodeType.tenant); nodeRepository.addNodes(Collections.singletonList(node)); - final Node hostNode = nodeRepository.createNode("openStackId2", "parent", Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), Node.Type.host); + final Node hostNode = nodeRepository.createNode("openStackId2", "parent", Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), NodeType.host); nodeRepository.addNodes(Collections.singletonList(hostNode)); final Map<String, Number> expectedMetrics = new HashMap<>(); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClientTest.java 
b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClientTest.java index 3873ff1de98..f9f44fb55b2 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClientTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClientTest.java @@ -4,6 +4,7 @@ package com.yahoo.vespa.hosted.provision.persistence; import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.ApplicationName; import com.yahoo.config.provision.InstanceName; +import com.yahoo.config.provision.NodeType; import com.yahoo.config.provision.TenantName; import com.yahoo.vespa.curator.Curator; import com.yahoo.vespa.curator.mock.MockCurator; @@ -30,7 +31,7 @@ public class CuratorDatabaseClientTest { List<Node> allocatedNodes = zkClient.getNodes(Node.State.ready); assertEquals(1, allocatedNodes.size()); - assertEquals(Node.Type.host, allocatedNodes.get(0).type()); + assertEquals(NodeType.host, allocatedNodes.get(0).type()); } /** Test that locks can be acquired and released */ diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/SerializationTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/SerializationTest.java index 934b465b624..f05143a05b6 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/SerializationTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/SerializationTest.java @@ -6,6 +6,7 @@ import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.ApplicationName; import com.yahoo.config.provision.ClusterMembership; import com.yahoo.config.provision.InstanceName; +import com.yahoo.config.provision.NodeType; import com.yahoo.config.provision.TenantName; import com.yahoo.test.ManualClock; import com.yahoo.text.Utf8; @@ -65,7 +66,7 @@ public class SerializationTest { node = 
node.with(node.status().withVespaVersion(Version.fromString("1.2.3"))); node = node.with(node.status().withIncreasedFailCount().withIncreasedFailCount()); node = node.with(node.status().withHardwareFailure(Optional.of(Status.HardwareFailureType.memory_mcelog))); - node = node.with(Node.Type.tenant); + node = node.with(NodeType.tenant); Node copy = nodeSerializer.fromJson(Node.State.provisioned, nodeSerializer.toJson(node)); assertEquals(node.id(), copy.id()); @@ -84,7 +85,7 @@ public class SerializationTest { assertEquals(node.allocation().get().isRemovable(), copy.allocation().get().isRemovable()); assertEquals(1, copy.history().events().size()); assertEquals(clock.instant(), copy.history().event(History.Event.Type.reserved).get().at()); - assertEquals(Node.Type.tenant, copy.type()); + assertEquals(NodeType.tenant, copy.type()); } @Test @@ -95,7 +96,7 @@ public class SerializationTest { ClusterMembership.from("content/myId/0/0", Optional.empty()), clock.instant()); Node copy = nodeSerializer.fromJson(Node.State.provisioned, nodeSerializer.toJson(node)); - assertEquals(Node.Type.host, copy.type()); + assertEquals(NodeType.host, copy.type()); } @Test @@ -133,7 +134,7 @@ public class SerializationTest { assertEquals(3, node.allocation().get().restartGeneration().wanted()); assertEquals(4, node.allocation().get().restartGeneration().current()); assertTrue(node.allocation().get().isRemovable()); - assertEquals(Node.Type.tenant, node.type()); + assertEquals(NodeType.tenant, node.type()); } // TODO: Remove when 6.31 is deployed everywhere @@ -310,14 +311,14 @@ public class SerializationTest { @Test public void serialize_parentHostname() { final String parentHostname = "parent.yahoo.com"; - Node node = Node.create("myId", "myHostname", Optional.of(parentHostname), nodeFlavors.getFlavorOrThrow("default"), Node.Type.tenant); + Node node = Node.create("myId", "myHostname", Optional.of(parentHostname), nodeFlavors.getFlavorOrThrow("default"), NodeType.tenant); Node 
deserializedNode = nodeSerializer.fromJson(State.provisioned, nodeSerializer.toJson(node)); assertEquals(parentHostname, deserializedNode.parentHostname().get()); } private Node createNode() { - return Node.create("myId", "myHostname", Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), Node.Type.host); + return Node.create("myId", "myHostname", Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), NodeType.host); } } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/MultigroupProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/MultigroupProvisioningTest.java index acd687e197e..fa00b7d60b8 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/MultigroupProvisioningTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/MultigroupProvisioningTest.java @@ -131,7 +131,9 @@ public class MultigroupProvisioningTest { tester.advanceTime(Duration.ofDays(7)); MockDeployer deployer = new MockDeployer(tester.provisioner(), - Collections.singletonMap(application1, new MockDeployer.ApplicationContext(application1, cluster(), 8, Optional.of("large"), 1))); + Collections.singletonMap(application1, + new MockDeployer.ApplicationContext(application1, cluster(), + Capacity.fromNodeCount(8, Optional.of("large")), 1))); new RetiredExpirer(tester.nodeRepository(), deployer, tester.clock(), Duration.ofHours(12)).run(); assertEquals(8, tester.getNodes(application1, Node.State.inactive).flavor("small").size()); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeTypeProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeTypeProvisioningTest.java new file mode 100644 index 00000000000..fd6d5e8e455 --- /dev/null +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/NodeTypeProvisioningTest.java @@ -0,0 +1,95 
@@ +package com.yahoo.vespa.hosted.provision.provisioning; + +import com.yahoo.config.provision.ApplicationId; +import com.yahoo.config.provision.Capacity; +import com.yahoo.config.provision.ClusterSpec; +import com.yahoo.config.provision.Environment; +import com.yahoo.config.provision.HostFilter; +import com.yahoo.config.provision.HostSpec; +import com.yahoo.config.provision.NodeType; +import com.yahoo.config.provision.RegionName; +import com.yahoo.config.provision.Zone; +import com.yahoo.transaction.NestedTransaction; +import com.yahoo.vespa.hosted.provision.Node; +import org.junit.Test; + +import java.util.HashSet; +import java.util.List; +import java.util.Optional; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + +/** + * Tests provisioning by node type instead of by count and flavor + * + * @author bratseth + */ +public class NodeTypeProvisioningTest { + + @Test + public void proxy_deployment() { + ProvisioningTester tester = new ProvisioningTester(new Zone(Environment.prod, RegionName.from("us-east"))); + + tester.makeReadyNodes( 1, "small", NodeType.proxy); + tester.makeReadyNodes( 3, "small", NodeType.host); + tester.makeReadyNodes( 5, "small", NodeType.tenant); + tester.makeReadyNodes(10, "large", NodeType.proxy); + tester.makeReadyNodes(20, "large", NodeType.host); + tester.makeReadyNodes(40, "large", NodeType.tenant); + + ApplicationId application = tester.makeApplicationId(); // application using proxy nodes + + + { // Deploy + List<HostSpec> hosts = deployProxies(application, tester); + assertEquals("Reserved all proxies", 11, hosts.size()); + tester.activate(application, new HashSet<>(hosts)); + List<Node> nodes = tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active); + assertEquals("Activated all proxies", 11, nodes.size()); + for (Node node : nodes) + assertEquals(NodeType.proxy, node.type()); + } + + { // Redeploy with no changes + List<HostSpec> hosts = deployProxies(application, tester); 
+ assertEquals(11, hosts.size()); + tester.activate(application, new HashSet<>(hosts)); + List<Node> nodes = tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active); + assertEquals(11, nodes.size()); + } + + { // Add 2 ready proxies then redeploy + tester.makeReadyNodes(2, "small", NodeType.proxy); + List<HostSpec> hosts = deployProxies(application, tester); + assertEquals(13, hosts.size()); + tester.activate(application, new HashSet<>(hosts)); + List<Node> nodes = tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active); + assertEquals(13, nodes.size()); + } + + { // Remove 3 proxies then redeploy + List<Node> nodes = tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active); + tester.nodeRepository().fail(nodes.get(0).hostname()); + tester.nodeRepository().fail(nodes.get(1).hostname()); + tester.nodeRepository().fail(nodes.get(5).hostname()); + + List<HostSpec> hosts = deployProxies(application, tester); + assertEquals(10, hosts.size()); + tester.activate(application, new HashSet<>(hosts)); + nodes = tester.nodeRepository().getNodes(NodeType.proxy, Node.State.active); + assertEquals(10, nodes.size()); + } + } + + private List<HostSpec> deployProxies(ApplicationId application, ProvisioningTester tester) { + return tester.prepare(application, + ClusterSpec.request(ClusterSpec.Type.container, + ClusterSpec.Id.from("test"), + Optional.empty()), + Capacity.fromRequiredNodeType(NodeType.proxy), + 1); + + } + +} diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisionTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java index f53673898bb..2256c8d8645 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisionTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java @@ -13,7 +13,10 @@ import com.yahoo.config.provision.OutOfCapacityException; import 
com.yahoo.config.provision.RegionName; import com.yahoo.config.provision.Zone; import com.yahoo.transaction.NestedTransaction; +import com.yahoo.vespa.config.nodes.NodeRepositoryConfig; import com.yahoo.vespa.hosted.provision.Node; +import com.yahoo.vespa.hosted.provision.node.Flavor; +import com.yahoo.vespa.hosted.provision.testutils.FlavorConfigBuilder; import org.junit.Ignore; import org.junit.Test; @@ -21,6 +24,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertFalse; +import java.util.Collections; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -33,7 +37,7 @@ import java.util.stream.Collectors; * * @author bratseth */ -public class ProvisionTest { +public class ProvisioningTest { @Test public void application_deployment_constant_application_size() { @@ -466,8 +470,24 @@ public class ProvisionTest { } @Test - public void application_deployment_allocates_cheapest_available() { - ProvisioningTester tester = new ProvisioningTester(new Zone(Environment.prod, RegionName.from("us-east"))); + public void application_deployment_prefers_cheapest_stock_nodes() { + assertCorrectFlavorPreferences(true); + } + + @Test + public void application_deployment_prefers_exact_nonstock_nodes() { + assertCorrectFlavorPreferences(false); + } + + private void assertCorrectFlavorPreferences(boolean largeIsStock) { + FlavorConfigBuilder b = new FlavorConfigBuilder(); + b.addFlavor("large", 4., 8., 100, Flavor.Type.BARE_METAL).cost(10).stock(largeIsStock); + NodeRepositoryConfig.Flavor.Builder largeVariant = b.addFlavor("large-variant", 3., 9., 101, Flavor.Type.BARE_METAL).cost(9); + b.addReplaces("large", largeVariant); + NodeRepositoryConfig.Flavor.Builder largeVariantVariant = b.addFlavor("large-variant-variant", 4., 9., 101, Flavor.Type.BARE_METAL).cost(11); + b.addReplaces("large-variant", largeVariantVariant); + + ProvisioningTester tester = new ProvisioningTester(new 
Zone(Environment.prod, RegionName.from("us-east")), b.build()); tester.makeReadyNodes(6, "large"); //cost = 10 tester.makeReadyNodes(6, "large-variant"); //cost = 9 tester.makeReadyNodes(6, "large-variant-variant"); //cost = 11 @@ -476,28 +496,34 @@ public class ProvisionTest { ClusterSpec contentClusterSpec = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Optional.empty()); ClusterSpec containerClusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("myContainer"), Optional.empty()); + List<HostSpec> containerNodes = tester.prepare(applicationId, containerClusterSpec, 5, 1, "large"); + List<HostSpec> contentNodes = tester.prepare(applicationId, contentClusterSpec, 10, 1, "large"); - List<HostSpec> containerNodes = tester.prepare(applicationId, containerClusterSpec, 5, 1, "large"); //should be replaced by 5 large-variant - List<HostSpec> contentNodes = tester.prepare(applicationId, contentClusterSpec, 10, 1, "large"); // should give 1 large-variant, 6 large and 3 large-variant-variant - - tester.assertNumberOfNodesWithFlavor(containerNodes, "large-variant", 5); - tester.assertNumberOfNodesWithFlavor(contentNodes, "large-variant", 1); - tester.assertNumberOfNodesWithFlavor(contentNodes, "large", 6); + if (largeIsStock) { // 'large' is replaced by 'large-variant' when possible, as it is cheaper + tester.assertNumberOfNodesWithFlavor(containerNodes, "large-variant", 5); + tester.assertNumberOfNodesWithFlavor(contentNodes, "large-variant", 1); + tester.assertNumberOfNodesWithFlavor(contentNodes, "large", 6); + } + else { // 'large' is preferred when available, as it is what is exactly specified + tester.assertNumberOfNodesWithFlavor(containerNodes, "large", 5); + tester.assertNumberOfNodesWithFlavor(contentNodes, "large", 1); + tester.assertNumberOfNodesWithFlavor(contentNodes, "large-variant", 6); + } + // in both cases the most expensive, never exactly specified is least preferred 
tester.assertNumberOfNodesWithFlavor(contentNodes, "large-variant-variant", 3); } - private SystemState prepare(ApplicationId application, int container0Size, int container1Size, int content0Size, int content1Size, String flavor, ProvisioningTester tester) { // "deploy prepare" with a two container clusters and a storage cluster having of two groups ClusterSpec containerCluster0 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("container0"), Optional.empty()); ClusterSpec containerCluster1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("container1"), Optional.empty()); - ClusterSpec contentCluster0 = ClusterSpec.from(ClusterSpec.Type.content, ClusterSpec.Id.from("content0"), ClusterSpec.Group.from(0), Optional.empty()); - ClusterSpec contentCluster1 = ClusterSpec.from(ClusterSpec.Type.content, ClusterSpec.Id.from("content1"), ClusterSpec.Group.from(0), Optional.empty()); + ClusterSpec contentCluster0 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content0"), Optional.empty()); + ClusterSpec contentCluster1 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("content1"), Optional.empty()); - Set<HostSpec> container0 = new HashSet<>(tester.prepare(application, containerCluster0, container0Size, 1, flavor)); - Set<HostSpec> container1 = new HashSet<>(tester.prepare(application, containerCluster1, container1Size, 1, flavor)); - Set<HostSpec> content0 = new HashSet<>(tester.prepare(application, contentCluster0, content0Size, 1, flavor)); - Set<HostSpec> content1 = new HashSet<>(tester.prepare(application, contentCluster1, content1Size, 1, flavor)); + Set<HostSpec> container0 = prepare(application, containerCluster0, container0Size, 1, flavor, tester); + Set<HostSpec> container1 = prepare(application, containerCluster1, container1Size, 1, flavor, tester); + Set<HostSpec> content0 = prepare(application, contentCluster0, content0Size, 1, flavor, tester); + Set<HostSpec> content1 = 
prepare(application, contentCluster1, content1Size, 1, flavor, tester); Set<HostSpec> allHosts = new HashSet<>(); allHosts.addAll(container0); @@ -527,6 +553,11 @@ public class ProvisionTest { return new SystemState(allHosts, container0, container1, content0, content1); } + private Set<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, int nodeCount, int groups, String flavor, ProvisioningTester tester) { + if (nodeCount == 0) return Collections.emptySet(); // this is a shady practice + return new HashSet<>(tester.prepare(application, cluster, nodeCount, groups, flavor)); + } + private static class SystemState { private Set<HostSpec> allHosts; diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java index 0e32bbb79d5..6aea7ae4d61 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java @@ -8,6 +8,7 @@ import com.yahoo.config.provision.ClusterSpec; import com.yahoo.config.provision.HostFilter; import com.yahoo.config.provision.HostSpec; import com.yahoo.config.provision.InstanceName; +import com.yahoo.config.provision.NodeType; import com.yahoo.config.provision.ProvisionLogger; import com.yahoo.config.provision.TenantName; import com.yahoo.config.provision.Zone; @@ -71,6 +72,20 @@ public class ProvisioningTester implements AutoCloseable { } } + public ProvisioningTester(Zone zone, NodeRepositoryConfig config) { + try { + nodeFlavors = new NodeFlavors(config); + clock = new ManualClock(); + nodeRepository = new NodeRepository(nodeFlavors, curator, clock); + provisioner = new NodeRepositoryProvisioner(nodeRepository, nodeFlavors, zone, clock); + capacityPolicies = new CapacityPolicies(zone, nodeFlavors); + provisionLogger = new 
NullProvisionLogger(); + } + catch (Exception e) { + throw new RuntimeException(e); + } + } + private NodeRepositoryConfig createConfig() { FlavorConfigBuilder b = new FlavorConfigBuilder(); b.addFlavor("default", 2., 4., 100, Flavor.Type.BARE_METAL).cost(3); @@ -78,7 +93,7 @@ public class ProvisioningTester implements AutoCloseable { b.addFlavor("docker1", 1., 1., 10, Flavor.Type.DOCKER_CONTAINER).cost(1); b.addFlavor("v-4-8-100", 4., 8., 100, Flavor.Type.VIRTUAL_MACHINE).cost(4); b.addFlavor("old-large1", 2., 4., 100, Flavor.Type.BARE_METAL).cost(6); - b.addFlavor("old-large2", 2., 5., 100, Flavor.Type.BARE_METAL).cost(8); + b.addFlavor("old-large2", 2., 5., 100, Flavor.Type.BARE_METAL).cost(14); NodeRepositoryConfig.Flavor.Builder large = b.addFlavor("large", 4., 8., 100, Flavor.Type.BARE_METAL).cost(10); b.addReplaces("old-large1", large); b.addReplaces("old-large2", large); @@ -113,6 +128,7 @@ public class ProvisioningTester implements AutoCloseable { public NodeRepositoryProvisioner provisioner() { return provisioner; } public CapacityPolicies capacityPolicies() { return capacityPolicies; } public NodeList getNodes(ApplicationId id, Node.State ... 
inState) { return new NodeList(nodeRepository.getNodes(id, inState)); } + public NodeFlavors flavors() { return nodeFlavors; } public void patchNode(Node node) { nodeRepository.write(node); } @@ -120,7 +136,6 @@ public class ProvisioningTester implements AutoCloseable { return prepare(application, cluster, Capacity.fromNodeCount(nodeCount, Optional.ofNullable(flavor)), groups); } public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity capacity, int groups) { - if (capacity.nodeCount() == 0) return Collections.emptyList(); Set<String> reservedBefore = toHostNames(nodeRepository.getNodes(application, Node.State.reserved)); Set<String> inactiveBefore = toHostNames(nodeRepository.getNodes(application, Node.State.inactive)); // prepare twice to ensure idempotence @@ -166,7 +181,7 @@ public class ProvisioningTester implements AutoCloseable { public void fail(HostSpec host) { int beforeFailCount = nodeRepository.getNode(host.hostname(), Node.State.active).get().status().failCount(); Node failedNode = nodeRepository.fail(host.hostname()); - assertTrue(nodeRepository.getNodes(Node.Type.tenant, Node.State.failed).contains(failedNode)); + assertTrue(nodeRepository.getNodes(NodeType.tenant, Node.State.failed).contains(failedNode)); assertEquals(beforeFailCount + 1, failedNode.status().failCount()); } @@ -200,13 +215,17 @@ public class ProvisioningTester implements AutoCloseable { } public List<Node> makeReadyNodes(int n, String flavor) { + return makeReadyNodes(n, flavor, NodeType.tenant); + } + + public List<Node> makeReadyNodes(int n, String flavor, NodeType type) { List<Node> nodes = new ArrayList<>(n); for (int i = 0; i < n; i++) nodes.add(nodeRepository.createNode(UUID.randomUUID().toString(), UUID.randomUUID().toString(), Optional.empty(), - nodeFlavors.getFlavorOrThrow(flavor), - Node.Type.tenant)); + nodeFlavors.getFlavorOrThrow(flavor), + type)); nodes = nodeRepository.addNodes(nodes); nodeRepository.setReady(nodes); return nodes; @@ 
-223,7 +242,7 @@ public class ProvisioningTester implements AutoCloseable { for (int i = 0; i < n; i++) { final String hostname = UUID.randomUUID().toString(); nodes.add(nodeRepository.createNode("openstack-id", hostname, parentHostId, - nodeFlavors.getFlavorOrThrow(flavor), Node.Type.tenant)); + nodeFlavors.getFlavorOrThrow(flavor), NodeType.tenant)); } nodes = nodeRepository.addNodes(nodes); nodeRepository.setReady(nodes); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java index 9e27d7a8297..7b166e957ef 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/VirtualNodeProvisioningTest.java @@ -29,11 +29,11 @@ import static org.junit.Assert.assertNotNull; * @author mpolden */ public class VirtualNodeProvisioningTest { + private static final String flavor = "v-4-8-100"; private static final ClusterSpec contentClusterSpec = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Optional.empty()); private static final ClusterSpec containerClusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("myContainer"), Optional.empty()); - private ProvisioningTester tester; private ApplicationId applicationId; @@ -166,7 +166,6 @@ public class VirtualNodeProvisioningTest { } @Test(expected = OutOfCapacityException.class) - // TODO Should fail with something else than OutOfCapacityException public void fail_when_too_few_distinct_parent_hosts() { tester.makeReadyVirtualNodes(2, flavor, "parentHost1"); tester.makeReadyVirtualNodes(1, flavor, "parentHost2"); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/legacy/ProvisionResourceTest.java 
b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/legacy/ProvisionResourceTest.java deleted file mode 100644 index 7fac31b01e7..00000000000 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/legacy/ProvisionResourceTest.java +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.vespa.hosted.provision.restapi.legacy; - -import com.yahoo.config.provision.ApplicationId; -import com.yahoo.config.provision.ApplicationName; -import com.yahoo.config.provision.Capacity; -import com.yahoo.config.provision.ClusterSpec; -import com.yahoo.config.provision.HostSpec; -import com.yahoo.config.provision.InstanceName; -import com.yahoo.config.provision.TenantName; -import com.yahoo.config.provision.Zone; -import com.yahoo.transaction.NestedTransaction; -import com.yahoo.vespa.curator.Curator; -import com.yahoo.vespa.curator.mock.MockCurator; -import com.yahoo.vespa.hosted.provision.Node; -import com.yahoo.vespa.hosted.provision.NodeRepository; -import com.yahoo.vespa.hosted.provision.node.NodeFlavors; -import com.yahoo.vespa.hosted.provision.provisioning.NodeRepositoryProvisioner; -import com.yahoo.vespa.curator.transaction.CuratorTransaction; -import com.yahoo.vespa.hosted.provision.testutils.FlavorConfigBuilder; -import org.junit.Before; -import org.junit.Test; - -import java.util.ArrayList; -import java.util.List; -import java.util.Optional; -import java.util.UUID; - -import static org.junit.Assert.assertEquals; - -/** - * @author mortent - */ -public class ProvisionResourceTest { - - NodeRepository nodeRepository; - NodeFlavors nodeFlavors; - ProvisionResource provisionResource; - Curator curator; - int capacity = 2; - ApplicationId application; - private NodeRepositoryProvisioner provisioner; - - @Before - public void setUpTest() throws Exception { - curator = new MockCurator(); - nodeFlavors = 
FlavorConfigBuilder.createDummies("default"); - nodeRepository = new NodeRepository(nodeFlavors, curator); - provisionResource = new ProvisionResource(nodeRepository, nodeFlavors); - application = ApplicationId.from(TenantName.from("myTenant"), ApplicationName.from("myApplication"), InstanceName.from("default")); - provisioner = new NodeRepositoryProvisioner(nodeRepository, nodeFlavors, Zone.defaultZone()); - } - - private void createNodesInRepository(int readyCount, int provisionedCount) { - List<Node> readyNodes = new ArrayList<>(); - for (HostInfo hostInfo : createHostInfos(readyCount, 0)) - readyNodes.add(nodeRepository.createNode(hostInfo.openStackId, hostInfo.hostname, - Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), Node.Type.tenant)); - readyNodes = nodeRepository.addNodes(readyNodes); - nodeRepository.setReady(readyNodes); - - List<Node> provisionedNodes = new ArrayList<>(); - for (HostInfo hostInfo : createHostInfos(provisionedCount, readyCount)) - provisionedNodes.add(nodeRepository.createNode(hostInfo.openStackId, hostInfo.hostname, - Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), Node.Type.tenant)); - nodeRepository.addNodes(provisionedNodes); - } - - @Test - public void test_node_allocation() { - createNodesInRepository(10, 0); - List<Node> assignments = assignNode(application, capacity); - assertEquals(2, assignments.size()); - } - - @Test - public void test_node_reallocation() { - createNodesInRepository(10, 0); - List<Node> assignments1 = assignNode(application, capacity); - List<Node> assignments2 = assignNode(application, capacity); - - assertEquals(assignments2.size(), assignments1.size()); - } - - @Test - public void test_node_reallocation_add_hostalias() { - createNodesInRepository(5, 0); - - List<Node> assignments1 = assignNode(application, 2); - List<Node> assignments2 = assignNode(application, 3); - - assertEquals(assignments2.size(), assignments1.size() + 1); - } - - @Test - public void 
test_node_allocation_remove_hostalias() { - createNodesInRepository(10, 0); - - List<Node> assignments1 = assignNode(application, 3, ClusterSpec.Type.container); - List<Node> assignments2 = assignNode(application, 2, ClusterSpec.Type.container); - - assertEquals(assignments2.size(), assignments1.size() - 1); - ProvisionStatus provisionStatus = provisionResource.getStatus(); - assertEquals(1, provisionStatus.decomissionNodes.size()); - } - - @Test - public void test_recycle_deallocated() { - createNodesInRepository(2, 0); - assignNode(application, 2); - NestedTransaction deactivateTransaction = new NestedTransaction(); - nodeRepository.deactivate(application, deactivateTransaction); - deactivateTransaction.commit(); - List<Node> nodes = nodeRepository.deallocate(nodeRepository.getNodes(application, Node.State.inactive)); - assertEquals(0, nodeRepository.getNodes(Node.Type.tenant, Node.State.ready).size()); - assertEquals(2, nodeRepository.getNodes(Node.Type.tenant, Node.State.dirty).size()); - provisionResource.setReady(nodes.get(0).hostname()); - provisionResource.setReady(nodes.get(1).hostname()); - assertEquals(2, nodeRepository.getNodes(Node.Type.tenant, Node.State.ready).size()); - assertEquals(0, nodeRepository.getNodes(Node.Type.tenant, Node.State.dirty).size()); - } - - @Test(expected = IllegalArgumentException.class) - public void test_ready_node_unknown() { - provisionResource.setReady("does.not.exist"); - } - - private List<HostInfo> createHostInfos(int count, int startIndex) { - String format = "node%d"; - List<HostInfo> hostInfos = new ArrayList<>(); - for (int i = 0; i < count; ++i) - hostInfos.add(HostInfo.createHostInfo(String.format(format, i + startIndex), UUID.randomUUID().toString(), "medium")); - return hostInfos; - } - - private List<Node> assignNode(ApplicationId applicationId, int capacity) { - return assignNode(applicationId, capacity, ClusterSpec.Type.content); - } - - private List<Node> assignNode(ApplicationId applicationId, int capacity, 
ClusterSpec.Type type) { - ClusterSpec cluster = ClusterSpec.request(type, ClusterSpec.Id.from("test"), Optional.empty()); - List<HostSpec> hosts = provisioner.prepare(applicationId, cluster, Capacity.fromNodeCount(capacity), 1, null); - NestedTransaction transaction = new NestedTransaction().add(new CuratorTransaction(curator)); - provisioner.activate(transaction, applicationId, hosts); - transaction.commit(); - return nodeRepository.getNodes(applicationId, Node.State.active); - } - -} diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v1/RestApiTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v1/RestApiTest.java index 238ca6cfcf5..28b93544238 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v1/RestApiTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v1/RestApiTest.java @@ -13,6 +13,7 @@ import com.yahoo.config.provision.Capacity; import com.yahoo.config.provision.ClusterSpec; import com.yahoo.config.provision.HostSpec; import com.yahoo.config.provision.InstanceName; +import com.yahoo.config.provision.NodeType; import com.yahoo.config.provision.TenantName; import com.yahoo.config.provision.Zone; import com.yahoo.transaction.NestedTransaction; @@ -85,12 +86,12 @@ public class RestApiTest { NodeFlavors flavors = FlavorConfigBuilder.createDummies("default"); List<Node> nodes = new ArrayList<>(); - nodes.add(createNode("node1", "host1.yahoo.com", Optional.empty(), flavors.getFlavorOrThrow("default"), Node.Type.tenant)); - nodes.add(createNode("node2", "host2.yahoo.com", Optional.empty(), flavors.getFlavorOrThrow("default"), Node.Type.tenant)); - nodes.add(createNode("node3", "host3.yahoo.com", Optional.empty(), flavors.getFlavorOrThrow("default"), Node.Type.tenant)); - nodes.add(createNode("node4", "host4.yahoo.com", Optional.empty(), flavors.getFlavorOrThrow("default"), Node.Type.tenant)); - nodes.add(createNode("node5", 
"host5.yahoo.com", Optional.empty(), flavors.getFlavorOrThrow("default"), Node.Type.tenant)); - nodes.add(createNode("node6", "host6.yahoo.com", Optional.empty(), flavors.getFlavorOrThrow("default"), Node.Type.tenant)); + nodes.add(createNode("node1", "host1.yahoo.com", Optional.empty(), flavors.getFlavorOrThrow("default"), NodeType.tenant)); + nodes.add(createNode("node2", "host2.yahoo.com", Optional.empty(), flavors.getFlavorOrThrow("default"), NodeType.tenant)); + nodes.add(createNode("node3", "host3.yahoo.com", Optional.empty(), flavors.getFlavorOrThrow("default"), NodeType.tenant)); + nodes.add(createNode("node4", "host4.yahoo.com", Optional.empty(), flavors.getFlavorOrThrow("default"), NodeType.tenant)); + nodes.add(createNode("node5", "host5.yahoo.com", Optional.empty(), flavors.getFlavorOrThrow("default"), NodeType.tenant)); + nodes.add(createNode("node6", "host6.yahoo.com", Optional.empty(), flavors.getFlavorOrThrow("default"), NodeType.tenant)); nodes = addNodes(nodes); nodes.remove(5); setReady(nodes); @@ -660,36 +660,6 @@ <version>1.7.17</version> </dependency> <dependency> - <groupId>com.sun.jersey</groupId> - <artifactId>jersey-client</artifactId> - <version>1.13</version> - </dependency> - <dependency> - <groupId>com.sun.jersey</groupId> - <artifactId>jersey-core</artifactId> - <version>1.13</version> - </dependency> - <dependency> - <groupId>com.sun.jersey</groupId> - <artifactId>jersey-json</artifactId> - <version>1.13</version> - </dependency> - <dependency> - <groupId>com.sun.jersey</groupId> - <artifactId>jersey-server</artifactId> - <version>1.13</version> - </dependency> - <dependency> - <groupId>com.sun.jersey.contribs</groupId> - <artifactId>jersey-guice</artifactId> - <version>1.13</version> - </dependency> - <dependency> - <groupId>com.sun.jersey.contribs</groupId> - <artifactId>jersey-multipart</artifactId> - <version>1.13</version> - </dependency> - <dependency> <groupId>commons-cli</groupId> <artifactId>commons-cli</artifactId> 
<version>1.3.1</version> @@ -1106,14 +1076,14 @@ </dependencyManagement> <properties> - <javax.ws.rs-api.version>2.0</javax.ws.rs-api.version> + <javax.ws.rs-api.version>2.0.1</javax.ws.rs-api.version> <!-- must be kept in sync with version used by current jersey2.version --> <aries.spifly.version>1.0.8</aries.spifly.version> <aries.util.version>1.0.0</aries.util.version> <asm-debug-all.version>5.0.3</asm-debug-all.version> <curator.version>2.9.1</curator.version> - <jackson2.version>2.5.3</jackson2.version> - <jersey2.version>2.10.1</jersey2.version> - <jetty.version>9.3.10.v20160621</jetty.version> + <jackson2.version>2.8.3</jackson2.version> + <jersey2.version>2.18</jersey2.version> + <jetty.version>9.3.12.v20160915</jetty.version> <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding> <test.hide>true</test.hide> <doclint>all</doclint> diff --git a/sample-apps/blog-recommendation/src/main/application/constants/W_final.json b/sample-apps/blog-recommendation/src/main/application/constants/W_final.json new file mode 100644 index 00000000000..f27bdf82295 --- /dev/null +++ b/sample-apps/blog-recommendation/src/main/application/constants/W_final.json @@ -0,0 +1 @@ +{"cells": [{"value": -6.825683116912842, "address": {"hidden": "0", "final": "0"}}, {"value": -11.552776336669922, "address": {"hidden": "1", "final": "0"}}, {"value": -0.03196524456143379, "address": {"hidden": "2", "final": "0"}}, {"value": 3.5572564601898193, "address": {"hidden": "3", "final": "0"}}, {"value": -0.36737191677093506, "address": {"hidden": "4", "final": "0"}}, {"value": -9.204142570495605, "address": {"hidden": "5", "final": "0"}}, {"value": -7.853666305541992, "address": {"hidden": "6", "final": "0"}}, {"value": -7.21183967590332, "address": {"hidden": "7", "final": "0"}}, {"value": -0.7030931115150452, "address": {"hidden": "8", "final": "0"}}, {"value": -5.912731170654297, "address": {"hidden": "9", "final": "0"}}, {"value": -9.247459411621094, "address": {"hidden": "10", 
"final": "0"}}, {"value": 4.867856502532959, "address": {"hidden": "11", "final": "0"}}, {"value": -0.20304903388023376, "address": {"hidden": "12", "final": "0"}}, {"value": -0.678585410118103, "address": {"hidden": "13", "final": "0"}}, {"value": 4.772542476654053, "address": {"hidden": "14", "final": "0"}}, {"value": -0.7239269614219666, "address": {"hidden": "15", "final": "0"}}, {"value": 0.5636696219444275, "address": {"hidden": "16", "final": "0"}}, {"value": 4.56899356842041, "address": {"hidden": "17", "final": "0"}}, {"value": 3.834077835083008, "address": {"hidden": "18", "final": "0"}}, {"value": -0.5139496326446533, "address": {"hidden": "19", "final": "0"}}, {"value": -8.896514892578125, "address": {"hidden": "20", "final": "0"}}, {"value": -0.39034759998321533, "address": {"hidden": "21", "final": "0"}}, {"value": -7.827820301055908, "address": {"hidden": "22", "final": "0"}}, {"value": -0.08422113955020905, "address": {"hidden": "23", "final": "0"}}, {"value": -0.764652669429779, "address": {"hidden": "24", "final": "0"}}, {"value": -0.6762506365776062, "address": {"hidden": "25", "final": "0"}}, {"value": -9.849908828735352, "address": {"hidden": "26", "final": "0"}}, {"value": -0.5799359679222107, "address": {"hidden": "27", "final": "0"}}, {"value": -7.373893737792969, "address": {"hidden": "28", "final": "0"}}, {"value": -0.7508071660995483, "address": {"hidden": "29", "final": "0"}}, {"value": 6.701768398284912, "address": {"hidden": "30", "final": "0"}}, {"value": -7.672307014465332, "address": {"hidden": "31", "final": "0"}}, {"value": -0.5038443803787231, "address": {"hidden": "32", "final": "0"}}, {"value": -8.238866806030273, "address": {"hidden": "33", "final": "0"}}, {"value": 5.944279670715332, "address": {"hidden": "34", "final": "0"}}, {"value": 5.32857608795166, "address": {"hidden": "35", "final": "0"}}, {"value": -0.19910834729671478, "address": {"hidden": "36", "final": "0"}}, {"value": -7.227928161621094, "address": {"hidden": 
"37", "final": "0"}}, {"value": -0.7359553575515747, "address": {"hidden": "38", "final": "0"}}, {"value": 6.034699440002441, "address": {"hidden": "39", "final": "0"}}]}
\ No newline at end of file diff --git a/sample-apps/blog-recommendation/src/main/application/constants/W_hidden.json b/sample-apps/blog-recommendation/src/main/application/constants/W_hidden.json new file mode 100644 index 00000000000..88f4626f20c --- /dev/null +++ b/sample-apps/blog-recommendation/src/main/application/constants/W_hidden.json @@ -0,0 +1 @@ +{"cells": [{"value": -0.7189213037490845, "address": {"input": "0", "hidden": "0"}}, {"value": 1.9912770986557007, "address": {"input": "0", "hidden": "1"}}, {"value": 0.04417821019887924, "address": {"input": "0", "hidden": "2"}}, {"value": -0.18242864310741425, "address": {"input": "0", "hidden": "3"}}, {"value": 0.024994252249598503, "address": {"input": "0", "hidden": "4"}}, {"value": -2.0110864639282227, "address": {"input": "0", "hidden": "5"}}, {"value": 0.6684287786483765, "address": {"input": "0", "hidden": "6"}}, {"value": 1.0804332494735718, "address": {"input": "0", "hidden": "7"}}, {"value": -0.03703492507338524, "address": {"input": "0", "hidden": "8"}}, {"value": -0.44906026124954224, "address": {"input": "0", "hidden": "9"}}, {"value": -0.6651250720024109, "address": {"input": "0", "hidden": "10"}}, {"value": -0.46276769042015076, "address": {"input": "0", "hidden": "11"}}, {"value": 0.05562775209546089, "address": {"input": "0", "hidden": "12"}}, {"value": -0.09671487659215927, "address": {"input": "0", "hidden": "13"}}, {"value": 1.0999059677124023, "address": {"input": "0", "hidden": "14"}}, {"value": 0.05439005792140961, "address": {"input": "0", "hidden": "15"}}, {"value": 0.313202828168869, "address": {"input": "0", "hidden": "16"}}, {"value": 0.034285448491573334, "address": {"input": "0", "hidden": "17"}}, {"value": 0.43388330936431885, "address": {"input": "0", "hidden": "18"}}, {"value": -0.04753245413303375, "address": {"input": "0", "hidden": "19"}}, {"value": -0.4028201699256897, "address": {"input": "0", "hidden": "20"}}, {"value": 0.02194174751639366, "address": {"input": "0", 
"hidden": "21"}}, {"value": -0.8105738759040833, "address": {"input": "0", "hidden": "22"}}, {"value": 0.16375064849853516, "address": {"input": "0", "hidden": "23"}}, {"value": 0.17628535628318787, "address": {"input": "0", "hidden": "24"}}, {"value": 0.09516087919473648, "address": {"input": "0", "hidden": "25"}}, {"value": 1.0945230722427368, "address": {"input": "0", "hidden": "26"}}, {"value": -0.030173057690262794, "address": {"input": "0", "hidden": "27"}}, {"value": -1.4646086692810059, "address": {"input": "0", "hidden": "28"}}, {"value": -0.043420642614364624, "address": {"input": "0", "hidden": "29"}}, {"value": 1.5677211284637451, "address": {"input": "0", "hidden": "30"}}, {"value": 0.6931793093681335, "address": {"input": "0", "hidden": "31"}}, {"value": -0.010467404499650002, "address": {"input": "0", "hidden": "32"}}, {"value": 0.25585490465164185, "address": {"input": "0", "hidden": "33"}}, {"value": -0.9009624123573303, "address": {"input": "0", "hidden": "34"}}, {"value": -1.585007667541504, "address": {"input": "0", "hidden": "35"}}, {"value": 0.14691980183124542, "address": {"input": "0", "hidden": "36"}}, {"value": 0.964773952960968, "address": {"input": "0", "hidden": "37"}}, {"value": 0.014939195476472378, "address": {"input": "0", "hidden": "38"}}, {"value": 1.2953510284423828, "address": {"input": "0", "hidden": "39"}}, {"value": 0.6878533959388733, "address": {"input": "1", "hidden": "0"}}, {"value": -1.6245843172073364, "address": {"input": "1", "hidden": "1"}}, {"value": 0.23698848485946655, "address": {"input": "1", "hidden": "2"}}, {"value": 0.6577396988868713, "address": {"input": "1", "hidden": "3"}}, {"value": 0.5965789556503296, "address": {"input": "1", "hidden": "4"}}, {"value": 0.937586784362793, "address": {"input": "1", "hidden": "5"}}, {"value": 2.0404160022735596, "address": {"input": "1", "hidden": "6"}}, {"value": 1.3567709922790527, "address": {"input": "1", "hidden": "7"}}, {"value": 0.8615828156471252, "address": 
{"input": "1", "hidden": "8"}}, {"value": 0.01582285575568676, "address": {"input": "1", "hidden": "9"}}, {"value": 0.1541435271501541, "address": {"input": "1", "hidden": "10"}}, {"value": 0.02719242312014103, "address": {"input": "1", "hidden": "11"}}, {"value": 0.39114853739738464, "address": {"input": "1", "hidden": "12"}}, {"value": 0.7858998775482178, "address": {"input": "1", "hidden": "13"}}, {"value": -1.0325614213943481, "address": {"input": "1", "hidden": "14"}}, {"value": 0.9045850038528442, "address": {"input": "1", "hidden": "15"}}, {"value": -0.23162464797496796, "address": {"input": "1", "hidden": "16"}}, {"value": -2.272174835205078, "address": {"input": "1", "hidden": "17"}}, {"value": 0.1055762842297554, "address": {"input": "1", "hidden": "18"}}, {"value": 0.6994998455047607, "address": {"input": "1", "hidden": "19"}}, {"value": -1.938716173171997, "address": {"input": "1", "hidden": "20"}}, {"value": 0.6481356024742126, "address": {"input": "1", "hidden": "21"}}, {"value": 0.44040876626968384, "address": {"input": "1", "hidden": "22"}}, {"value": 0.10532877594232559, "address": {"input": "1", "hidden": "23"}}, {"value": 0.9724994897842407, "address": {"input": "1", "hidden": "24"}}, {"value": 0.9293966293334961, "address": {"input": "1", "hidden": "25"}}, {"value": 0.5219255089759827, "address": {"input": "1", "hidden": "26"}}, {"value": 0.7022855877876282, "address": {"input": "1", "hidden": "27"}}, {"value": -0.7686229348182678, "address": {"input": "1", "hidden": "28"}}, {"value": 0.7357187271118164, "address": {"input": "1", "hidden": "29"}}, {"value": 2.235738754272461, "address": {"input": "1", "hidden": "30"}}, {"value": 1.7580595016479492, "address": {"input": "1", "hidden": "31"}}, {"value": 0.6770991683006287, "address": {"input": "1", "hidden": "32"}}, {"value": -0.962554395198822, "address": {"input": "1", "hidden": "33"}}, {"value": 1.3252968788146973, "address": {"input": "1", "hidden": "34"}}, {"value": 0.22104468941688538, 
"address": {"input": "1", "hidden": "35"}}, {"value": 0.24260684847831726, "address": {"input": "1", "hidden": "36"}}, {"value": -0.690116286277771, "address": {"input": "1", "hidden": "37"}}, {"value": 0.7648648023605347, "address": {"input": "1", "hidden": "38"}}, {"value": -0.5544788837432861, "address": {"input": "1", "hidden": "39"}}, {"value": -1.5446373224258423, "address": {"input": "2", "hidden": "0"}}, {"value": 0.2900436222553253, "address": {"input": "2", "hidden": "1"}}, {"value": 0.16614891588687897, "address": {"input": "2", "hidden": "2"}}, {"value": 1.7413760423660278, "address": {"input": "2", "hidden": "3"}}, {"value": 0.314350962638855, "address": {"input": "2", "hidden": "4"}}, {"value": 0.5839463472366333, "address": {"input": "2", "hidden": "5"}}, {"value": 1.1174143552780151, "address": {"input": "2", "hidden": "6"}}, {"value": 0.4117319583892822, "address": {"input": "2", "hidden": "7"}}, {"value": 0.2889877259731293, "address": {"input": "2", "hidden": "8"}}, {"value": -1.1281523704528809, "address": {"input": "2", "hidden": "9"}}, {"value": -0.14513351023197174, "address": {"input": "2", "hidden": "10"}}, {"value": -1.5450941324234009, "address": {"input": "2", "hidden": "11"}}, {"value": 0.06881824880838394, "address": {"input": "2", "hidden": "12"}}, {"value": 0.27837443351745605, "address": {"input": "2", "hidden": "13"}}, {"value": 0.44573166966438293, "address": {"input": "2", "hidden": "14"}}, {"value": 0.1965588629245758, "address": {"input": "2", "hidden": "15"}}, {"value": -0.08517644554376602, "address": {"input": "2", "hidden": "16"}}, {"value": -0.6185004711151123, "address": {"input": "2", "hidden": "17"}}, {"value": -0.3780096173286438, "address": {"input": "2", "hidden": "18"}}, {"value": 0.3269176781177521, "address": {"input": "2", "hidden": "19"}}, {"value": 0.8748881220817566, "address": {"input": "2", "hidden": "20"}}, {"value": 0.28132590651512146, "address": {"input": "2", "hidden": "21"}}, {"value": 
-0.4056054949760437, "address": {"input": "2", "hidden": "22"}}, {"value": 0.09367147833108902, "address": {"input": "2", "hidden": "23"}}, {"value": 0.22576521337032318, "address": {"input": "2", "hidden": "24"}}, {"value": 0.20586106181144714, "address": {"input": "2", "hidden": "25"}}, {"value": 2.2693886756896973, "address": {"input": "2", "hidden": "26"}}, {"value": 0.23238898813724518, "address": {"input": "2", "hidden": "27"}}, {"value": -0.3787096440792084, "address": {"input": "2", "hidden": "28"}}, {"value": 0.4546310007572174, "address": {"input": "2", "hidden": "29"}}, {"value": 1.2593153715133667, "address": {"input": "2", "hidden": "30"}}, {"value": -0.834303617477417, "address": {"input": "2", "hidden": "31"}}, {"value": 0.24100446701049805, "address": {"input": "2", "hidden": "32"}}, {"value": -0.9956692457199097, "address": {"input": "2", "hidden": "33"}}, {"value": -0.9378760457038879, "address": {"input": "2", "hidden": "34"}}, {"value": 0.4271271824836731, "address": {"input": "2", "hidden": "35"}}, {"value": 0.1826498657464981, "address": {"input": "2", "hidden": "36"}}, {"value": -0.23016764223575592, "address": {"input": "2", "hidden": "37"}}, {"value": 0.22004179656505585, "address": {"input": "2", "hidden": "38"}}, {"value": -0.8108733296394348, "address": {"input": "2", "hidden": "39"}}, {"value": 0.7323907017707825, "address": {"input": "3", "hidden": "0"}}, {"value": -0.36809656023979187, "address": {"input": "3", "hidden": "1"}}, {"value": -0.07451724261045456, "address": {"input": "3", "hidden": "2"}}, {"value": 1.5333952903747559, "address": {"input": "3", "hidden": "3"}}, {"value": -0.12986250221729279, "address": {"input": "3", "hidden": "4"}}, {"value": -0.6427800059318542, "address": {"input": "3", "hidden": "5"}}, {"value": 0.5564994215965271, "address": {"input": "3", "hidden": "6"}}, {"value": -0.4298020303249359, "address": {"input": "3", "hidden": "7"}}, {"value": -0.017939642071723938, "address": {"input": "3", "hidden": 
"8"}}, {"value": 1.4466378688812256, "address": {"input": "3", "hidden": "9"}}, {"value": 1.0441349744796753, "address": {"input": "3", "hidden": "10"}}, {"value": -0.6266963481903076, "address": {"input": "3", "hidden": "11"}}, {"value": -0.11769331246614456, "address": {"input": "3", "hidden": "12"}}, {"value": 0.0026735435239970684, "address": {"input": "3", "hidden": "13"}}, {"value": -0.551913857460022, "address": {"input": "3", "hidden": "14"}}, {"value": -0.16198065876960754, "address": {"input": "3", "hidden": "15"}}, {"value": -0.566704273223877, "address": {"input": "3", "hidden": "16"}}, {"value": 0.9773831963539124, "address": {"input": "3", "hidden": "17"}}, {"value": 1.7869045734405518, "address": {"input": "3", "hidden": "18"}}, {"value": 0.009694541804492474, "address": {"input": "3", "hidden": "19"}}, {"value": -0.2797841727733612, "address": {"input": "3", "hidden": "20"}}, {"value": -0.08333427459001541, "address": {"input": "3", "hidden": "21"}}, {"value": -0.9746790528297424, "address": {"input": "3", "hidden": "22"}}, {"value": 0.019720492884516716, "address": {"input": "3", "hidden": "23"}}, {"value": -0.12381303310394287, "address": {"input": "3", "hidden": "24"}}, {"value": -0.15858975052833557, "address": {"input": "3", "hidden": "25"}}, {"value": -1.041365623474121, "address": {"input": "3", "hidden": "26"}}, {"value": 0.022131886333227158, "address": {"input": "3", "hidden": "27"}}, {"value": 0.32305431365966797, "address": {"input": "3", "hidden": "28"}}, {"value": 0.09926348179578781, "address": {"input": "3", "hidden": "29"}}, {"value": -0.15755106508731842, "address": {"input": "3", "hidden": "30"}}, {"value": 0.8468675017356873, "address": {"input": "3", "hidden": "31"}}, {"value": -0.011141596361994743, "address": {"input": "3", "hidden": "32"}}, {"value": -0.5010656118392944, "address": {"input": "3", "hidden": "33"}}, {"value": -0.6446647047996521, "address": {"input": "3", "hidden": "34"}}, {"value": -0.8181695938110352, 
"address": {"input": "3", "hidden": "35"}}, {"value": -0.06014057248830795, "address": {"input": "3", "hidden": "36"}}, {"value": 1.7773494720458984, "address": {"input": "3", "hidden": "37"}}, {"value": -0.0887443870306015, "address": {"input": "3", "hidden": "38"}}, {"value": -0.2617745101451874, "address": {"input": "3", "hidden": "39"}}, {"value": -1.804465889930725, "address": {"input": "4", "hidden": "0"}}, {"value": -0.020009662955999374, "address": {"input": "4", "hidden": "1"}}, {"value": -0.026426775380969048, "address": {"input": "4", "hidden": "2"}}, {"value": -0.09071065485477448, "address": {"input": "4", "hidden": "3"}}, {"value": -0.14363348484039307, "address": {"input": "4", "hidden": "4"}}, {"value": 1.1319115161895752, "address": {"input": "4", "hidden": "5"}}, {"value": -0.9459549784660339, "address": {"input": "4", "hidden": "6"}}, {"value": 0.008108599111437798, "address": {"input": "4", "hidden": "7"}}, {"value": -0.5534496903419495, "address": {"input": "4", "hidden": "8"}}, {"value": 1.1136456727981567, "address": {"input": "4", "hidden": "9"}}, {"value": -0.9858945608139038, "address": {"input": "4", "hidden": "10"}}, {"value": -0.27736952900886536, "address": {"input": "4", "hidden": "11"}}, {"value": -0.2722409963607788, "address": {"input": "4", "hidden": "12"}}, {"value": -0.5546658039093018, "address": {"input": "4", "hidden": "13"}}, {"value": 0.6865521669387817, "address": {"input": "4", "hidden": "14"}}, {"value": -0.5457481145858765, "address": {"input": "4", "hidden": "15"}}, {"value": -0.1566510647535324, "address": {"input": "4", "hidden": "16"}}, {"value": 0.1562303900718689, "address": {"input": "4", "hidden": "17"}}, {"value": 0.2913127839565277, "address": {"input": "4", "hidden": "18"}}, {"value": -0.4430377781391144, "address": {"input": "4", "hidden": "19"}}, {"value": -2.1097147464752197, "address": {"input": "4", "hidden": "20"}}, {"value": -0.27838531136512756, "address": {"input": "4", "hidden": "21"}}, {"value": 
2.2503154277801514, "address": {"input": "4", "hidden": "22"}}, {"value": -0.08555690199136734, "address": {"input": "4", "hidden": "23"}}, {"value": -0.6184860467910767, "address": {"input": "4", "hidden": "24"}}, {"value": -0.503865122795105, "address": {"input": "4", "hidden": "25"}}, {"value": 0.20057111978530884, "address": {"input": "4", "hidden": "26"}}, {"value": -0.5396428108215332, "address": {"input": "4", "hidden": "27"}}, {"value": 0.18199816346168518, "address": {"input": "4", "hidden": "28"}}, {"value": -0.4444669485092163, "address": {"input": "4", "hidden": "29"}}, {"value": -0.6393587589263916, "address": {"input": "4", "hidden": "30"}}, {"value": -0.7353011965751648, "address": {"input": "4", "hidden": "31"}}, {"value": -0.39619624614715576, "address": {"input": "4", "hidden": "32"}}, {"value": 0.3264741003513336, "address": {"input": "4", "hidden": "33"}}, {"value": 1.9822925329208374, "address": {"input": "4", "hidden": "34"}}, {"value": -3.6068642139434814, "address": {"input": "4", "hidden": "35"}}, {"value": -0.20656000077724457, "address": {"input": "4", "hidden": "36"}}, {"value": 1.332269549369812, "address": {"input": "4", "hidden": "37"}}, {"value": -0.654046893119812, "address": {"input": "4", "hidden": "38"}}, {"value": -0.5216940641403198, "address": {"input": "4", "hidden": "39"}}, {"value": 0.22311687469482422, "address": {"input": "5", "hidden": "0"}}, {"value": 0.5558058023452759, "address": {"input": "5", "hidden": "1"}}, {"value": -0.0792737677693367, "address": {"input": "5", "hidden": "2"}}, {"value": -0.3244069516658783, "address": {"input": "5", "hidden": "3"}}, {"value": 0.1750195026397705, "address": {"input": "5", "hidden": "4"}}, {"value": -0.06466645747423172, "address": {"input": "5", "hidden": "5"}}, {"value": 0.4551229774951935, "address": {"input": "5", "hidden": "6"}}, {"value": -0.34126487374305725, "address": {"input": "5", "hidden": "7"}}, {"value": -0.019982824102044106, "address": {"input": "5", "hidden": 
"8"}}, {"value": 0.6309963464736938, "address": {"input": "5", "hidden": "9"}}, {"value": 0.3144053518772125, "address": {"input": "5", "hidden": "10"}}, {"value": -0.23200847208499908, "address": {"input": "5", "hidden": "11"}}, {"value": 0.12822861969470978, "address": {"input": "5", "hidden": "12"}}, {"value": -0.01585562527179718, "address": {"input": "5", "hidden": "13"}}, {"value": 0.23135793209075928, "address": {"input": "5", "hidden": "14"}}, {"value": -0.04126732796430588, "address": {"input": "5", "hidden": "15"}}, {"value": -0.6207808256149292, "address": {"input": "5", "hidden": "16"}}, {"value": -0.04102066904306412, "address": {"input": "5", "hidden": "17"}}, {"value": 0.7505157589912415, "address": {"input": "5", "hidden": "18"}}, {"value": 0.09427837282419205, "address": {"input": "5", "hidden": "19"}}, {"value": 0.16650235652923584, "address": {"input": "5", "hidden": "20"}}, {"value": 0.11864404380321503, "address": {"input": "5", "hidden": "21"}}, {"value": 0.18446223437786102, "address": {"input": "5", "hidden": "22"}}, {"value": -0.0009199536871165037, "address": {"input": "5", "hidden": "23"}}, {"value": 0.03573770076036453, "address": {"input": "5", "hidden": "24"}}, {"value": 0.14720793068408966, "address": {"input": "5", "hidden": "25"}}, {"value": -0.1319275051355362, "address": {"input": "5", "hidden": "26"}}, {"value": 0.06830848753452301, "address": {"input": "5", "hidden": "27"}}, {"value": -0.7543832063674927, "address": {"input": "5", "hidden": "28"}}, {"value": 0.2388685941696167, "address": {"input": "5", "hidden": "29"}}, {"value": 0.6166307330131531, "address": {"input": "5", "hidden": "30"}}, {"value": 0.8029781579971313, "address": {"input": "5", "hidden": "31"}}, {"value": -0.012022594921290874, "address": {"input": "5", "hidden": "32"}}, {"value": 0.4352540969848633, "address": {"input": "5", "hidden": "33"}}, {"value": -0.272478848695755, "address": {"input": "5", "hidden": "34"}}, {"value": -0.061343178153038025, 
"address": {"input": "5", "hidden": "35"}}, {"value": 0.0700683519244194, "address": {"input": "5", "hidden": "36"}}, {"value": 0.19961173832416534, "address": {"input": "5", "hidden": "37"}}, {"value": -0.022546743974089622, "address": {"input": "5", "hidden": "38"}}, {"value": 0.31590497493743896, "address": {"input": "5", "hidden": "39"}}, {"value": -0.25970253348350525, "address": {"input": "6", "hidden": "0"}}, {"value": 0.18223503232002258, "address": {"input": "6", "hidden": "1"}}, {"value": -0.034953925758600235, "address": {"input": "6", "hidden": "2"}}, {"value": 0.09662346541881561, "address": {"input": "6", "hidden": "3"}}, {"value": -0.1577531397342682, "address": {"input": "6", "hidden": "4"}}, {"value": 0.1476498544216156, "address": {"input": "6", "hidden": "5"}}, {"value": 0.6420069932937622, "address": {"input": "6", "hidden": "6"}}, {"value": 1.1801387071609497, "address": {"input": "6", "hidden": "7"}}, {"value": -0.1279468983411789, "address": {"input": "6", "hidden": "8"}}, {"value": -0.3698572814464569, "address": {"input": "6", "hidden": "9"}}, {"value": 0.07398352026939392, "address": {"input": "6", "hidden": "10"}}, {"value": 1.4407870769500732, "address": {"input": "6", "hidden": "11"}}, {"value": -0.06935463845729828, "address": {"input": "6", "hidden": "12"}}, {"value": -0.014039982110261917, "address": {"input": "6", "hidden": "13"}}, {"value": 0.8782438635826111, "address": {"input": "6", "hidden": "14"}}, {"value": -0.1544562131166458, "address": {"input": "6", "hidden": "15"}}, {"value": 0.483896940946579, "address": {"input": "6", "hidden": "16"}}, {"value": -0.8149647116661072, "address": {"input": "6", "hidden": "17"}}, {"value": -0.5367652773857117, "address": {"input": "6", "hidden": "18"}}, {"value": -0.1455787867307663, "address": {"input": "6", "hidden": "19"}}, {"value": -0.4814915955066681, "address": {"input": "6", "hidden": "20"}}, {"value": -0.14869733154773712, "address": {"input": "6", "hidden": "21"}}, {"value": 
0.16433019936084747, "address": {"input": "6", "hidden": "22"}}, {"value": -0.030044984072446823, "address": {"input": "6", "hidden": "23"}}, {"value": -0.11718577891588211, "address": {"input": "6", "hidden": "24"}}, {"value": -0.2017647922039032, "address": {"input": "6", "hidden": "25"}}, {"value": -0.09478448331356049, "address": {"input": "6", "hidden": "26"}}, {"value": -0.15259458124637604, "address": {"input": "6", "hidden": "27"}}, {"value": -0.6758605241775513, "address": {"input": "6", "hidden": "28"}}, {"value": -0.25192344188690186, "address": {"input": "6", "hidden": "29"}}, {"value": -0.21192078292369843, "address": {"input": "6", "hidden": "30"}}, {"value": 0.3551881015300751, "address": {"input": "6", "hidden": "31"}}, {"value": -0.18337774276733398, "address": {"input": "6", "hidden": "32"}}, {"value": -0.9451691508293152, "address": {"input": "6", "hidden": "33"}}, {"value": -0.1605629324913025, "address": {"input": "6", "hidden": "34"}}, {"value": -0.25548335909843445, "address": {"input": "6", "hidden": "35"}}, {"value": -0.05075099691748619, "address": {"input": "6", "hidden": "36"}}, {"value": -0.5737307667732239, "address": {"input": "6", "hidden": "37"}}, {"value": -0.18762598931789398, "address": {"input": "6", "hidden": "38"}}, {"value": -0.9560476541519165, "address": {"input": "6", "hidden": "39"}}, {"value": 0.94390469789505, "address": {"input": "7", "hidden": "0"}}, {"value": -0.36691972613334656, "address": {"input": "7", "hidden": "1"}}, {"value": 0.22359393537044525, "address": {"input": "7", "hidden": "2"}}, {"value": 0.3093623220920563, "address": {"input": "7", "hidden": "3"}}, {"value": 0.28234395384788513, "address": {"input": "7", "hidden": "4"}}, {"value": -0.6574575304985046, "address": {"input": "7", "hidden": "5"}}, {"value": -0.01812604069709778, "address": {"input": "7", "hidden": "6"}}, {"value": 0.8994186520576477, "address": {"input": "7", "hidden": "7"}}, {"value": 0.38518932461738586, "address": {"input": "7", 
"hidden": "8"}}, {"value": -0.5244878530502319, "address": {"input": "7", "hidden": "9"}}, {"value": -0.3173958361148834, "address": {"input": "7", "hidden": "10"}}, {"value": -1.0457731485366821, "address": {"input": "7", "hidden": "11"}}, {"value": 0.28511306643486023, "address": {"input": "7", "hidden": "12"}}, {"value": 0.3600058853626251, "address": {"input": "7", "hidden": "13"}}, {"value": -0.14626894891262054, "address": {"input": "7", "hidden": "14"}}, {"value": 0.4252859950065613, "address": {"input": "7", "hidden": "15"}}, {"value": 0.5760697722434998, "address": {"input": "7", "hidden": "16"}}, {"value": -0.16527177393436432, "address": {"input": "7", "hidden": "17"}}, {"value": 0.22767996788024902, "address": {"input": "7", "hidden": "18"}}, {"value": 0.36496850848197937, "address": {"input": "7", "hidden": "19"}}, {"value": -0.8499257564544678, "address": {"input": "7", "hidden": "20"}}, {"value": 0.2962559163570404, "address": {"input": "7", "hidden": "21"}}, {"value": -0.30709701776504517, "address": {"input": "7", "hidden": "22"}}, {"value": 0.06296849250793457, "address": {"input": "7", "hidden": "23"}}, {"value": 0.4634517431259155, "address": {"input": "7", "hidden": "24"}}, {"value": 0.43814942240715027, "address": {"input": "7", "hidden": "25"}}, {"value": -0.2395002245903015, "address": {"input": "7", "hidden": "26"}}, {"value": 0.3300420343875885, "address": {"input": "7", "hidden": "27"}}, {"value": -0.5939531922340393, "address": {"input": "7", "hidden": "28"}}, {"value": 0.5843372344970703, "address": {"input": "7", "hidden": "29"}}, {"value": 0.011469338089227676, "address": {"input": "7", "hidden": "30"}}, {"value": -0.07167834043502808, "address": {"input": "7", "hidden": "31"}}, {"value": 0.38303056359291077, "address": {"input": "7", "hidden": "32"}}, {"value": 0.8724275231361389, "address": {"input": "7", "hidden": "33"}}, {"value": 0.4990171492099762, "address": {"input": "7", "hidden": "34"}}, {"value": 0.065599225461483, 
"address": {"input": "7", "hidden": "35"}}, {"value": 0.14045190811157227, "address": {"input": "7", "hidden": "36"}}, {"value": 0.7355637550354004, "address": {"input": "7", "hidden": "37"}}, {"value": 0.521418035030365, "address": {"input": "7", "hidden": "38"}}, {"value": 0.9712653160095215, "address": {"input": "7", "hidden": "39"}}, {"value": 0.6150276064872742, "address": {"input": "8", "hidden": "0"}}, {"value": -0.0912940576672554, "address": {"input": "8", "hidden": "1"}}, {"value": 0.042333487421274185, "address": {"input": "8", "hidden": "2"}}, {"value": 0.30264830589294434, "address": {"input": "8", "hidden": "3"}}, {"value": -0.13960868120193481, "address": {"input": "8", "hidden": "4"}}, {"value": -0.5399911403656006, "address": {"input": "8", "hidden": "5"}}, {"value": -0.6424735188484192, "address": {"input": "8", "hidden": "6"}}, {"value": -0.6105536222457886, "address": {"input": "8", "hidden": "7"}}, {"value": -0.16714352369308472, "address": {"input": "8", "hidden": "8"}}, {"value": -0.4058690667152405, "address": {"input": "8", "hidden": "9"}}, {"value": 2.6821913719177246, "address": {"input": "8", "hidden": "10"}}, {"value": 0.36536505818367004, "address": {"input": "8", "hidden": "11"}}, {"value": -0.03360128402709961, "address": {"input": "8", "hidden": "12"}}, {"value": -0.06559611856937408, "address": {"input": "8", "hidden": "13"}}, {"value": -1.6772416830062866, "address": {"input": "8", "hidden": "14"}}, {"value": -0.12337570637464523, "address": {"input": "8", "hidden": "15"}}, {"value": 0.46300458908081055, "address": {"input": "8", "hidden": "16"}}, {"value": 1.176313877105713, "address": {"input": "8", "hidden": "17"}}, {"value": -0.4508458971977234, "address": {"input": "8", "hidden": "18"}}, {"value": 0.029446247965097427, "address": {"input": "8", "hidden": "19"}}, {"value": -1.061523199081421, "address": {"input": "8", "hidden": "20"}}, {"value": -0.21172618865966797, "address": {"input": "8", "hidden": "21"}}, {"value": 
-1.053208827972412, "address": {"input": "8", "hidden": "22"}}, {"value": 0.02329195663332939, "address": {"input": "8", "hidden": "23"}}, {"value": -0.02612660452723503, "address": {"input": "8", "hidden": "24"}}, {"value": -0.02202698029577732, "address": {"input": "8", "hidden": "25"}}, {"value": 0.3769114017486572, "address": {"input": "8", "hidden": "26"}}, {"value": -0.10918441414833069, "address": {"input": "8", "hidden": "27"}}, {"value": -0.5467832088470459, "address": {"input": "8", "hidden": "28"}}, {"value": -0.02408364787697792, "address": {"input": "8", "hidden": "29"}}, {"value": -0.08416718989610672, "address": {"input": "8", "hidden": "30"}}, {"value": 0.35907915234565735, "address": {"input": "8", "hidden": "31"}}, {"value": -0.023447556421160698, "address": {"input": "8", "hidden": "32"}}, {"value": -0.6518112421035767, "address": {"input": "8", "hidden": "33"}}, {"value": -0.4251684248447418, "address": {"input": "8", "hidden": "34"}}, {"value": 0.4973130524158478, "address": {"input": "8", "hidden": "35"}}, {"value": -0.14179976284503937, "address": {"input": "8", "hidden": "36"}}, {"value": 0.10665953904390335, "address": {"input": "8", "hidden": "37"}}, {"value": 0.06878776848316193, "address": {"input": "8", "hidden": "38"}}, {"value": -0.993098795413971, "address": {"input": "8", "hidden": "39"}}, {"value": -0.45566821098327637, "address": {"input": "9", "hidden": "0"}}, {"value": -0.22448286414146423, "address": {"input": "9", "hidden": "1"}}, {"value": -0.04582769423723221, "address": {"input": "9", "hidden": "2"}}, {"value": 0.004705323837697506, "address": {"input": "9", "hidden": "3"}}, {"value": -0.23844373226165771, "address": {"input": "9", "hidden": "4"}}, {"value": -0.3488083481788635, "address": {"input": "9", "hidden": "5"}}, {"value": -1.110137701034546, "address": {"input": "9", "hidden": "6"}}, {"value": -0.38555267453193665, "address": {"input": "9", "hidden": "7"}}, {"value": -0.38523054122924805, "address": {"input": "9", 
"hidden": "8"}}, {"value": 0.260660320520401, "address": {"input": "9", "hidden": "9"}}, {"value": 0.4811837077140808, "address": {"input": "9", "hidden": "10"}}, {"value": -0.3842940628528595, "address": {"input": "9", "hidden": "11"}}, {"value": -0.09798241406679153, "address": {"input": "9", "hidden": "12"}}, {"value": -0.28348833322525024, "address": {"input": "9", "hidden": "13"}}, {"value": 0.8295657634735107, "address": {"input": "9", "hidden": "14"}}, {"value": -0.4336043894290924, "address": {"input": "9", "hidden": "15"}}, {"value": -0.6710841059684753, "address": {"input": "9", "hidden": "16"}}, {"value": 0.44833096861839294, "address": {"input": "9", "hidden": "17"}}, {"value": -0.9160484671592712, "address": {"input": "9", "hidden": "18"}}, {"value": -0.3376787304878235, "address": {"input": "9", "hidden": "19"}}, {"value": 0.3312970697879791, "address": {"input": "9", "hidden": "20"}}, {"value": -0.1262325495481491, "address": {"input": "9", "hidden": "21"}}, {"value": 1.15604567527771, "address": {"input": "9", "hidden": "22"}}, {"value": -0.0831143781542778, "address": {"input": "9", "hidden": "23"}}, {"value": -0.36901965737342834, "address": {"input": "9", "hidden": "24"}}, {"value": -0.48133572936058044, "address": {"input": "9", "hidden": "25"}}, {"value": -0.30520355701446533, "address": {"input": "9", "hidden": "26"}}, {"value": -0.3345697224140167, "address": {"input": "9", "hidden": "27"}}, {"value": 0.6065699458122253, "address": {"input": "9", "hidden": "28"}}, {"value": -0.18363715708255768, "address": {"input": "9", "hidden": "29"}}, {"value": -1.1364085674285889, "address": {"input": "9", "hidden": "30"}}, {"value": 0.3542616665363312, "address": {"input": "9", "hidden": "31"}}, {"value": -0.21116401255130768, "address": {"input": "9", "hidden": "32"}}, {"value": 1.0767202377319336, "address": {"input": "9", "hidden": "33"}}, {"value": 0.08736960589885712, "address": {"input": "9", "hidden": "34"}}, {"value": 0.5449153780937195, 
"address": {"input": "9", "hidden": "35"}}, {"value": -0.10178443789482117, "address": {"input": "9", "hidden": "36"}}, {"value": -1.0414388179779053, "address": {"input": "9", "hidden": "37"}}, {"value": -0.38782939314842224, "address": {"input": "9", "hidden": "38"}}, {"value": 0.416803240776062, "address": {"input": "9", "hidden": "39"}}, {"value": 0.48801353573799133, "address": {"input": "10", "hidden": "0"}}, {"value": -2.268449068069458, "address": {"input": "10", "hidden": "1"}}, {"value": 0.013481085188686848, "address": {"input": "10", "hidden": "2"}}, {"value": -0.16463084518909454, "address": {"input": "10", "hidden": "3"}}, {"value": -0.011242344975471497, "address": {"input": "10", "hidden": "4"}}, {"value": 2.571396589279175, "address": {"input": "10", "hidden": "5"}}, {"value": -0.34497228264808655, "address": {"input": "10", "hidden": "6"}}, {"value": -1.355134129524231, "address": {"input": "10", "hidden": "7"}}, {"value": 0.2323192059993744, "address": {"input": "10", "hidden": "8"}}, {"value": 0.3359498381614685, "address": {"input": "10", "hidden": "9"}}, {"value": 0.8438993096351624, "address": {"input": "10", "hidden": "10"}}, {"value": -0.837637186050415, "address": {"input": "10", "hidden": "11"}}, {"value": 0.17030148208141327, "address": {"input": "10", "hidden": "12"}}, {"value": 0.29328855872154236, "address": {"input": "10", "hidden": "13"}}, {"value": 1.3566664457321167, "address": {"input": "10", "hidden": "14"}}, {"value": 0.1638750433921814, "address": {"input": "10", "hidden": "15"}}, {"value": 0.0774536281824112, "address": {"input": "10", "hidden": "16"}}, {"value": -0.00605859374627471, "address": {"input": "10", "hidden": "17"}}, {"value": 0.13740883767604828, "address": {"input": "10", "hidden": "18"}}, {"value": 0.26587817072868347, "address": {"input": "10", "hidden": "19"}}, {"value": 0.5085792541503906, "address": {"input": "10", "hidden": "20"}}, {"value": 0.1359466016292572, "address": {"input": "10", "hidden": "21"}}, 
{"value": 1.0455366373062134, "address": {"input": "10", "hidden": "22"}}, {"value": 0.13918986916542053, "address": {"input": "10", "hidden": "23"}}, {"value": 0.041186656802892685, "address": {"input": "10", "hidden": "24"}}, {"value": 0.14678673446178436, "address": {"input": "10", "hidden": "25"}}, {"value": -1.3198360204696655, "address": {"input": "10", "hidden": "26"}}, {"value": 0.18890082836151123, "address": {"input": "10", "hidden": "27"}}, {"value": 1.6547495126724243, "address": {"input": "10", "hidden": "28"}}, {"value": 0.23717744648456573, "address": {"input": "10", "hidden": "29"}}, {"value": 1.673633098602295, "address": {"input": "10", "hidden": "30"}}, {"value": -0.3760509490966797, "address": {"input": "10", "hidden": "31"}}, {"value": 0.21465246379375458, "address": {"input": "10", "hidden": "32"}}, {"value": -0.05811190605163574, "address": {"input": "10", "hidden": "33"}}, {"value": -1.0253032445907593, "address": {"input": "10", "hidden": "34"}}, {"value": -1.743502140045166, "address": {"input": "10", "hidden": "35"}}, {"value": 0.3710838258266449, "address": {"input": "10", "hidden": "36"}}, {"value": -0.6016693711280823, "address": {"input": "10", "hidden": "37"}}, {"value": 0.24857090413570404, "address": {"input": "10", "hidden": "38"}}, {"value": 0.9352197051048279, "address": {"input": "10", "hidden": "39"}}, {"value": -1.2226574420928955, "address": {"input": "11", "hidden": "0"}}, {"value": 1.7467516660690308, "address": {"input": "11", "hidden": "1"}}, {"value": 0.08228172361850739, "address": {"input": "11", "hidden": "2"}}, {"value": 0.03094615787267685, "address": {"input": "11", "hidden": "3"}}, {"value": 0.31649863719940186, "address": {"input": "11", "hidden": "4"}}, {"value": -0.7355198860168457, "address": {"input": "11", "hidden": "5"}}, {"value": -1.4901366233825684, "address": {"input": "11", "hidden": "6"}}, {"value": -1.8497445583343506, "address": {"input": "11", "hidden": "7"}}, {"value": 0.6089054942131042, 
"address": {"input": "11", "hidden": "8"}}, {"value": 0.011931431479752064, "address": {"input": "11", "hidden": "9"}}, {"value": -0.24211113154888153, "address": {"input": "11", "hidden": "10"}}, {"value": 0.46545323729515076, "address": {"input": "11", "hidden": "11"}}, {"value": 0.3237782120704651, "address": {"input": "11", "hidden": "12"}}, {"value": 0.6216354370117188, "address": {"input": "11", "hidden": "13"}}, {"value": -0.9739515781402588, "address": {"input": "11", "hidden": "14"}}, {"value": 0.7142964005470276, "address": {"input": "11", "hidden": "15"}}, {"value": -0.06230403482913971, "address": {"input": "11", "hidden": "16"}}, {"value": -1.9097198247909546, "address": {"input": "11", "hidden": "17"}}, {"value": 0.43287256360054016, "address": {"input": "11", "hidden": "18"}}, {"value": 0.5320918560028076, "address": {"input": "11", "hidden": "19"}}, {"value": 2.3325724601745605, "address": {"input": "11", "hidden": "20"}}, {"value": 0.3524457514286041, "address": {"input": "11", "hidden": "21"}}, {"value": -0.34943121671676636, "address": {"input": "11", "hidden": "22"}}, {"value": 0.18303091824054718, "address": {"input": "11", "hidden": "23"}}, {"value": 0.6523104310035706, "address": {"input": "11", "hidden": "24"}}, {"value": 0.5203726887702942, "address": {"input": "11", "hidden": "25"}}, {"value": -0.427277535200119, "address": {"input": "11", "hidden": "26"}}, {"value": 0.6285937428474426, "address": {"input": "11", "hidden": "27"}}, {"value": 0.5402740240097046, "address": {"input": "11", "hidden": "28"}}, {"value": 0.47298678755760193, "address": {"input": "11", "hidden": "29"}}, {"value": 1.986631155014038, "address": {"input": "11", "hidden": "30"}}, {"value": -1.7108628749847412, "address": {"input": "11", "hidden": "31"}}, {"value": 0.6216320991516113, "address": {"input": "11", "hidden": "32"}}, {"value": 1.0898218154907227, "address": {"input": "11", "hidden": "33"}}, {"value": 1.7614911794662476, "address": {"input": "11", "hidden": 
"34"}}, {"value": 0.11670975387096405, "address": {"input": "11", "hidden": "35"}}, {"value": 0.2007903903722763, "address": {"input": "11", "hidden": "36"}}, {"value": 1.0338503122329712, "address": {"input": "11", "hidden": "37"}}, {"value": 0.7743848562240601, "address": {"input": "11", "hidden": "38"}}, {"value": -0.6331358551979065, "address": {"input": "11", "hidden": "39"}}, {"value": 1.3067914247512817, "address": {"input": "12", "hidden": "0"}}, {"value": -0.546696126461029, "address": {"input": "12", "hidden": "1"}}, {"value": -0.030750971287488937, "address": {"input": "12", "hidden": "2"}}, {"value": 1.0299263000488281, "address": {"input": "12", "hidden": "3"}}, {"value": -0.03963511437177658, "address": {"input": "12", "hidden": "4"}}, {"value": -0.7041047215461731, "address": {"input": "12", "hidden": "5"}}, {"value": -1.0441724061965942, "address": {"input": "12", "hidden": "6"}}, {"value": -0.706171452999115, "address": {"input": "12", "hidden": "7"}}, {"value": 0.09084011614322662, "address": {"input": "12", "hidden": "8"}}, {"value": 1.5810374021530151, "address": {"input": "12", "hidden": "9"}}, {"value": 0.1337917447090149, "address": {"input": "12", "hidden": "10"}}, {"value": -1.3636906147003174, "address": {"input": "12", "hidden": "11"}}, {"value": 0.12462664395570755, "address": {"input": "12", "hidden": "12"}}, {"value": 0.11902748048305511, "address": {"input": "12", "hidden": "13"}}, {"value": 0.6417853236198425, "address": {"input": "12", "hidden": "14"}}, {"value": 0.26484110951423645, "address": {"input": "12", "hidden": "15"}}, {"value": -0.26488837599754333, "address": {"input": "12", "hidden": "16"}}, {"value": -0.3410510718822479, "address": {"input": "12", "hidden": "17"}}, {"value": -0.9904624819755554, "address": {"input": "12", "hidden": "18"}}, {"value": -0.030169688165187836, "address": {"input": "12", "hidden": "19"}}, {"value": -1.0052157640457153, "address": {"input": "12", "hidden": "20"}}, {"value": 
0.06489361822605133, "address": {"input": "12", "hidden": "21"}}, {"value": 0.5505566596984863, "address": {"input": "12", "hidden": "22"}}, {"value": 0.13903187215328217, "address": {"input": "12", "hidden": "23"}}, {"value": 0.3302154839038849, "address": {"input": "12", "hidden": "24"}}, {"value": 0.2911994457244873, "address": {"input": "12", "hidden": "25"}}, {"value": -2.4477756023406982, "address": {"input": "12", "hidden": "26"}}, {"value": 0.09440165758132935, "address": {"input": "12", "hidden": "27"}}, {"value": 0.13846740126609802, "address": {"input": "12", "hidden": "28"}}, {"value": -0.15235278010368347, "address": {"input": "12", "hidden": "29"}}, {"value": 0.8790722489356995, "address": {"input": "12", "hidden": "30"}}, {"value": 0.9122262597084045, "address": {"input": "12", "hidden": "31"}}, {"value": 0.16821017861366272, "address": {"input": "12", "hidden": "32"}}, {"value": 0.7952448725700378, "address": {"input": "12", "hidden": "33"}}, {"value": -1.0207489728927612, "address": {"input": "12", "hidden": "34"}}, {"value": -0.027004091069102287, "address": {"input": "12", "hidden": "35"}}, {"value": 0.12336745858192444, "address": {"input": "12", "hidden": "36"}}, {"value": 0.2598689794540405, "address": {"input": "12", "hidden": "37"}}, {"value": 0.2865458130836487, "address": {"input": "12", "hidden": "38"}}, {"value": -0.7882399559020996, "address": {"input": "12", "hidden": "39"}}, {"value": -0.41364479064941406, "address": {"input": "13", "hidden": "0"}}, {"value": 0.534664511680603, "address": {"input": "13", "hidden": "1"}}, {"value": -0.07833722978830338, "address": {"input": "13", "hidden": "2"}}, {"value": 1.4307043552398682, "address": {"input": "13", "hidden": "3"}}, {"value": -0.16073903441429138, "address": {"input": "13", "hidden": "4"}}, {"value": 0.7588227391242981, "address": {"input": "13", "hidden": "5"}}, {"value": -0.6242964863777161, "address": {"input": "13", "hidden": "6"}}, {"value": 0.6100948452949524, "address": 
{"input": "13", "hidden": "7"}}, {"value": -0.3751679062843323, "address": {"input": "13", "hidden": "8"}}, {"value": -1.6709260940551758, "address": {"input": "13", "hidden": "9"}}, {"value": -0.94948810338974, "address": {"input": "13", "hidden": "10"}}, {"value": 0.18772943317890167, "address": {"input": "13", "hidden": "11"}}, {"value": 0.035481348633766174, "address": {"input": "13", "hidden": "12"}}, {"value": -0.3736555576324463, "address": {"input": "13", "hidden": "13"}}, {"value": -0.9634411334991455, "address": {"input": "13", "hidden": "14"}}, {"value": -0.23755618929862976, "address": {"input": "13", "hidden": "15"}}, {"value": -0.42249223589897156, "address": {"input": "13", "hidden": "16"}}, {"value": 1.256032943725586, "address": {"input": "13", "hidden": "17"}}, {"value": 1.838978886604309, "address": {"input": "13", "hidden": "18"}}, {"value": -0.24314433336257935, "address": {"input": "13", "hidden": "19"}}, {"value": 0.45461562275886536, "address": {"input": "13", "hidden": "20"}}, {"value": -0.21054421365261078, "address": {"input": "13", "hidden": "21"}}, {"value": 1.2402307987213135, "address": {"input": "13", "hidden": "22"}}, {"value": -0.16159981489181519, "address": {"input": "13", "hidden": "23"}}, {"value": -0.2490532398223877, "address": {"input": "13", "hidden": "24"}}, {"value": -0.16430597007274628, "address": {"input": "13", "hidden": "25"}}, {"value": 1.0432748794555664, "address": {"input": "13", "hidden": "26"}}, {"value": -0.383986234664917, "address": {"input": "13", "hidden": "27"}}, {"value": 0.05273546651005745, "address": {"input": "13", "hidden": "28"}}, {"value": -0.35143783688545227, "address": {"input": "13", "hidden": "29"}}, {"value": -0.333139032125473, "address": {"input": "13", "hidden": "30"}}, {"value": -0.9660216569900513, "address": {"input": "13", "hidden": "31"}}, {"value": -0.3582497239112854, "address": {"input": "13", "hidden": "32"}}, {"value": 0.6191223859786987, "address": {"input": "13", "hidden": 
"33"}}, {"value": -0.7302830815315247, "address": {"input": "13", "hidden": "34"}}, {"value": -0.9088345766067505, "address": {"input": "13", "hidden": "35"}}, {"value": -0.17214040458202362, "address": {"input": "13", "hidden": "36"}}, {"value": -1.8065818548202515, "address": {"input": "13", "hidden": "37"}}, {"value": -0.29499590396881104, "address": {"input": "13", "hidden": "38"}}, {"value": -0.6733576059341431, "address": {"input": "13", "hidden": "39"}}, {"value": 0.3681548535823822, "address": {"input": "14", "hidden": "0"}}, {"value": 0.19136814773082733, "address": {"input": "14", "hidden": "1"}}, {"value": -0.029443055391311646, "address": {"input": "14", "hidden": "2"}}, {"value": 0.6917205452919006, "address": {"input": "14", "hidden": "3"}}, {"value": 0.042334385216236115, "address": {"input": "14", "hidden": "4"}}, {"value": -0.0046339696273207664, "address": {"input": "14", "hidden": "5"}}, {"value": 0.9205418229103088, "address": {"input": "14", "hidden": "6"}}, {"value": -0.12215148657560349, "address": {"input": "14", "hidden": "7"}}, {"value": -0.4373405873775482, "address": {"input": "14", "hidden": "8"}}, {"value": -2.438491106033325, "address": {"input": "14", "hidden": "9"}}, {"value": 0.8429450988769531, "address": {"input": "14", "hidden": "10"}}, {"value": -0.16806763410568237, "address": {"input": "14", "hidden": "11"}}, {"value": -0.049498315900564194, "address": {"input": "14", "hidden": "12"}}, {"value": -0.3178558349609375, "address": {"input": "14", "hidden": "13"}}, {"value": 1.624982237815857, "address": {"input": "14", "hidden": "14"}}, {"value": -0.23427119851112366, "address": {"input": "14", "hidden": "15"}}, {"value": -0.08467927575111389, "address": {"input": "14", "hidden": "16"}}, {"value": 0.27908480167388916, "address": {"input": "14", "hidden": "17"}}, {"value": -0.06842587888240814, "address": {"input": "14", "hidden": "18"}}, {"value": -0.4029286801815033, "address": {"input": "14", "hidden": "19"}}, {"value": 
2.5448830127716064, "address": {"input": "14", "hidden": "20"}}, {"value": 0.0498121939599514, "address": {"input": "14", "hidden": "21"}}, {"value": -3.1384317874908447, "address": {"input": "14", "hidden": "22"}}, {"value": -0.04839995503425598, "address": {"input": "14", "hidden": "23"}}, {"value": -0.325638085603714, "address": {"input": "14", "hidden": "24"}}, {"value": -0.3694353997707367, "address": {"input": "14", "hidden": "25"}}, {"value": -0.9925521016120911, "address": {"input": "14", "hidden": "26"}}, {"value": -0.32195666432380676, "address": {"input": "14", "hidden": "27"}}, {"value": -0.23620811104774475, "address": {"input": "14", "hidden": "28"}}, {"value": -0.3169171214103699, "address": {"input": "14", "hidden": "29"}}, {"value": -0.7894417643547058, "address": {"input": "14", "hidden": "30"}}, {"value": 2.1510331630706787, "address": {"input": "14", "hidden": "31"}}, {"value": -0.24617108702659607, "address": {"input": "14", "hidden": "32"}}, {"value": 0.045010752975940704, "address": {"input": "14", "hidden": "33"}}, {"value": 2.619300127029419, "address": {"input": "14", "hidden": "34"}}, {"value": -3.7340643405914307, "address": {"input": "14", "hidden": "35"}}, {"value": -0.036356501281261444, "address": {"input": "14", "hidden": "36"}}, {"value": -0.8507430553436279, "address": {"input": "14", "hidden": "37"}}, {"value": -0.41485053300857544, "address": {"input": "14", "hidden": "38"}}, {"value": -1.1886852979660034, "address": {"input": "14", "hidden": "39"}}, {"value": -0.07488202303647995, "address": {"input": "15", "hidden": "0"}}, {"value": -0.6401030421257019, "address": {"input": "15", "hidden": "1"}}, {"value": 0.09722000360488892, "address": {"input": "15", "hidden": "2"}}, {"value": -0.7863746881484985, "address": {"input": "15", "hidden": "3"}}, {"value": -0.2521696388721466, "address": {"input": "15", "hidden": "4"}}, {"value": 0.12505419552326202, "address": {"input": "15", "hidden": "5"}}, {"value": -0.3045153319835663, 
"address": {"input": "15", "hidden": "6"}}, {"value": 0.418660432100296, "address": {"input": "15", "hidden": "7"}}, {"value": 0.09068595618009567, "address": {"input": "15", "hidden": "8"}}, {"value": -0.7915244102478027, "address": {"input": "15", "hidden": "9"}}, {"value": -0.3247368633747101, "address": {"input": "15", "hidden": "10"}}, {"value": 0.16134323179721832, "address": {"input": "15", "hidden": "11"}}, {"value": -0.023046625778079033, "address": {"input": "15", "hidden": "12"}}, {"value": 0.16743414103984833, "address": {"input": "15", "hidden": "13"}}, {"value": -0.04808124527335167, "address": {"input": "15", "hidden": "14"}}, {"value": 0.1684686541557312, "address": {"input": "15", "hidden": "15"}}, {"value": -0.5425732731819153, "address": {"input": "15", "hidden": "16"}}, {"value": -0.1078920066356659, "address": {"input": "15", "hidden": "17"}}, {"value": 0.3328703045845032, "address": {"input": "15", "hidden": "18"}}, {"value": 0.054904669523239136, "address": {"input": "15", "hidden": "19"}}, {"value": -0.2112252116203308, "address": {"input": "15", "hidden": "20"}}, {"value": -0.13935962319374084, "address": {"input": "15", "hidden": "21"}}, {"value": -0.026503797620534897, "address": {"input": "15", "hidden": "22"}}, {"value": 0.12536528706550598, "address": {"input": "15", "hidden": "23"}}, {"value": 0.06890115886926651, "address": {"input": "15", "hidden": "24"}}, {"value": -0.08985260128974915, "address": {"input": "15", "hidden": "25"}}, {"value": 0.1327490359544754, "address": {"input": "15", "hidden": "26"}}, {"value": 0.05043184384703636, "address": {"input": "15", "hidden": "27"}}, {"value": 1.1006042957305908, "address": {"input": "15", "hidden": "28"}}, {"value": -0.2191508710384369, "address": {"input": "15", "hidden": "29"}}, {"value": 0.13138830661773682, "address": {"input": "15", "hidden": "30"}}, {"value": -0.8778656125068665, "address": {"input": "15", "hidden": "31"}}, {"value": 0.08934132009744644, "address": {"input": 
"15", "hidden": "32"}}, {"value": -0.2893460988998413, "address": {"input": "15", "hidden": "33"}}, {"value": -0.3400782346725464, "address": {"input": "15", "hidden": "34"}}, {"value": -0.27302446961402893, "address": {"input": "15", "hidden": "35"}}, {"value": 0.1943623125553131, "address": {"input": "15", "hidden": "36"}}, {"value": -0.10873214155435562, "address": {"input": "15", "hidden": "37"}}, {"value": 0.13197901844978333, "address": {"input": "15", "hidden": "38"}}, {"value": 0.32532960176467896, "address": {"input": "15", "hidden": "39"}}, {"value": 0.06338100135326385, "address": {"input": "16", "hidden": "0"}}, {"value": -0.10240162163972855, "address": {"input": "16", "hidden": "1"}}, {"value": -0.15779024362564087, "address": {"input": "16", "hidden": "2"}}, {"value": 0.07999655604362488, "address": {"input": "16", "hidden": "3"}}, {"value": 0.25976207852363586, "address": {"input": "16", "hidden": "4"}}, {"value": -0.12479044497013092, "address": {"input": "16", "hidden": "5"}}, {"value": -0.6605144739151001, "address": {"input": "16", "hidden": "6"}}, {"value": -1.2090740203857422, "address": {"input": "16", "hidden": "7"}}, {"value": -0.19078128039836884, "address": {"input": "16", "hidden": "8"}}, {"value": 0.1838103085756302, "address": {"input": "16", "hidden": "9"}}, {"value": -0.19941751658916473, "address": {"input": "16", "hidden": "10"}}, {"value": 0.9295722246170044, "address": {"input": "16", "hidden": "11"}}, {"value": -0.15064002573490143, "address": {"input": "16", "hidden": "12"}}, {"value": -0.35396742820739746, "address": {"input": "16", "hidden": "13"}}, {"value": 0.8340908288955688, "address": {"input": "16", "hidden": "14"}}, {"value": -0.019586440175771713, "address": {"input": "16", "hidden": "15"}}, {"value": 0.29381152987480164, "address": {"input": "16", "hidden": "16"}}, {"value": -0.9987826943397522, "address": {"input": "16", "hidden": "17"}}, {"value": -0.29979416728019714, "address": {"input": "16", "hidden": "18"}}, 
{"value": -0.07882586121559143, "address": {"input": "16", "hidden": "19"}}, {"value": 0.285011351108551, "address": {"input": "16", "hidden": "20"}}, {"value": 0.12765620648860931, "address": {"input": "16", "hidden": "21"}}, {"value": -0.5413493514060974, "address": {"input": "16", "hidden": "22"}}, {"value": -0.011054011061787605, "address": {"input": "16", "hidden": "23"}}, {"value": -0.0847943127155304, "address": {"input": "16", "hidden": "24"}}, {"value": 0.021101731806993484, "address": {"input": "16", "hidden": "25"}}, {"value": -0.2233026921749115, "address": {"input": "16", "hidden": "26"}}, {"value": -0.07032999396324158, "address": {"input": "16", "hidden": "27"}}, {"value": 0.7563269734382629, "address": {"input": "16", "hidden": "28"}}, {"value": 0.040626056492328644, "address": {"input": "16", "hidden": "29"}}, {"value": -0.062364961951971054, "address": {"input": "16", "hidden": "30"}}, {"value": -0.26359882950782776, "address": {"input": "16", "hidden": "31"}}, {"value": -0.04191463813185692, "address": {"input": "16", "hidden": "32"}}, {"value": 1.0276927947998047, "address": {"input": "16", "hidden": "33"}}, {"value": -0.08961732685565948, "address": {"input": "16", "hidden": "34"}}, {"value": -0.005820057820528746, "address": {"input": "16", "hidden": "35"}}, {"value": -0.05757575482130051, "address": {"input": "16", "hidden": "36"}}, {"value": 0.5627974271774292, "address": {"input": "16", "hidden": "37"}}, {"value": -0.09770695120096207, "address": {"input": "16", "hidden": "38"}}, {"value": -0.8534029126167297, "address": {"input": "16", "hidden": "39"}}, {"value": -1.310288429260254, "address": {"input": "17", "hidden": "0"}}, {"value": 0.43657350540161133, "address": {"input": "17", "hidden": "1"}}, {"value": 0.01654934696853161, "address": {"input": "17", "hidden": "2"}}, {"value": 0.2625191807746887, "address": {"input": "17", "hidden": "3"}}, {"value": 0.3400402069091797, "address": {"input": "17", "hidden": "4"}}, {"value": 
0.7841347455978394, "address": {"input": "17", "hidden": "5"}}, {"value": 0.2170754224061966, "address": {"input": "17", "hidden": "6"}}, {"value": -1.2295055389404297, "address": {"input": "17", "hidden": "7"}}, {"value": 0.2617177665233612, "address": {"input": "17", "hidden": "8"}}, {"value": 0.8072319626808167, "address": {"input": "17", "hidden": "9"}}, {"value": 0.41624289751052856, "address": {"input": "17", "hidden": "10"}}, {"value": -1.4779707193374634, "address": {"input": "17", "hidden": "11"}}, {"value": 0.03240669518709183, "address": {"input": "17", "hidden": "12"}}, {"value": 0.27354395389556885, "address": {"input": "17", "hidden": "13"}}, {"value": -0.4821840226650238, "address": {"input": "17", "hidden": "14"}}, {"value": 0.46335896849632263, "address": {"input": "17", "hidden": "15"}}, {"value": 0.4070449769496918, "address": {"input": "17", "hidden": "16"}}, {"value": -0.32928481698036194, "address": {"input": "17", "hidden": "17"}}, {"value": 0.4892013967037201, "address": {"input": "17", "hidden": "18"}}, {"value": 0.30557745695114136, "address": {"input": "17", "hidden": "19"}}, {"value": 1.1539162397384644, "address": {"input": "17", "hidden": "20"}}, {"value": 0.29143306612968445, "address": {"input": "17", "hidden": "21"}}, {"value": 0.4841805100440979, "address": {"input": "17", "hidden": "22"}}, {"value": 0.026207374408841133, "address": {"input": "17", "hidden": "23"}}, {"value": 0.4813193678855896, "address": {"input": "17", "hidden": "24"}}, {"value": 0.44812095165252686, "address": {"input": "17", "hidden": "25"}}, {"value": 0.6245359182357788, "address": {"input": "17", "hidden": "26"}}, {"value": 0.24833786487579346, "address": {"input": "17", "hidden": "27"}}, {"value": -0.055918093770742416, "address": {"input": "17", "hidden": "28"}}, {"value": 0.028949495404958725, "address": {"input": "17", "hidden": "29"}}, {"value": 0.5075787901878357, "address": {"input": "17", "hidden": "30"}}, {"value": 0.2661462724208832, "address": 
{"input": "17", "hidden": "31"}}, {"value": 0.27804797887802124, "address": {"input": "17", "hidden": "32"}}, {"value": -1.0420942306518555, "address": {"input": "17", "hidden": "33"}}, {"value": 0.6086123585700989, "address": {"input": "17", "hidden": "34"}}, {"value": 0.260128915309906, "address": {"input": "17", "hidden": "35"}}, {"value": 0.03878258913755417, "address": {"input": "17", "hidden": "36"}}, {"value": -0.4720776677131653, "address": {"input": "17", "hidden": "37"}}, {"value": 0.26974841952323914, "address": {"input": "17", "hidden": "38"}}, {"value": 0.7904338836669922, "address": {"input": "17", "hidden": "39"}}, {"value": -0.48121631145477295, "address": {"input": "18", "hidden": "0"}}, {"value": -0.07575561106204987, "address": {"input": "18", "hidden": "1"}}, {"value": 0.017364181578159332, "address": {"input": "18", "hidden": "2"}}, {"value": 0.17059797048568726, "address": {"input": "18", "hidden": "3"}}, {"value": 0.16400830447673798, "address": {"input": "18", "hidden": "4"}}, {"value": 0.22135958075523376, "address": {"input": "18", "hidden": "5"}}, {"value": 0.4417360723018646, "address": {"input": "18", "hidden": "6"}}, {"value": 0.8318414688110352, "address": {"input": "18", "hidden": "7"}}, {"value": 0.3182331621646881, "address": {"input": "18", "hidden": "8"}}, {"value": 0.25130540132522583, "address": {"input": "18", "hidden": "9"}}, {"value": -2.929713726043701, "address": {"input": "18", "hidden": "10"}}, {"value": 0.35326412320137024, "address": {"input": "18", "hidden": "11"}}, {"value": 0.03056519664824009, "address": {"input": "18", "hidden": "12"}}, {"value": 0.09749334305524826, "address": {"input": "18", "hidden": "13"}}, {"value": -2.0763955116271973, "address": {"input": "18", "hidden": "14"}}, {"value": 0.09175221621990204, "address": {"input": "18", "hidden": "15"}}, {"value": 0.37537649273872375, "address": {"input": "18", "hidden": "16"}}, {"value": 1.4592469930648804, "address": {"input": "18", "hidden": "17"}}, 
{"value": 0.12606099247932434, "address": {"input": "18", "hidden": "18"}}, {"value": 0.05173710361123085, "address": {"input": "18", "hidden": "19"}}, {"value": 1.0565519332885742, "address": {"input": "18", "hidden": "20"}}, {"value": 0.2515791654586792, "address": {"input": "18", "hidden": "21"}}, {"value": 1.1518076658248901, "address": {"input": "18", "hidden": "22"}}, {"value": -0.11245499551296234, "address": {"input": "18", "hidden": "23"}}, {"value": 0.050514161586761475, "address": {"input": "18", "hidden": "24"}}, {"value": 0.026319757103919983, "address": {"input": "18", "hidden": "25"}}, {"value": -0.5313353538513184, "address": {"input": "18", "hidden": "26"}}, {"value": 0.17639414966106415, "address": {"input": "18", "hidden": "27"}}, {"value": 0.5311457514762878, "address": {"input": "18", "hidden": "28"}}, {"value": 0.1370747685432434, "address": {"input": "18", "hidden": "29"}}, {"value": 0.3020651042461395, "address": {"input": "18", "hidden": "30"}}, {"value": -0.8850866556167603, "address": {"input": "18", "hidden": "31"}}, {"value": 0.0030117002315819263, "address": {"input": "18", "hidden": "32"}}, {"value": 0.4892079830169678, "address": {"input": "18", "hidden": "33"}}, {"value": -0.1954929530620575, "address": {"input": "18", "hidden": "34"}}, {"value": 0.3166620433330536, "address": {"input": "18", "hidden": "35"}}, {"value": 0.039286211133003235, "address": {"input": "18", "hidden": "36"}}, {"value": -0.46186113357543945, "address": {"input": "18", "hidden": "37"}}, {"value": -0.05340197682380676, "address": {"input": "18", "hidden": "38"}}, {"value": -1.2243982553482056, "address": {"input": "18", "hidden": "39"}}, {"value": 0.7853472828865051, "address": {"input": "19", "hidden": "0"}}, {"value": 0.3530313968658447, "address": {"input": "19", "hidden": "1"}}, {"value": -0.18954713642597198, "address": {"input": "19", "hidden": "2"}}, {"value": -0.0454476997256279, "address": {"input": "19", "hidden": "3"}}, {"value": 
-0.23542015254497528, "address": {"input": "19", "hidden": "4"}}, {"value": 0.3034687638282776, "address": {"input": "19", "hidden": "5"}}, {"value": 0.7076362371444702, "address": {"input": "19", "hidden": "6"}}, {"value": 0.8080902099609375, "address": {"input": "19", "hidden": "7"}}, {"value": -0.4171127378940582, "address": {"input": "19", "hidden": "8"}}, {"value": -0.12304612994194031, "address": {"input": "19", "hidden": "9"}}, {"value": -0.4654335081577301, "address": {"input": "19", "hidden": "10"}}, {"value": -0.294969379901886, "address": {"input": "19", "hidden": "11"}}, {"value": -0.33517971634864807, "address": {"input": "19", "hidden": "12"}}, {"value": -0.474618524312973, "address": {"input": "19", "hidden": "13"}}, {"value": 0.9876082539558411, "address": {"input": "19", "hidden": "14"}}, {"value": -0.3536674380302429, "address": {"input": "19", "hidden": "15"}}, {"value": -0.40890219807624817, "address": {"input": "19", "hidden": "16"}}, {"value": 0.5216270089149475, "address": {"input": "19", "hidden": "17"}}, {"value": -0.9742269515991211, "address": {"input": "19", "hidden": "18"}}, {"value": -0.35398101806640625, "address": {"input": "19", "hidden": "19"}}, {"value": -0.41245386004447937, "address": {"input": "19", "hidden": "20"}}, {"value": -0.444177508354187, "address": {"input": "19", "hidden": "21"}}, {"value": -1.3861956596374512, "address": {"input": "19", "hidden": "22"}}, {"value": -0.08258633315563202, "address": {"input": "19", "hidden": "23"}}, {"value": -0.519912838935852, "address": {"input": "19", "hidden": "24"}}, {"value": -0.2574532628059387, "address": {"input": "19", "hidden": "25"}}, {"value": 0.3242805600166321, "address": {"input": "19", "hidden": "26"}}, {"value": -0.37017273902893066, "address": {"input": "19", "hidden": "27"}}, {"value": -0.1945808231830597, "address": {"input": "19", "hidden": "28"}}, {"value": -0.5679508447647095, "address": {"input": "19", "hidden": "29"}}, {"value": -1.0001007318496704, "address": 
{"input": "19", "hidden": "30"}}, {"value": -0.43760791420936584, "address": {"input": "19", "hidden": "31"}}, {"value": -0.44297540187835693, "address": {"input": "19", "hidden": "32"}}, {"value": -0.8634170293807983, "address": {"input": "19", "hidden": "33"}}, {"value": 0.2660033702850342, "address": {"input": "19", "hidden": "34"}}, {"value": 0.5709490776062012, "address": {"input": "19", "hidden": "35"}}, {"value": -0.19037535786628723, "address": {"input": "19", "hidden": "36"}}, {"value": 0.8857525587081909, "address": {"input": "19", "hidden": "37"}}, {"value": -0.41917192935943604, "address": {"input": "19", "hidden": "38"}}, {"value": 0.3601200580596924, "address": {"input": "19", "hidden": "39"}}]}
\ No newline at end of file diff --git a/sample-apps/blog-recommendation/src/main/application/constants/b_final.json b/sample-apps/blog-recommendation/src/main/application/constants/b_final.json new file mode 100644 index 00000000000..ceb58e1d2cc --- /dev/null +++ b/sample-apps/blog-recommendation/src/main/application/constants/b_final.json @@ -0,0 +1 @@ +{"cells": [{"value": 0.0657607913017273, "address": {"final": "0"}}]}
\ No newline at end of file diff --git a/sample-apps/blog-recommendation/src/main/application/constants/b_hidden.json b/sample-apps/blog-recommendation/src/main/application/constants/b_hidden.json new file mode 100644 index 00000000000..eec55773a1a --- /dev/null +++ b/sample-apps/blog-recommendation/src/main/application/constants/b_hidden.json @@ -0,0 +1 @@ +{"cells": [{"value": 0.28026774525642395, "address": {"hidden": "0"}}, {"value": 0.025753282010555267, "address": {"hidden": "1"}}, {"value": 0.0006110007525421679, "address": {"hidden": "2"}}, {"value": 0.2611096203327179, "address": {"hidden": "3"}}, {"value": -0.16608914732933044, "address": {"hidden": "4"}}, {"value": -0.020019574090838432, "address": {"hidden": "5"}}, {"value": -0.02158009447157383, "address": {"hidden": "6"}}, {"value": 0.2171832174062729, "address": {"hidden": "7"}}, {"value": 0.029577188193798065, "address": {"hidden": "8"}}, {"value": -0.07102895528078079, "address": {"hidden": "9"}}, {"value": -0.0017120442353188992, "address": {"hidden": "10"}}, {"value": -0.4862483739852905, "address": {"hidden": "11"}}, {"value": 0.08691747486591339, "address": {"hidden": "12"}}, {"value": 0.12763629853725433, "address": {"hidden": "13"}}, {"value": 0.21345646679401398, "address": {"hidden": "14"}}, {"value": 0.04107722267508507, "address": {"hidden": "15"}}, {"value": -0.1561310738325119, "address": {"hidden": "16"}}, {"value": 0.10497093945741653, "address": {"hidden": "17"}}, {"value": 0.5862319469451904, "address": {"hidden": "18"}}, {"value": 0.0415155403316021, "address": {"hidden": "19"}}, {"value": -0.03449853137135506, "address": {"hidden": "20"}}, {"value": -0.10370904207229614, "address": {"hidden": "21"}}, {"value": 0.06869155913591385, "address": {"hidden": "22"}}, {"value": 0.09407837688922882, "address": {"hidden": "23"}}, {"value": 0.005010432098060846, "address": {"hidden": "24"}}, {"value": -0.012594758532941341, "address": {"hidden": "25"}}, {"value": -0.033177513629198074, 
"address": {"hidden": "26"}}, {"value": 0.05969003215432167, "address": {"hidden": "27"}}, {"value": 0.5090197920799255, "address": {"hidden": "28"}}, {"value": -0.05808544531464577, "address": {"hidden": "29"}}, {"value": 0.026866639032959938, "address": {"hidden": "30"}}, {"value": -0.08615914732217789, "address": {"hidden": "31"}}, {"value": 0.03457110375165939, "address": {"hidden": "32"}}, {"value": 0.19200445711612701, "address": {"hidden": "33"}}, {"value": -0.11071993410587311, "address": {"hidden": "34"}}, {"value": 0.12121642380952835, "address": {"hidden": "35"}}, {"value": 0.1500770002603531, "address": {"hidden": "36"}}, {"value": -0.024845093488693237, "address": {"hidden": "37"}}, {"value": 0.05682986602187157, "address": {"hidden": "38"}}, {"value": -0.3581112325191498, "address": {"hidden": "39"}}]}
\ No newline at end of file diff --git a/sample-apps/blog-recommendation/src/main/application/constants/input_transform_1.json b/sample-apps/blog-recommendation/src/main/application/constants/input_transform_1.json new file mode 100644 index 00000000000..c28dbf49ddf --- /dev/null +++ b/sample-apps/blog-recommendation/src/main/application/constants/input_transform_1.json @@ -0,0 +1,14 @@ +{ + "cells": [ + { "address": { "user_item_cf": "0", "input": "10"}, "value": 1.0 }, + { "address": { "user_item_cf": "1", "input": "11"}, "value": 1.0 }, + { "address": { "user_item_cf": "2", "input": "12"}, "value": 1.0 }, + { "address": { "user_item_cf": "3", "input": "13"}, "value": 1.0 }, + { "address": { "user_item_cf": "4", "input": "14"}, "value": 1.0 }, + { "address": { "user_item_cf": "5", "input": "15"}, "value": 1.0 }, + { "address": { "user_item_cf": "6", "input": "16"}, "value": 1.0 }, + { "address": { "user_item_cf": "7", "input": "17"}, "value": 1.0 }, + { "address": { "user_item_cf": "8", "input": "18"}, "value": 1.0 }, + { "address": { "user_item_cf": "9", "input": "19"}, "value": 1.0 } + ] +} diff --git a/sample-apps/blog-recommendation/src/main/application/constants/input_transform_2.json b/sample-apps/blog-recommendation/src/main/application/constants/input_transform_2.json new file mode 100644 index 00000000000..e4d5d88baf8 --- /dev/null +++ b/sample-apps/blog-recommendation/src/main/application/constants/input_transform_2.json @@ -0,0 +1,14 @@ +{ + "cells": [ + { "address": { "user_item_cf": "0", "input": "0"}, "value": 1.0 }, + { "address": { "user_item_cf": "1", "input": "1"}, "value": 1.0 }, + { "address": { "user_item_cf": "2", "input": "2"}, "value": 1.0 }, + { "address": { "user_item_cf": "3", "input": "3"}, "value": 1.0 }, + { "address": { "user_item_cf": "4", "input": "4"}, "value": 1.0 }, + { "address": { "user_item_cf": "5", "input": "5"}, "value": 1.0 }, + { "address": { "user_item_cf": "6", "input": "6"}, "value": 1.0 }, + { "address": { 
"user_item_cf": "7", "input": "7"}, "value": 1.0 }, + { "address": { "user_item_cf": "8", "input": "8"}, "value": 1.0 }, + { "address": { "user_item_cf": "9", "input": "9"}, "value": 1.0 } + ] +} diff --git a/sample-apps/blog-recommendation/src/main/application/hosts.xml b/sample-apps/blog-recommendation/src/main/application/hosts.xml new file mode 100644 index 00000000000..632e48db321 --- /dev/null +++ b/sample-apps/blog-recommendation/src/main/application/hosts.xml @@ -0,0 +1,7 @@ +<?xml version="1.0" encoding="utf-8" ?> +<hosts> + <host name="localhost"> + <alias>node1</alias> + </host> +</hosts> + diff --git a/sample-apps/blog-recommendation/src/main/application/searchdefinitions/blog_post.sd b/sample-apps/blog-recommendation/src/main/application/searchdefinitions/blog_post.sd index 1b92822e425..b0b53032216 100644 --- a/sample-apps/blog-recommendation/src/main/application/searchdefinitions/blog_post.sd +++ b/sample-apps/blog-recommendation/src/main/application/searchdefinitions/blog_post.sd @@ -63,4 +63,95 @@ search blog_post { } } + constant W_hidden { + file: constants/W_hidden.json + type: tensor(input[20],hidden[40]) + } + + constant b_hidden { + file: constants/b_hidden.json + type: tensor(hidden[40]) + } + + constant W_final { + file: constants/W_final.json + type: tensor(hidden[40], final[1]) + } + + constant b_final { + file: constants/b_final.json + type: tensor(final[1]) + } + + constant input_transform_1 { + file: constants/input_transform_1.json + type: tensor(user_item_cf[10], input[20]) + } + + constant input_transform_2 { + file: constants/input_transform_2.json + type: tensor(user_item_cf[10], input[20]) + } + + rank-profile nn_tensor { + + # Why no work? 
+ macro inline matmul(x, y, dim) { + expression: sum(x * y, dim) + } + + macro matmul_user_item_cf(x, y) { + expression: sum(x * y, user_item_cf) + } + + macro matmul_hidden(x, y) { + expression: sum(x * y, hidden) + } + + macro matmul_input(x, y) { + expression: sum(x * y, input) + } + + macro add(x,y) { + expression: x + y + } + + # The input to the neural network is the concatenation of the document and query vectors + macro nn_input() { + expression { + matmul_user_item_cf(attribute(user_item_cf), constant(input_transform_1)) + + + matmul_user_item_cf(query(user_item_cf), constant(input_transform_2)) + } + } + + macro hidden_layer() { + expression { + relu(add(matmul_input(nn_input, constant(W_hidden)), constant(b_hidden))) + + # The '+' causes an error. Why? + # relu(matmul(nn_input, constant(W_hidden), "input") + constant(b_hidden)) + } + } + + macro final_layer() { + expression{ + sigmoid(add(matmul_hidden(hidden_layer, constant(W_final)), constant(b_final))) + + # Same as above + # sigmoid(matmul(hidden_layer, constant(W_final), "hidden") + constant(b_final)) + } + } + + first-phase { + expression: sum(query(user_item_cf) * attribute(user_item_cf)) + } + + second-phase { + rerank-count: 200 + expression: sum(final_layer) + } + + } + } diff --git a/sample-apps/blog-recommendation/src/main/application/services.xml b/sample-apps/blog-recommendation/src/main/application/services.xml index d1650eaca8c..c81cbde6027 100644 --- a/sample-apps/blog-recommendation/src/main/application/services.xml +++ b/sample-apps/blog-recommendation/src/main/application/services.xml @@ -16,7 +16,9 @@ </chain> </search> <document-api/> - <nodes count='1'/> + <nodes> + <node hostalias='node1'/> + </nodes> </jdisc> <content id='content' version='1.0'> @@ -25,12 +27,9 @@ <document mode='index' type='blog_post'/> <document mode='index' type='user'/> </documents> - <nodes count='1'/> - <engine> - <proton> - <searchable-copies>1</searchable-copies> - </proton> - </engine> + <nodes> + <node 
hostalias='node1' distribution-key='0'/> + </nodes> </content> </services> diff --git a/sample-apps/blog-recommendation/src/main/java/com/yahoo/example/BlogTensorSearcher.java b/sample-apps/blog-recommendation/src/main/java/com/yahoo/example/BlogTensorSearcher.java index 5bbd7140cb8..3a2a3df455f 100644 --- a/sample-apps/blog-recommendation/src/main/java/com/yahoo/example/BlogTensorSearcher.java +++ b/sample-apps/blog-recommendation/src/main/java/com/yahoo/example/BlogTensorSearcher.java @@ -36,7 +36,9 @@ public class BlogTensorSearcher extends Searcher { QueryTreeUtil.andQueryItemWithRoot(query, notItem); // Modify the ranking by using the 'tensor' rank-profile (as defined in blog_post.sd)... - query.properties().set(new CompoundName("ranking"), "tensor"); + if (query.properties().get("ranking") == null) { + query.properties().set(new CompoundName("ranking"), "tensor"); + } // ... and setting 'query(user_item_cf)' used in that rank-profile query.getRanking().getFeatures().put("query(user_item_cf)", toTensor(userItemCfProperty)); diff --git a/sample-apps/blog-tutorial-shared/README.md b/sample-apps/blog-tutorial-shared/README.md index 09ac61e6b56..846156908c3 100644 --- a/sample-apps/blog-tutorial-shared/README.md +++ b/sample-apps/blog-tutorial-shared/README.md @@ -1,6 +1,8 @@ # Vespa tutorial utility scripts -## From raw JSON to Vespa Feeding format +## Vespa Tutorial pt. 1 + +### From raw JSON to Vespa Feeding format $ python parse.py trainPosts.json > somefile.json @@ -10,10 +12,85 @@ Parses JSON from the file trainPosts.json downloaded from Kaggle during the [blo Give it the flag "-p" or "--popularity", and the script also calculates and adds the field `popularity`, as introduced [in the tutorial](https://git.corp.yahoo.com/pages/vespa/documentation/documentation/tutorials/blog-search.html#blog-popularity-signal). -## Building and running the Spark script for calculating latent factors +## Vespa Tutorial pt. 
2 + +### Building and running the Spark script for calculating latent factors 1. Install the latest version of [Apache Spark](http://spark.apache.org/) and [sbt](http://www.scala-sbt.org/download.html). 2. Clone this repository and build the Spark script with `sbt package` (in the root directory of this repo). -3. Use the resulting jar file when running spark jobs included in the tutorials.
\ No newline at end of file +3. Use the resulting jar file when running spark jobs included in the tutorials. + +## Vespa Tutorial pt.3 + +Pre-computed data used through out the tutorial can be found [here](http://trdstorage.trondheim.corp.yahoo.com/~tmartins/vespa_tutorial_data/). + +You can download ```vespa_tutorial_data.tar.gz``` (144MB) and decompress it with + + $ wget http://trdstorage.trondheim.corp.yahoo.com/~tmartins/vespa_tutorial_data.tar.gz + $ tar -xvzf vespa_tutorial_data.tar.gz + +### Create Training Dataset + + $ ./generateDataset.R -d vespa_tutorial_data/user_item_cf_cv/product.json \ + -u vespa_tutorial_data/user_item_cf_cv/user.json \ + -t vespa_tutorial_data/training_and_test_indices/train.txt \ + -o vespa_tutorial_data/nn_model/training_set.txt + +### Train model with TensorFlow + +Train the model with + + $ python vespaModel.py --product_features_file_path vespa_tutorial_data/user_item_cf_cv/product.json \ + --user_features_file_path vespa_tutorial_data/user_item_cf_cv/user.json \ + --dataset_file_path vespa_tutorial_data/nn_model/training_set.txt + +Model parameters and summary statistics will be saved at folder ```runs/${start_time}``` with ```${start_time}``` representing the time you started to train the model. + +Visualize the accuracy and loss metrics with + + $ tensorboard --logdir runs/1473845959/summaries/ + +**Note**: The folder ```1473845959``` depends on the time you start to train the model and will be different in your case. + +### Export model parameters to Tensor Vespa format + +```checkpoint_dir``` holds the folder that TensorFlow writes the learned model parameters (stored using protobuf) and ```output_dir``` is the folder that we will output the model parameters in +Vespa Tensor format. 
+ + import vespaModel + + checkpoint_dir = "./runs/1473845959/checkpoints" + output_dir = "application_package/constants" + + serializer = serializeVespaModel(checkpoint_dir, output_dir) + serializer.serialize_to_disk(variable_name = "W_hidden", dimension_names = ['input', 'hidden']) + serializer.serialize_to_disk(variable_name = "b_hidden", dimension_names = ['hidden']) + serializer.serialize_to_disk(variable_name = "W_final", dimension_names = ['hidden', 'final']) + serializer.serialize_to_disk(variable_name = "b_final", dimension_names = ['final']) + +The python code containing the class ```serializeVespaModel``` can be found at: ```src/python/vespaModel.py``` + +### Offline evaluation + +Query Vespa using the rank-profile ```tensor``` for users in the test set and return 100 blog post recommendations. Use those recommendations in the information contained in the test set to compute +metrics defined in the Tutorial pt. 2. + + pig -x local -f tutorial_compute_metric.pig \ + -param VESPA_HADOOP_JAR=vespa-hadoop.jar \ + -param TEST_INDICES=blog-job/training_and_test_indices/testing_set_ids \ + -param ENDPOINT=$(hostname):8080 + -param NUMBER_RECOMMENDATIONS=100 + -param RANKING_NAME=tensor + -param OUTPUT=blog-job/cf-metric + +Repeat the process, but now using the rank-profile ```nn_tensor```. + + pig -x local -f tutorial_compute_metric.pig \ + -param VESPA_HADOOP_JAR=vespa-hadoop.jar \ + -param TEST_INDICES=blog-job/training_and_test_indices/testing_set_ids \ + -param ENDPOINT=$(hostname):8080 + -param NUMBER_RECOMMENDATIONS=100 + -param RANKING_NAME=nn_tensor + -param OUTPUT=blog-job/cf-metric
\ No newline at end of file diff --git a/sample-apps/blog-tutorial-shared/src/R/generateDataset.R b/sample-apps/blog-tutorial-shared/src/R/generateDataset.R new file mode 100644 index 00000000000..b410ad4094c --- /dev/null +++ b/sample-apps/blog-tutorial-shared/src/R/generateDataset.R @@ -0,0 +1,56 @@ +library(jsonlite) +library(dplyr) + +file_path_document <- '/Users/tmartins/projects/yahoo/sw/vespa-examples/blog-recommendation-support/data/blog-job/user_item_cf_cv/product.json' +file_path_user <- '/Users/tmartins/projects/yahoo/sw/vespa-examples/blog-recommendation-support/data/blog-job/user_item_cf_cv/user.json' +file_path_train <- '/Users/tmartins/projects/yahoo/sw/vespa-examples/blog-recommendation-support/data/blog-job/training_and_test_indices/train.txt' +output_file <- '/Users/tmartins/projects/yahoo/sw/vespa-examples/blog-recommendation-support/data/blog-job/nn_model/training_set.txt' + +# get ids from documents that have a latent vector +lines <- readLines(file_path_document) +product_ids <- NULL +for (line in lines){ + product_ids <- c(product_ids, fromJSON(txt=line)$post_id) +} + +# get ids from users that have a latent vector +lines <- readLines(file_path_user) +user_ids <- NULL +for (line in lines){ + user_ids <- c(user_ids, fromJSON(txt=line)$user_id) +} + +# read (product, user) ids used for training +train_ids <- read.delim(file = file_path_train, header = FALSE, stringsAsFactors = FALSE) +colnames(train_ids) <- c("product_id", "user_id") + +# filter out product id and user id that does not have latent vectors +temp <- merge(x = train_ids, y = data.frame(product_id = product_ids)) +final_positive_train_ids <- merge(x = temp, y = data.frame(user_id = user_ids)) + +# add positive labels +final_positive_train_ids <- data.frame(final_positive_train_ids, label = 1) + +# add noise to the data +clicks_per_user <- final_positive_train_ids %>% group_by(user_id) %>% summarise(number_clicks = sum(label)) + +unread_proportion <- 10 +unread_products <- 
matrix(NA, unread_proportion*sum(clicks_per_user$number_clicks), 3) +colnames(unread_products) <- c("user_id", "product_id", "label") +count <- 0 +for (i in 1:nrow(clicks_per_user)){ + print(paste(i, "/ ", nrow(clicks_per_user))) + number_itens <- unread_proportion * as.numeric(clicks_per_user[i, "number_clicks"]) + row_index <- count + 1:number_itens + count <- count + number_itens + user_id <- clicks_per_user[i, "user_id"] + new_samples <- sample(x = product_ids, size = unread_proportion * as.numeric(clicks_per_user[i, "number_clicks"]), replace = FALSE) + unread_products[row_index, ] <- matrix(c(rep(as.numeric(user_id), number_itens), new_samples, rep(0, number_itens)), ncol = 3) +} + +# create final dataset +final_train_ids <- rbind(final_positive_train_ids, data.frame(unread_products)) +duplicated_rows <- duplicated(x = final_train_ids[, c("user_id", "product_id")]) +final_train_ids <- final_train_ids[!duplicated_rows, ] + +write.table(x = final_train_ids, file = output_file, sep = "\t", quote = FALSE, row.names = FALSE) diff --git a/sample-apps/blog-tutorial-shared/src/main/pig/tutorial_compute_metric.pig b/sample-apps/blog-tutorial-shared/src/main/pig/tutorial_compute_metric.pig index 5df583e1f30..50b4bc19967 100644 --- a/sample-apps/blog-tutorial-shared/src/main/pig/tutorial_compute_metric.pig +++ b/sample-apps/blog-tutorial-shared/src/main/pig/tutorial_compute_metric.pig @@ -2,7 +2,7 @@ REGISTER $VESPA_HADOOP_JAR DEFINE BlogPostRecommendations com.yahoo.vespa.hadoop.pig.VespaQuery( - 'query=http://$ENDPOINT/search/?user_id=<user_id>&hits=$NUMBER_RECOMMENDATIONS', + 'query=http://$ENDPOINT/search/?user_id=<user_id>&hits=$NUMBER_RECOMMENDATIONS&ranking=$RANKING_NAME', 'schema=rank:int,id:chararray,relevance:double,fields/post_id:chararray' ); diff --git a/sample-apps/blog-tutorial-shared/src/main/pig/tutorial_get_recommendation_list.pig b/sample-apps/blog-tutorial-shared/src/main/pig/tutorial_get_recommendation_list.pig new file mode 100644 index 
00000000000..ab4245eaa25 --- /dev/null +++ b/sample-apps/blog-tutorial-shared/src/main/pig/tutorial_get_recommendation_list.pig @@ -0,0 +1,21 @@ +REGISTER $VESPA_HADOOP_JAR + +DEFINE BlogPostRecommendations + com.yahoo.vespa.hadoop.pig.VespaQuery( + 'query=http://$ENDPOINT/search/?user_id=<user_id>&hits=$NUMBER_RECOMMENDATIONS&ranking=$RANKING_NAME', + 'schema=rank:int,id:chararray,relevance:double,fields/post_id:chararray' + ); + +-- Load test_set data from a local file +test_indices = LOAD '$TEST_INDICES' AS (post_id:chararray, user_id:chararray); +users = FOREACH test_indices GENERATE user_id; +users = FILTER users BY user_id IS NOT null; +users = DISTINCT users; + +-- Run a set of queries against Vespa +recommendations = FOREACH users GENERATE user_id, + FLATTEN(BlogPostRecommendations(*)) AS (rank, id, relevance, post_id); +recommendations = FOREACH recommendations GENERATE user_id, rank, post_id; +recommendations = FILTER recommendations BY rank IS NOT NULL AND post_id IS NOT NULL; + +STORE recommendations INTO '$OUTPUT'; diff --git a/sample-apps/blog-tutorial-shared/src/main/python/__init__.py b/sample-apps/blog-tutorial-shared/src/python/__init__.py index e69de29bb2d..e69de29bb2d 100644 --- a/sample-apps/blog-tutorial-shared/src/main/python/__init__.py +++ b/sample-apps/blog-tutorial-shared/src/python/__init__.py diff --git a/sample-apps/blog-tutorial-shared/src/main/python/parse.py b/sample-apps/blog-tutorial-shared/src/python/parse.py index 0d5f892eebc..0d5f892eebc 100644 --- a/sample-apps/blog-tutorial-shared/src/main/python/parse.py +++ b/sample-apps/blog-tutorial-shared/src/python/parse.py diff --git a/sample-apps/blog-tutorial-shared/src/python/vespaModel.py b/sample-apps/blog-tutorial-shared/src/python/vespaModel.py new file mode 100755 index 00000000000..fd0718721eb --- /dev/null +++ b/sample-apps/blog-tutorial-shared/src/python/vespaModel.py @@ -0,0 +1,397 @@ +#! 
/Users/tmartins/anaconda/envs/tensorflow/bin/python + +""" +Train a 2 layers neural network to compute the probability of a user +represented by the vector u liking a document represented by the vector d. + +Usage: ./vespaModel.py --product_features_file_path path \ + --user_features_file_path path \ + --dataset_file_path path + +Expected File formats: + +- product_features_file_path contains a file with rows following the JSON format below: + +{"post_id" : 20, + "user_item_cf" : {"user_item_cf:5" : -0.66617566, + "user_item_cf:6" : 0.29197264, + "user_item_cf:1" : -0.15582734, + "user_item_cf:7" : 0.3350679, + "user_item_cf:2" : -0.16676047, + "user_item_cf:9" : -0.31653953, + "user_item_cf:3" : -0.21495385, + "user_item_cf:4" : -0.036676258, + "user_item_cf:8" : 0.122069225, + "user_item_cf:0" : 0.20922394}} + +- user_features_file_path contains a file with rows following the JSON format below: + +{"user_id" : 270, + "user_item_cf" : {"user_item_cf:5" : -0.54011273, + "user_item_cf:6" : 0.2723072, + "user_item_cf:1" : -0.23280832, + "user_item_cf:7" : -0.011183357, + "user_item_cf:2" : -0.3987285, + "user_item_cf:9" : -0.05703937, + "user_item_cf:3" : 0.04699418, + "user_item_cf:4" : 0.06679048, + "user_item_cf:8" : 0.31399783, + "user_item_cf:0" : 0.5000366}} + +- dataset_file_path contains a file with rows containing tab-separated post_id, user_id, label such as the sample below: + +1000054 118475 1 +10001560 666315 0 +10001560 1230226 0 +10001560 561306 1 +""" + + +import tensorflow as tf +import time +import os +import datetime +import json +import numpy as np + +class getData: + """ + Data pre-processing + """ + def __init__(self, product_features_file_path, user_features_file_path, data_set_file_path): + self.product_features_file_path = product_features_file_path + self.user_features_file_path = user_features_file_path + self.data_set_file_path = data_set_file_path + + # Create user and document lookup features + def parse_cf_features(self, json, id_name): 
+ id = json[id_name] + indexes = ['user_item_cf:' + str(x) for x in range(0,10,1)] + values = [json['user_item_cf'][x] for x in indexes] + return [id, values] + + def get_product_features_lookup(self): + product_features = [self.parse_cf_features(json.loads(line), 'post_id') for line in open(self.product_features_file_path)] + return dict(product_features) + + def get_user_features_lookup(self): + user_features = [self.parse_cf_features(json.loads(line), 'user_id') for line in open(self.user_features_file_path)] + return dict(user_features) + + def parse_dataset(self, line, lookup_user_features, lookup_product_features): + info = line.strip("\n").split("\t") + user_id = float(info[0]) + product_id = float(info[1]) + label = int(info[2]) + return lookup_user_features[user_id], lookup_product_features[product_id], [label] + + def prepare_dataset(self): + lookup_product_features = self.get_product_features_lookup() + lookup_user_features = self.get_user_features_lookup() + with open(self.data_set_file_path) as f: + input_u = []; input_d = []; input_y = [] + for line in f: + u, d, y = self.parse_dataset(line, lookup_user_features, lookup_product_features) + input_u.append(u) + input_d.append(d) + input_y.append(y) + input_u = np.array(input_u) + input_d = np.array(input_d) + input_y = np.array(input_y) + return input_u, input_d, input_y + + def create_train_test_sets(self, input_u, input_d, input_y, seed = 10, perc = 0.2): + # Randomly shuffle data + np.random.seed(seed) + shuffle_indices = np.random.permutation(np.arange(len(input_u))) + input_u_shuffled = input_u[shuffle_indices] + input_d_shuffled = input_d[shuffle_indices] + input_y_shuffled = input_y[shuffle_indices] + + # Split train/test set + dev_samples = int(len(input_u_shuffled)*perc) + u_train, u_dev = input_u_shuffled[:-dev_samples], input_u_shuffled[-dev_samples:] + d_train, d_dev = input_d_shuffled[:-dev_samples], input_d_shuffled[-dev_samples:] + y_train, y_dev = input_y_shuffled[:-dev_samples], 
input_y_shuffled[-dev_samples:] + print("Train/Dev split: {:d}/{:d}".format(len(y_train), len(y_dev))) + + return u_train, u_dev, d_train, d_dev, y_train, y_dev + + def batch_iter(self, data, batch_size, num_epochs, shuffle=True): + """ + Generates a batch iterator for a dataset. + """ + data = np.array(data) + data_size = len(data) + num_batches_per_epoch = int(len(data)/batch_size) + 1 + for epoch in range(num_epochs): + # Shuffle the data at each epoch + if shuffle: + shuffle_indices = np.random.permutation(np.arange(data_size)) + shuffled_data = data[shuffle_indices] + else: + shuffled_data = data + for batch_num in range(num_batches_per_epoch): + start_index = batch_num * batch_size + end_index = min((batch_num + 1) * batch_size, data_size) + yield shuffled_data[start_index:end_index] + +class vespaRunTimeModel: + """ + Model that combine user and document features and needs to be evaluated at query time. + """ + def __init__(self, user_feature_length, doc_feature_length, hidden_length): + + # placeholders + self.input_u = tf.placeholder(tf.float32, [None, user_feature_length], name = 'input_u') + self.input_d = tf.placeholder(tf.float32, [None, doc_feature_length], name = 'input_d') + self.input_y = tf.placeholder(tf.float32, [None, 1], name = 'input_y') + + # merge user and document vector + self.input_concat = tf.concat(1, [self.input_d, self.input_u], name = 'input_concat') + + # hidden layer + self.W_hidden = tf.Variable( + tf.truncated_normal([user_feature_length + + doc_feature_length, hidden_length], stddev=0.1), name = 'W_hidden') + self.b_hidden = tf.Variable(tf.constant(0.1, shape=[hidden_length]), name = 'b_hidden') + + self.hidden_layer = tf.nn.relu(tf.matmul(self.input_concat, self.W_hidden) + self.b_hidden, + name = 'hidden_layer') + + # output layer + self.W_final = tf.Variable( + tf.random_uniform([hidden_length, 1], -0.1, 0.1), + name="W_final") + self.b_final = tf.Variable(tf.zeros([1]), name="b_final") + + self.y = 
tf.sigmoid(tf.matmul(self.hidden_layer, self.W_final) + self.b_final, name = 'y') + + # prediction based on model output + self.prediction = tf.cast(tf.greater_equal(self.y, 0.5), "float", name = 'prediction') + + # loss function + prob = tf.clip_by_value(self.y,1e-5,1.0 - 1e-5) + self.loss = tf.reduce_mean(- self.input_y * tf.log(prob) - (1 - self.input_y) * tf.log(1 - prob), name = 'loss') + + # accuracy + correct_predictions = tf.equal(self.prediction, self.input_y) + self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy") + + def train_operation(self, learning_rate): + global_step = tf.Variable(0, name="global_step", trainable=False) + #optimizer = tf.train.GradientDescentOptimizer(learning_rate) + optimizer = tf.train.AdagradOptimizer(learning_rate) + train_op = optimizer.minimize(self.loss, global_step=global_step) + return train_op, global_step + + def create_output_dir(self): + timestamp = str(int(time.time())) + out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp)) + print("Writing to {}\n".format(out_dir)) + return out_dir + + def summary_oprations(self): + loss_summary = tf.scalar_summary("loss", self.loss) + acc_summary = tf.scalar_summary("accuracy", self.accuracy) + train_summary_op = tf.merge_summary([loss_summary, acc_summary]) + dev_summary_op = tf.merge_summary([loss_summary, acc_summary]) + return train_summary_op, dev_summary_op + + def train_step(self, u_batch, d_batch, y_batch, writer=None): + """ + A single training step + """ + feed_dict = { + self.input_u: u_batch, + self.input_d: d_batch, + self.input_y: y_batch + } + _, step, summaries, loss, accuracy = sess.run( + [train_op, global_step, train_summary_op, self.loss, self.accuracy], + feed_dict) + time_str = datetime.datetime.now().isoformat() + print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy)) + if writer: + writer.add_summary(summaries, step) + + def dev_step(self, u_batch, d_batch, y_batch, writer=None): 
+ """ + Evaluates model on a dev set + """ + feed_dict = { + self.input_u: u_batch, + self.input_d: d_batch, + self.input_y: y_batch + } + step, summaries, loss, accuracy = sess.run( + [global_step, dev_summary_op, self.loss, self.accuracy], + feed_dict) + time_str = datetime.datetime.now().isoformat() + print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy)) + if writer: + writer.add_summary(summaries, step) + +class serializeVespaModel: + """ + Serialize TensorFlow variables to Vespa JSON format + + Example: + checkpoint_dir = "./runs/1473845959/checkpoints" + output_dir = "./runs/1473845959/vespa_variables" + + serializer = serializeVespaModel(checkpoint_dir, output_dir) + serializer.serialize_to_disk(variable_name = "W_hidden", dimension_names = ['input', 'hidden']) + serializer.serialize_to_disk(variable_name = "b_hidden", dimension_names = ['hidden']) + serializer.serialize_to_disk(variable_name = "W_final", dimension_names = ['hidden', 'final']) + serializer.serialize_to_disk(variable_name = "b_final", dimension_names = ['final']) + """ + def __init__(self, checkpoint_dir, output_dir): + self.checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir) + self.reader = tf.train.NewCheckpointReader(self.checkpoint_file) + self.output_dir = output_dir + + def write_cell_value(self, variable, dimension_names, dimension_address = None): + if dimension_address is None: + dimension_address = [] + shape = variable.shape + if len(shape) == 1: + count = 0 + cells = [] + for element in variable: + dimension_address.append((dimension_names[0], str(count))) + count += 1 + cells.append({ 'address': dict(dimension_address), "value": float(element) }) + return cells + else: + count = 0 + output = [] + for slice in variable: + dimension_address.append((dimension_names[0], str(count))) + output.extend(self.write_cell_value(slice, dimension_names[1:], dimension_address)) + count += 1 + return output + + def write_to_vespa_json_format(self, 
variable_name, dimension_names): + variable = self.reader.get_tensor(variable_name) + cells = self.write_cell_value(variable, dimension_names) + return json.dumps({'cells': cells}) + + def serialize_to_disk(self, variable_name, dimension_names): + text_file = open(os.path.join(output_dir, variable_name + ".json"), "w") + text_file.write(serializer.write_to_vespa_json_format(variable_name, dimension_names)) + text_file.close() + + +def task_train(): + # Data + tf.flags.DEFINE_string("product_features_file_path", '', "File containing product features") + tf.flags.DEFINE_string("user_features_file_path", '', "File containing user features") + tf.flags.DEFINE_string("dataset_file_path", '', "File containing labels for each document user pair") + + tf.flags.DEFINE_integer("hidden_length_factor", 2, "The hidden layer has size 'hidden_length_factor * input_vector_length'") + + # Misc Parameters + tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement") + tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices") + + # Training parameters + tf.flags.DEFINE_float("learning_rate", 0.1, "Gradient Descent learning rate") + + tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)") + tf.flags.DEFINE_integer("num_epochs", 200, "Number of training epochs (default: 200)") + tf.flags.DEFINE_integer("evaluate_every", 100, "Evaluate model on dev set after this many steps (default: 100)") + tf.flags.DEFINE_integer("checkpoint_every", 100, "Save model after this many steps (default: 100)") + + FLAGS = tf.flags.FLAGS + FLAGS._parse_flags() + print("\nParameters:") + for attr, value in sorted(FLAGS.__flags.items()): + print("{}={}".format(attr.upper(), value)) + print("") + + # Data preparation + data_pre_processing = getData( + FLAGS.product_features_file_path, + FLAGS.user_features_file_path, + FLAGS.dataset_file_path) + + input_u, input_d, input_y = data_pre_processing.prepare_dataset() + u_train, u_dev, 
d_train, d_dev, y_train, y_dev = data_pre_processing.create_train_test_sets(input_u, input_d, input_y, seed = 10, perc = 0.2) + + user_feature_length = input_u.shape[1] + doc_feature_length = input_d.shape[1] + + + # Create a graph + with tf.Graph().as_default(): + + # Create a session + session_conf = tf.ConfigProto( + allow_soft_placement=FLAGS.allow_soft_placement, + log_device_placement=FLAGS.log_device_placement) + sess = tf.Session(config=session_conf) + with sess.as_default(): + + # instanciate a model + vespa_model = vespaRunTimeModel(user_feature_length = user_feature_length, + doc_feature_length = doc_feature_length, + hidden_length = FLAGS.hidden_length_factor * (user_feature_length + doc_feature_length)) + + # create a train operation + train_op, global_step = vespa_model.train_operation(learning_rate = FLAGS.learning_rate) + + # Summaries for loss and accuracy + train_summary_op, dev_summary_op = vespa_model.summary_oprations() + + # Output directory for models and summaries + out_dir = vespa_model.create_output_dir() + + # Write train summaries to disk + train_summary_dir = os.path.join(out_dir, "summaries", "train") + train_summary_writer = tf.train.SummaryWriter(train_summary_dir, sess.graph) + + # Dev summaries + dev_summary_dir = os.path.join(out_dir, "summaries", "dev") + dev_summary_writer = tf.train.SummaryWriter(dev_summary_dir, sess.graph) + + # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it + checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints")) + checkpoint_prefix = os.path.join(checkpoint_dir, "model") + if not os.path.exists(checkpoint_dir): + os.makedirs(checkpoint_dir) + saver = tf.train.Saver(tf.all_variables()) + + # Initialize all variables + sess.run(tf.initialize_all_variables()) + + # Generate batches + batches = data_pre_processing.batch_iter( + list(zip(u_train, d_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs) + # Training loop. For each batch... 
+ for batch in batches: + u_batch, d_batch, y_batch = zip(*batch) + vespa_model.train_step(u_batch, d_batch, y_batch, writer=train_summary_writer) + current_step = tf.train.global_step(sess, global_step) + if current_step % FLAGS.evaluate_every == 0: + print("\nEvaluation:") + vespa_model.dev_step(u_dev, d_dev, y_dev, writer=dev_summary_writer) + print("") + if current_step % FLAGS.checkpoint_every == 0: + path = saver.save(sess, checkpoint_prefix, global_step=current_step) + print("Saved model checkpoint to {}\n".format(path)) + +if __name__ == "__main__": + + # Task + tf.flags.DEFINE_string("task", 'train', "Train a model from scratch") + + FLAGS = tf.flags.FLAGS + FLAGS._parse_flags() + print("\nParameters:") + for attr, value in sorted(FLAGS.__flags.items()): + print("{}={}".format(attr.upper(), value)) + print("") + + if FLAGS.task == "train": + task_train() diff --git a/searchcore/src/apps/fdispatch/fdispatch.cpp b/searchcore/src/apps/fdispatch/fdispatch.cpp index 58af00accf1..f4771cf3c14 100644 --- a/searchcore/src/apps/fdispatch/fdispatch.cpp +++ b/searchcore/src/apps/fdispatch/fdispatch.cpp @@ -22,6 +22,7 @@ LOG_SETUP("fdispatch"); using fdispatch::Fdispatch; using vespa::config::search::core::FdispatchrcConfig; +using namespace std::literals; extern char FastS_VersionTag[]; @@ -34,8 +35,7 @@ private: protected: vespalib::string _configId; - bool CheckShutdownFlags () - { + bool CheckShutdownFlags () const { return (vespalib::SignalHandler::INT.check() || vespalib::SignalHandler::TERM.check()); } @@ -66,8 +66,7 @@ FastS_FDispatchApp::Main() forcelink_searchlib_aggregation(); if (!GetOptions(&exitCode)) { - EV_STOPPING("fdispatch", - (exitCode == 0) ? "clean shutdown" : "error"); + EV_STOPPING("fdispatch", (exitCode == 0) ? 
"clean shutdown" : "error"); return exitCode; } @@ -93,15 +92,14 @@ FastS_FDispatchApp::Main() #ifdef RLIMIT_NOFILE struct rlimit curlim; getrlimit(RLIMIT_NOFILE, &curlim); - if (curlim.rlim_cur != (rlim_t)RLIM_INFINITY) + if (curlim.rlim_cur != (rlim_t)RLIM_INFINITY) { LOG(debug, "Max number of open files = %d", (int) curlim.rlim_cur); - else + } else { LOG(debug, "Max number of open files = unlimited"); + } if (curlim.rlim_cur >= 64) { } else { - LOG(error, - "CRITICAL: Too few file descriptors available: %d", - (int)curlim.rlim_cur); + LOG(error, "CRITICAL: Too few file descriptors available: %d", (int)curlim.rlim_cur); throw std::runtime_error("CRITICAL: Too few file descriptors available"); } #endif @@ -109,19 +107,21 @@ FastS_FDispatchApp::Main() getrlimit(RLIMIT_DATA, &curlim); if (curlim.rlim_cur != (rlim_t)RLIM_INFINITY && curlim.rlim_cur < (rlim_t) (400 * 1024 * 1024)) { - if (curlim.rlim_max == (rlim_t)RLIM_INFINITY) + if (curlim.rlim_max == (rlim_t)RLIM_INFINITY) { curlim.rlim_cur = (rlim_t) (400 * 1024 * 1024); - else + } else { curlim.rlim_cur = curlim.rlim_max; + } setrlimit(RLIMIT_DATA, &curlim); getrlimit(RLIMIT_DATA, &curlim); } - if (curlim.rlim_cur != (rlim_t)RLIM_INFINITY) + if (curlim.rlim_cur != (rlim_t)RLIM_INFINITY) { LOG(debug, "VERBOSE: Max data segment size = %dM", (int) ((curlim.rlim_cur + 512 * 1024) / (1024 * 1024))); - else + } else { LOG(debug, "VERBOSE: Max data segment size = unlimited"); + } #endif if (!myfdispatch->Init()) { @@ -139,7 +139,7 @@ FastS_FDispatchApp::Main() if (myfdispatch->Failed()) { throw std::runtime_error("myfdispatch->Failed()"); } - FastOS_Thread::Sleep(1000); + std::this_thread::sleep_for(100ms); #ifndef NO_MONITOR_LATENCY_CHECK if (!myfdispatch->CheckTempFail()) break; @@ -149,6 +149,9 @@ FastS_FDispatchApp::Main() if (myfdispatch->Failed()) { throw std::runtime_error("myfdispatch->Failed()"); } + } catch (std::runtime_error &e) { + LOG(warning, "got std::runtime_error during init: %s", e.what()); + 
exitCode = 1; } catch (std::exception &e) { LOG(error, "got exception during init: %s", e.what()); exitCode = 1; @@ -160,8 +163,7 @@ FastS_FDispatchApp::Main() LOG(debug, "Deleting fdispatch"); myfdispatch.reset(); LOG(debug, "COMPLETION: Exiting"); - EV_STOPPING("fdispatch", - (exitCode == 0) ? "clean shutdown" : "error"); + EV_STOPPING("fdispatch", (exitCode == 0) ? "clean shutdown" : "error"); return exitCode; } @@ -181,25 +183,18 @@ FastS_FDispatchApp::GetOptions(int *exitCode) LONGOPT_CONFIGID }; int optIndex = 1; // Start with argument 1 - while ((c = GetOptLong("c:", - optArgument, - optIndex, - longopts, - &longopt_index)) != -1) { + while ((c = GetOptLong("c:", optArgument, optIndex, longopts, &longopt_index)) != -1) { switch (c) { case 0: switch (longopt_index) { case LONGOPT_CONFIGID: break; default: - if (optArgument != NULL) - LOG(info, - "longopt %s with arg %s", - longopts[longopt_index].name, optArgument); - else - LOG(info, - "longopt %s", - longopts[longopt_index].name); + if (optArgument != NULL) { + LOG(info, "longopt %s with arg %s", longopts[longopt_index].name, optArgument); + } else { + LOG(info, "longopt %s", longopts[longopt_index].name); + } break; } break; diff --git a/searchcore/src/apps/vespa-gen-testdocs/vespa-gen-testdocs.cpp b/searchcore/src/apps/vespa-gen-testdocs/vespa-gen-testdocs.cpp index 7cbe90151ef..1983ff7a318 100644 --- a/searchcore/src/apps/vespa-gen-testdocs/vespa-gen-testdocs.cpp +++ b/searchcore/src/apps/vespa-gen-testdocs/vespa-gen-testdocs.cpp @@ -16,6 +16,9 @@ typedef vespalib::hash_set<vespalib::string> StringSet; typedef vespalib::hash_set<uint32_t> UIntSet; typedef std::vector<vespalib::string> StringArray; typedef std::shared_ptr<StringArray> StringArraySP; +using namespace vespalib::alloc; +using vespalib::DefaultAlloc; +using vespalib::string; void usageHeader(void) @@ -27,9 +30,9 @@ usageHeader(void) "USAGE:\n"; } -vespalib::string -prependBaseDir(const vespalib::string &baseDir, - const vespalib::string 
&file) +string +prependBaseDir(const string &baseDir, + const string &file) { if (baseDir.empty() || baseDir == ".") return file; @@ -38,15 +41,15 @@ prependBaseDir(const vespalib::string &baseDir, void -shafile(const vespalib::string &baseDir, - const vespalib::string &file) +shafile(const string &baseDir, + const string &file) { unsigned char digest[SHA256_DIGEST_LENGTH]; SHA256_CTX c; - vespalib::string fullFile(prependBaseDir(baseDir, file)); + string fullFile(prependBaseDir(baseDir, file)); FastOS_File f; std::ostringstream os; - vespalib::AlignedHeapAlloc buf(65536, 4096); + Alloc buf = DefaultAlloc::create(65536, MemoryAllocator::HUGEPAGE_SIZE, 0x1000); f.EnableDirectIO(); bool openres = f.OpenReadOnly(fullFile.c_str()); if (!openres) { @@ -84,7 +87,7 @@ public: StringGenerator(search::Rand48 &rnd); void - rand_string(vespalib::string &res, uint32_t minLen, uint32_t maxLen); + rand_string(string &res, uint32_t minLen, uint32_t maxLen); void rand_unique_array(StringArray &res, @@ -101,7 +104,7 @@ StringGenerator::StringGenerator(search::Rand48 &rnd) void -StringGenerator::rand_string(vespalib::string &res, +StringGenerator::rand_string(string &res, uint32_t minLen, uint32_t maxLen) { @@ -121,7 +124,7 @@ StringGenerator::rand_unique_array(StringArray &res, uint32_t size) { StringSet set(size * 2); - vespalib::string s; + string s; res.reserve(size); for (uint32_t i = 0; i < size; ++i) { @@ -140,10 +143,10 @@ public: typedef std::shared_ptr<FieldGenerator> SP; protected: - const vespalib::string _name; + const string _name; public: - FieldGenerator(const vespalib::string &name); + FieldGenerator(const string &name); virtual ~FieldGenerator(void); @@ -155,12 +158,12 @@ public: clear(void); virtual void - deleteHistogram(const vespalib::string &baseDir, - const vespalib::string &name); + deleteHistogram(const string &baseDir, + const string &name); virtual void - writeHistogram(const vespalib::string &baseDir, - const vespalib::string &name); + 
writeHistogram(const string &baseDir, + const string &name); virtual void generate(vespalib::asciistream &doc, uint32_t id) = 0; @@ -168,7 +171,7 @@ public: -FieldGenerator::FieldGenerator(const vespalib::string &name) +FieldGenerator::FieldGenerator(const string &name) : _name(name) { } @@ -191,8 +194,8 @@ FieldGenerator::clear(void) void -FieldGenerator::deleteHistogram(const vespalib::string &baseDir, - const vespalib::string &name) +FieldGenerator::deleteHistogram(const string &baseDir, + const string &name) { (void) baseDir; (void) name; @@ -200,8 +203,8 @@ FieldGenerator::deleteHistogram(const vespalib::string &baseDir, void -FieldGenerator::writeHistogram(const vespalib::string &baseDir, - const vespalib::string &name) +FieldGenerator::writeHistogram(const string &baseDir, + const string &name) { (void) baseDir; (void) name; @@ -220,7 +223,7 @@ class RandTextFieldGenerator : public FieldGenerator uint32_t _randFill; public: - RandTextFieldGenerator(const vespalib::string &name, + RandTextFieldGenerator(const string &name, search::Rand48 &rnd, uint32_t numWords, uint32_t minFill, @@ -236,19 +239,19 @@ public: clear(void); virtual void - deleteHistogram(const vespalib::string &baseDir, - const vespalib::string &name); + deleteHistogram(const string &baseDir, + const string &name); virtual void - writeHistogram(const vespalib::string &baseDir, - const vespalib::string &name); + writeHistogram(const string &baseDir, + const string &name); virtual void generate(vespalib::asciistream &doc, uint32_t id); }; -RandTextFieldGenerator::RandTextFieldGenerator(const vespalib::string &name, +RandTextFieldGenerator::RandTextFieldGenerator(const string &name, search::Rand48 &rnd, uint32_t numWords, uint32_t minFill, @@ -295,21 +298,21 @@ RandTextFieldGenerator::clear(void) void -RandTextFieldGenerator::deleteHistogram(const vespalib::string &baseDir, - const vespalib::string &name) +RandTextFieldGenerator::deleteHistogram(const string &baseDir, + const string &name) { - 
vespalib::string fname(prependBaseDir(baseDir, name) + "-" + _name); + string fname(prependBaseDir(baseDir, name) + "-" + _name); FastOS_File::Delete(fname.c_str()); } void -RandTextFieldGenerator::writeHistogram(const vespalib::string &baseDir, - const vespalib::string &name) +RandTextFieldGenerator::writeHistogram(const string &baseDir, + const string &name) { LOG(info, "%u word collisions for field %s", _colls, _name.c_str()); - vespalib::string fname(name + "-" + _name); - vespalib::string fullName(prependBaseDir(baseDir, fname)); + string fname(name + "-" + _name); + string fullName(prependBaseDir(baseDir, fname)); LOG(info, "Writing histogram %s", fname.c_str()); Fast_BufferedFile f(new FastOS_File); f.WriteOpen(fullName.c_str()); @@ -344,7 +347,7 @@ RandTextFieldGenerator::generate(vespalib::asciistream &doc, uint32_t id) _histogram[wNum]++; else ++_colls; - const vespalib::string &s(_strings[wNum]); + const string &s(_strings[wNum]); assert(s.size() > 0); doc << s; } @@ -358,7 +361,7 @@ class ModTextFieldGenerator : public FieldGenerator std::vector<uint32_t> _mods; public: - ModTextFieldGenerator(const vespalib::string &name, + ModTextFieldGenerator(const string &name, search::Rand48 &rnd, const std::vector<uint32_t> &mods); @@ -369,14 +372,14 @@ public: clear(void); virtual void - writeHistogram(const vespalib::string &name); + writeHistogram(const string &name); virtual void generate(vespalib::asciistream &doc, uint32_t id); }; -ModTextFieldGenerator::ModTextFieldGenerator(const vespalib::string &name, +ModTextFieldGenerator::ModTextFieldGenerator(const string &name, search::Rand48 &rnd, const std::vector<uint32_t> &mods) : FieldGenerator(name), @@ -398,7 +401,7 @@ ModTextFieldGenerator::clear(void) void -ModTextFieldGenerator::writeHistogram(const vespalib::string &name) +ModTextFieldGenerator::writeHistogram(const string &name) { (void) name; } @@ -424,7 +427,7 @@ ModTextFieldGenerator::generate(vespalib::asciistream &doc, uint32_t id) class 
IdTextFieldGenerator : public FieldGenerator { public: - IdTextFieldGenerator(const vespalib::string &name); + IdTextFieldGenerator(const string &name); virtual ~IdTextFieldGenerator(void); @@ -433,14 +436,14 @@ public: clear(void); virtual void - writeHistogram(const vespalib::string &name); + writeHistogram(const string &name); virtual void generate(vespalib::asciistream &doc, uint32_t id); }; -IdTextFieldGenerator::IdTextFieldGenerator(const vespalib::string &name) +IdTextFieldGenerator::IdTextFieldGenerator(const string &name) : FieldGenerator(name) { } @@ -458,7 +461,7 @@ IdTextFieldGenerator::clear(void) void -IdTextFieldGenerator::writeHistogram(const vespalib::string &name) +IdTextFieldGenerator::writeHistogram(const string &name) { (void) name; } @@ -480,7 +483,7 @@ class RandIntFieldGenerator : public FieldGenerator uint32_t _count; public: - RandIntFieldGenerator(const vespalib::string &name, + RandIntFieldGenerator(const string &name, search::Rand48 &rnd, uint32_t low, uint32_t count); @@ -492,7 +495,7 @@ public: clear(void); virtual void - writeHistogram(const vespalib::string &name); + writeHistogram(const string &name); virtual void generate(vespalib::asciistream &doc, uint32_t id); @@ -500,7 +503,7 @@ public: -RandIntFieldGenerator::RandIntFieldGenerator(const vespalib::string &name, +RandIntFieldGenerator::RandIntFieldGenerator(const string &name, search::Rand48 &rnd, uint32_t low, uint32_t count) @@ -524,7 +527,7 @@ RandIntFieldGenerator::clear(void) void -RandIntFieldGenerator::writeHistogram(const vespalib::string &name) +RandIntFieldGenerator::writeHistogram(const string &name) { (void) name; } @@ -542,8 +545,8 @@ RandIntFieldGenerator::generate(vespalib::asciistream &doc, uint32_t id) class DocumentGenerator { - vespalib::string _docType; - vespalib::string _idPrefix; + string _docType; + string _idPrefix; vespalib::asciistream _doc; typedef std::vector<FieldGenerator::SP> FieldVec; const FieldVec _fields; @@ -551,8 +554,8 @@ class 
DocumentGenerator void setup(void); public: - DocumentGenerator(const vespalib::string &docType, - const vespalib::string &idPrefix, + DocumentGenerator(const string &docType, + const string &idPrefix, const FieldVec &fields); ~DocumentGenerator(void); @@ -561,26 +564,26 @@ public: clear(void); void - deleteHistogram(const vespalib::string &baseDir, - const vespalib::string &name); + deleteHistogram(const string &baseDir, + const string &name); void - writeHistogram(const vespalib::string &baseDir, - const vespalib::string &name); + writeHistogram(const string &baseDir, + const string &name); void generate(uint32_t id); void generate(uint32_t docMin, uint32_t docCount, - const vespalib::string &baseDir, - const vespalib::string &feedFileName, + const string &baseDir, + const string &feedFileName, bool headers); }; -DocumentGenerator::DocumentGenerator(const vespalib::string &docType, - const vespalib::string &idPrefix, +DocumentGenerator::DocumentGenerator(const string &docType, + const string &idPrefix, const FieldVec &fields) : _docType(docType), _idPrefix(idPrefix), @@ -630,8 +633,8 @@ DocumentGenerator::generate(uint32_t id) void -DocumentGenerator::deleteHistogram(const vespalib::string &baseDir, - const vespalib::string &name) +DocumentGenerator::deleteHistogram(const string &baseDir, + const string &name) { typedef FieldVec::const_iterator FI; for (FI i(_fields.begin()), ie(_fields.end()); i != ie; ++i) { @@ -640,8 +643,8 @@ DocumentGenerator::deleteHistogram(const vespalib::string &baseDir, } void -DocumentGenerator::writeHistogram(const vespalib::string &baseDir, - const vespalib::string &name) +DocumentGenerator::writeHistogram(const string &baseDir, + const string &name) { typedef FieldVec::const_iterator FI; for (FI i(_fields.begin()), ie(_fields.end()); i != ie; ++i) { @@ -651,13 +654,13 @@ DocumentGenerator::writeHistogram(const vespalib::string &baseDir, void DocumentGenerator::generate(uint32_t docMin, uint32_t docCount, - const vespalib::string 
&baseDir, - const vespalib::string &feedFileName, + const string &baseDir, + const string &feedFileName, bool headers) { - vespalib::string fullName(prependBaseDir(baseDir, feedFileName)); + string fullName(prependBaseDir(baseDir, feedFileName)); FastOS_File::Delete(fullName.c_str()); - vespalib::string histname(feedFileName + ".histogram"); + string histname(feedFileName + ".histogram"); deleteHistogram(baseDir, histname); Fast_BufferedFile f(new FastOS_File); f.WriteOpen(fullName.c_str()); @@ -710,8 +713,8 @@ public: class GenTestDocsApp : public SubApp { - vespalib::string _baseDir; - vespalib::string _docType; + string _baseDir; + string _docType; uint32_t _minDocId; uint32_t _docIdLimit; bool _verbose; @@ -720,7 +723,7 @@ class GenTestDocsApp : public SubApp std::vector<FieldGenerator::SP> _fields; std::vector<uint32_t> _mods; search::Rand48 _rnd; - vespalib::string _outFile; + string _outFile; bool _headers; public: @@ -901,7 +904,7 @@ int GenTestDocsApp::run(void) { printf("Hello world\n"); - vespalib::string idPrefix("id:test:"); + string idPrefix("id:test:"); idPrefix += _docType; idPrefix += "::"; DocumentGenerator dg(_docType, diff --git a/searchcore/src/tests/proton/index/indexmanager_test.cpp b/searchcore/src/tests/proton/index/indexmanager_test.cpp index 97558fa262c..00579ce3d07 100644 --- a/searchcore/src/tests/proton/index/indexmanager_test.cpp +++ b/searchcore/src/tests/proton/index/indexmanager_test.cpp @@ -12,6 +12,7 @@ LOG_SETUP("indexmanager_test"); #include <vespa/searchcorespi/index/indexcollection.h> #include <vespa/searchcorespi/index/indexflushtarget.h> #include <vespa/searchcorespi/index/indexfusiontarget.h> +#include <vespa/searchcorespi/index/index_manager_stats.h> #include <vespa/searchlib/index/docbuilder.h> #include <vespa/searchlib/index/dummyfileheadercontext.h> #include <vespa/searchlib/memoryindex/dictionary.h> @@ -144,6 +145,10 @@ struct Fixture { }); _writeService.indexFieldWriter().sync(); } + void assertStats(uint32_t 
expNumDiskIndexes, + uint32_t expNumMemoryIndexes, + SerialNum expLastiskIndexSerialNum, + SerialNum expLastMemoryIndexSerialNum); }; void Fixture::flushIndexManager() { @@ -175,6 +180,32 @@ void Fixture::resetIndexManager() { _fileHeaderContext)); } + +void Fixture::assertStats(uint32_t expNumDiskIndexes, + uint32_t expNumMemoryIndexes, + SerialNum expLastDiskIndexSerialNum, + SerialNum expLastMemoryIndexSerialNum) +{ + searchcorespi::IndexManagerStats stats(*_index_manager); + SerialNum lastDiskIndexSerialNum = 0; + SerialNum lastMemoryIndexSerialNum = 0; + const std::vector<searchcorespi::index::DiskIndexStats> & + diskIndexes(stats.getDiskIndexes()); + const std::vector<searchcorespi::index::MemoryIndexStats> & + memoryIndexes(stats.getMemoryIndexes()); + if (!diskIndexes.empty()) { + lastDiskIndexSerialNum = diskIndexes.back().getSerialNum(); + } + if (!memoryIndexes.empty()) { + lastMemoryIndexSerialNum = memoryIndexes.back().getSerialNum(); + } + EXPECT_EQUAL(expNumDiskIndexes, diskIndexes.size()); + EXPECT_EQUAL(expNumMemoryIndexes, memoryIndexes.size()); + EXPECT_EQUAL(expLastDiskIndexSerialNum, lastDiskIndexSerialNum); + EXPECT_EQUAL(expLastMemoryIndexSerialNum, lastMemoryIndexSerialNum); +} + + TEST_F("requireThatEmptyMemoryIndexIsNotFlushed", Fixture) { IIndexCollection::SP sources = f._index_manager->getMaintainer().getSourceCollection(); EXPECT_EQUAL(1u, sources->getSourceCount()); @@ -679,6 +710,16 @@ TEST_F("require that wipeHistory updates schema on disk", Fixture) { EXPECT_EQUAL(0u, s.getNumIndexFields()); } +TEST_F("require that indexes manager stats can be generated", Fixture) +{ + TEST_DO(f.assertStats(0, 1, 0, 0)); + f.addDocument(1); + TEST_DO(f.assertStats(0, 1, 0, 1)); + f.flushIndexManager(); + TEST_DO(f.assertStats(1, 1, 1, 1)); + f.addDocument(2); + TEST_DO(f.assertStats(1, 1, 1, 2)); +} } // namespace diff --git a/searchcore/src/vespa/searchcore/config/proton.def b/searchcore/src/vespa/searchcore/config/proton.def index 
45052c3f865..ffc69a18f17 100644 --- a/searchcore/src/vespa/searchcore/config/proton.def +++ b/searchcore/src/vespa/searchcore/config/proton.def @@ -171,12 +171,21 @@ grouping.sessionmanager.maxentries int default=500 restart ## Control of pruning interval to remove sessions that have timed out grouping.sessionmanager.pruning.interval double default=1.0 +## Redundancy of documents. +distribution.redundancy long default=1 restart + +## Searchable copies of the documents. +distribution.searchablecopies long default=1 restart + ## Minimum initial size for any per document tables. -grow.initial int default=1024 restart +grow.initial long default=1024 restart + ## Grow factor in percent for any per document tables. grow.factor int default=50 restart + ## Constant added when growing any per document tables. grow.add int default=1 restart + ## The number of documents to amortize memory spike cost over grow.numdocs int default=10000 restart diff --git a/searchcore/src/vespa/searchcore/fdispatch/search/plain_dataset.cpp b/searchcore/src/vespa/searchcore/fdispatch/search/plain_dataset.cpp index 285e37abee7..490e02684e9 100644 --- a/searchcore/src/vespa/searchcore/fdispatch/search/plain_dataset.cpp +++ b/searchcore/src/vespa/searchcore/fdispatch/search/plain_dataset.cpp @@ -21,9 +21,6 @@ #include <vespa/searchcore/fdispatch/search/plain_dataset.h> #include <vespa/searchcore/fdispatch/search/nodemanager.h> -#include <boost/bind.hpp> -#include <boost/mem_fn.hpp> - LOG_SETUP(".search.plain_dataset"); //-------------------------------------------------------------------------- diff --git a/searchcore/src/vespa/searchcore/proton/index/diskindexwrapper.cpp b/searchcore/src/vespa/searchcore/proton/index/diskindexwrapper.cpp index f7a16bdcc81..ad78835fa37 100644 --- a/searchcore/src/vespa/searchcore/proton/index/diskindexwrapper.cpp +++ b/searchcore/src/vespa/searchcore/proton/index/diskindexwrapper.cpp @@ -4,29 +4,48 @@ LOG_SETUP(".proton.index.diskindexwrapper"); #include 
"diskindexwrapper.h" +#include <vespa/searchcorespi/index/indexreadutilities.h> +#include <vespa/searchcorespi/index/indexsearchablevisitor.h> using search::TuneFileSearch; +using searchcorespi::index::IndexReadUtilities; namespace proton { DiskIndexWrapper::DiskIndexWrapper(const vespalib::string &indexDir, const TuneFileSearch &tuneFileSearch, size_t cacheSize) - : _index(indexDir, cacheSize) + : _index(indexDir, cacheSize), + _serialNum(0) { bool setupIndexOk = _index.setup(tuneFileSearch); assert(setupIndexOk); (void) setupIndexOk; + _serialNum = IndexReadUtilities::readSerialNum(indexDir); } DiskIndexWrapper::DiskIndexWrapper(const DiskIndexWrapper &oldIndex, const TuneFileSearch &tuneFileSearch, size_t cacheSize) - : _index(oldIndex._index.getIndexDir(), cacheSize) + : _index(oldIndex._index.getIndexDir(), cacheSize), + _serialNum(0) { bool setupIndexOk = _index.setup(tuneFileSearch, oldIndex._index); assert(setupIndexOk); (void) setupIndexOk; + _serialNum = oldIndex.getSerialNum(); +} + +search::SerialNum +DiskIndexWrapper::getSerialNum() const +{ + return _serialNum; +} + +void +DiskIndexWrapper::accept(searchcorespi::IndexSearchableVisitor &visitor) const +{ + visitor.visit(*this); } } // namespace proton diff --git a/searchcore/src/vespa/searchcore/proton/index/diskindexwrapper.h b/searchcore/src/vespa/searchcore/proton/index/diskindexwrapper.h index 9deef043f6e..20ba61ea13a 100644 --- a/searchcore/src/vespa/searchcore/proton/index/diskindexwrapper.h +++ b/searchcore/src/vespa/searchcore/proton/index/diskindexwrapper.h @@ -10,6 +10,7 @@ namespace proton { class DiskIndexWrapper : public searchcorespi::index::IDiskIndex { private: search::diskindex::DiskIndex _index; + search::SerialNum _serialNum; public: DiskIndexWrapper(const vespalib::string &indexDir, @@ -44,6 +45,10 @@ public: .sizeOnDisk(_index.getSize()); } + virtual search::SerialNum getSerialNum() const override; + + virtual void accept(searchcorespi::IndexSearchableVisitor &visitor) const 
override; + /** * Implements proton::IDiskIndex */ diff --git a/searchcore/src/vespa/searchcore/proton/index/indexmanager.cpp b/searchcore/src/vespa/searchcore/proton/index/indexmanager.cpp index ad2e3f29853..7e9aa2436b1 100644 --- a/searchcore/src/vespa/searchcore/proton/index/indexmanager.cpp +++ b/searchcore/src/vespa/searchcore/proton/index/indexmanager.cpp @@ -42,12 +42,14 @@ IndexManager::MaintainerOperations::MaintainerOperations(const FileHeaderContext } IMemoryIndex::SP -IndexManager::MaintainerOperations::createMemoryIndex(const Schema &schema) +IndexManager::MaintainerOperations::createMemoryIndex(const Schema &schema, + SerialNum serialNum) { return IMemoryIndex::SP(new MemoryIndexWrapper(schema, _fileHeaderContext, _tuneFileIndexing, - _threadingService)); + _threadingService, + serialNum)); } IDiskIndex::SP diff --git a/searchcore/src/vespa/searchcore/proton/index/indexmanager.h b/searchcore/src/vespa/searchcore/proton/index/indexmanager.h index 8a7d6a64b30..f81c949d8b3 100644 --- a/searchcore/src/vespa/searchcore/proton/index/indexmanager.h +++ b/searchcore/src/vespa/searchcore/proton/index/indexmanager.h @@ -36,7 +36,8 @@ public: threadingService); virtual searchcorespi::index::IMemoryIndex::SP - createMemoryIndex(const search::index::Schema &schema); + createMemoryIndex(const search::index::Schema &schema, + SerialNum serialNum) override; virtual searchcorespi::index::IDiskIndex::SP loadDiskIndex(const vespalib::string &indexDir); virtual searchcorespi::index::IDiskIndex::SP diff --git a/searchcore/src/vespa/searchcore/proton/index/memoryindexwrapper.cpp b/searchcore/src/vespa/searchcore/proton/index/memoryindexwrapper.cpp index c1535082407..73a86c805e2 100644 --- a/searchcore/src/vespa/searchcore/proton/index/memoryindexwrapper.cpp +++ b/searchcore/src/vespa/searchcore/proton/index/memoryindexwrapper.cpp @@ -7,6 +7,7 @@ LOG_SETUP(".proton.index.memoryindexwrapper"); #include <vespa/searchlib/common/serialnumfileheadercontext.h> #include 
<vespa/searchlib/diskindex/indexbuilder.h> #include <vespa/vespalib/util/exceptions.h> +#include <vespa/searchcorespi/index/indexsearchablevisitor.h> using search::TuneFileIndexing; using search::common::FileHeaderContext; @@ -22,9 +23,11 @@ MemoryIndexWrapper::MemoryIndexWrapper(const search::index::Schema &schema, const search::common::FileHeaderContext &fileHeaderContext, const TuneFileIndexing &tuneFileIndexing, searchcorespi::index::IThreadingService & - threadingService) + threadingService, + search::SerialNum serialNum) : _index(schema, threadingService.indexFieldInverter(), threadingService.indexFieldWriter()), + _serialNum(serialNum), _fileHeaderContext(fileHeaderContext), _tuneFileIndexing(tuneFileIndexing) { @@ -46,5 +49,17 @@ MemoryIndexWrapper::flushToDisk(const vespalib::string &flushDir, indexBuilder.close(); } +search::SerialNum +MemoryIndexWrapper::getSerialNum() const +{ + return _serialNum.load(std::memory_order_relaxed); +} + +void +MemoryIndexWrapper::accept(searchcorespi::IndexSearchableVisitor &visitor) const +{ + visitor.visit(*this); +} + } // namespace proton diff --git a/searchcore/src/vespa/searchcore/proton/index/memoryindexwrapper.h b/searchcore/src/vespa/searchcore/proton/index/memoryindexwrapper.h index 5a058326db2..a4ef714d09d 100644 --- a/searchcore/src/vespa/searchcore/proton/index/memoryindexwrapper.h +++ b/searchcore/src/vespa/searchcore/proton/index/memoryindexwrapper.h @@ -7,6 +7,7 @@ #include <vespa/searchcorespi/index/ithreadingservice.h> #include <vespa/searchlib/common/tunefileinfo.h> #include <vespa/searchlib/common/fileheadercontext.h> +#include <atomic> namespace proton { @@ -17,6 +18,7 @@ namespace proton { class MemoryIndexWrapper : public searchcorespi::index::IMemoryIndex { private: search::memoryindex::MemoryIndex _index; + std::atomic<search::SerialNum> _serialNum; const search::common::FileHeaderContext &_fileHeaderContext; const search::TuneFileIndexing _tuneFileIndexing; @@ -25,7 +27,8 @@ public: const 
search::common::FileHeaderContext &fileHeaderContext, const search::TuneFileIndexing &tuneFileIndexing, searchcorespi::index::IThreadingService & - threadingService); + threadingService, + search::SerialNum serialNum); /** * Implements searchcorespi::IndexSearchable @@ -53,6 +56,10 @@ public: .sizeOnDisk(0); } + virtual search::SerialNum getSerialNum() const override; + + virtual void accept(searchcorespi::IndexSearchableVisitor &visitor) const override; + /** * Implements proton::IMemoryIndex */ @@ -74,8 +81,10 @@ public: uint64_t getStaticMemoryFootprint() const override { return _index.getStaticMemoryFootprint(); } - virtual void commit(OnWriteDoneType onWriteDone) override { + virtual void commit(OnWriteDoneType onWriteDone, + search::SerialNum serialNum) override { _index.commit(onWriteDone); + _serialNum.store(serialNum, std::memory_order_relaxed); } virtual void wipeHistory(const search::index::Schema &schema) override{ _index.wipeHistory(schema); diff --git a/searchcore/src/vespa/searchcore/proton/server/documentsubdbcollection.cpp b/searchcore/src/vespa/searchcore/proton/server/documentsubdbcollection.cpp index 541f7a107ca..f6fda5c0df7 100644 --- a/searchcore/src/vespa/searchcore/proton/server/documentsubdbcollection.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/documentsubdbcollection.cpp @@ -49,13 +49,14 @@ DocumentSubDBCollection::DocumentSubDBCollection( _bucketDB(), _bucketDBHandler() { - + const ProtonConfig::Grow & growCfg = protonCfg.grow; + const ProtonConfig::Distribution & distCfg = protonCfg.distribution; _bucketDB = std::make_shared<BucketDBOwner>(); _bucketDBHandler.reset(new bucketdb::BucketDBHandler(*_bucketDB)); - search::GrowStrategy attributeGrow(protonCfg.grow.initial, - protonCfg.grow.factor, - protonCfg.grow.add); - size_t attributeGrowNumDocs(protonCfg.grow.numdocs); + search::GrowStrategy searchableGrowth(growCfg.initial * distCfg.searchablecopies, growCfg.factor, growCfg.add); + search::GrowStrategy 
removedGrowth(std::max(1024l, growCfg.initial/100), growCfg.factor, growCfg.add); + search::GrowStrategy notReadyGrowth(growCfg.initial * (distCfg.redundancy - distCfg.searchablecopies), growCfg.factor, growCfg.add); + size_t attributeGrowNumDocs(growCfg.numdocs); size_t numSearcherThreads = protonCfg.numsearcherthreads; StoreOnlyDocSubDB::Context context(owner, @@ -74,7 +75,7 @@ DocumentSubDBCollection::DocumentSubDBCollection( (StoreOnlyDocSubDB::Config(docTypeName, "0.ready", baseDir, - attributeGrow, + searchableGrowth, attributeGrowNumDocs, _readySubDbId, SubDbType::READY), @@ -94,7 +95,7 @@ DocumentSubDBCollection::DocumentSubDBCollection( (new StoreOnlyDocSubDB(StoreOnlyDocSubDB::Config(docTypeName, "1.removed", baseDir, - attributeGrow, + removedGrowth, attributeGrowNumDocs, _remSubDbId, SubDbType::REMOVED), @@ -104,7 +105,7 @@ DocumentSubDBCollection::DocumentSubDBCollection( (StoreOnlyDocSubDB::Config(docTypeName, "2.notready", baseDir, - attributeGrow, + notReadyGrowth, attributeGrowNumDocs, _notReadySubDbId, SubDbType::NOTREADY), @@ -201,7 +202,7 @@ DocumentSubDBCollection::initViews(const DocumentDBConfig &configSnapshot, void -DocumentSubDBCollection::clearViews(void) +DocumentSubDBCollection::clearViews() { for (auto subDb : _subDBs) { subDb->clearViews(); @@ -210,7 +211,7 @@ DocumentSubDBCollection::clearViews(void) void -DocumentSubDBCollection::onReplayDone(void) +DocumentSubDBCollection::onReplayDone() { for (auto subDb : _subDBs) { subDb->onReplayDone(); @@ -228,7 +229,7 @@ DocumentSubDBCollection::onReprocessDone(SerialNum serialNum) SerialNum -DocumentSubDBCollection::getOldestFlushedSerial(void) +DocumentSubDBCollection::getOldestFlushedSerial() { SerialNum lowest = -1; for (auto subDb : _subDBs) { @@ -239,7 +240,7 @@ DocumentSubDBCollection::getOldestFlushedSerial(void) SerialNum -DocumentSubDBCollection::getNewestFlushedSerial(void) +DocumentSubDBCollection::getNewestFlushedSerial() { SerialNum highest = 0; for (auto subDb : _subDBs) { diff 
--git a/searchcorespi/src/vespa/searchcorespi/index/CMakeLists.txt b/searchcorespi/src/vespa/searchcorespi/index/CMakeLists.txt index 65b006f8ea9..84891e37307 100644 --- a/searchcorespi/src/vespa/searchcorespi/index/CMakeLists.txt +++ b/searchcorespi/src/vespa/searchcorespi/index/CMakeLists.txt @@ -3,11 +3,13 @@ vespa_add_library(searchcorespi_index OBJECT SOURCES activediskindexes.cpp diskindexcleaner.cpp + disk_index_stats.cpp eventlogger.cpp fusionrunner.cpp iindexmanager.cpp iindexcollection.cpp index_manager_explorer.cpp + index_manager_stats.cpp indexcollection.cpp indexdisklayout.cpp indexflushtarget.cpp @@ -18,6 +20,8 @@ vespa_add_library(searchcorespi_index OBJECT indexmanagerconfig.cpp indexreadutilities.cpp indexsearchable.cpp + index_searchable_stats.cpp + memory_index_stats.cpp indexwriteutilities.cpp warmupindexcollection.cpp isearchableindexcollection.cpp diff --git a/searchcorespi/src/vespa/searchcorespi/index/disk_index_stats.cpp b/searchcorespi/src/vespa/searchcorespi/index/disk_index_stats.cpp new file mode 100644 index 00000000000..f9d5215d6f5 --- /dev/null +++ b/searchcorespi/src/vespa/searchcorespi/index/disk_index_stats.cpp @@ -0,0 +1,28 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#include <vespa/fastos/fastos.h> +#include "disk_index_stats.h" +#include "idiskindex.h" + + +namespace searchcorespi { +namespace index { + +DiskIndexStats::DiskIndexStats() + : IndexSearchableStats(), + _indexDir() +{ +} + +DiskIndexStats::DiskIndexStats(const IDiskIndex &index) + : IndexSearchableStats(index), + _indexDir(index.getIndexDir()) +{ +} + +DiskIndexStats::~DiskIndexStats() +{ +} + +} // namespace searchcorespi::index +} // namespace searchcorespi diff --git a/searchcorespi/src/vespa/searchcorespi/index/disk_index_stats.h b/searchcorespi/src/vespa/searchcorespi/index/disk_index_stats.h new file mode 100644 index 00000000000..579a840b756 --- /dev/null +++ b/searchcorespi/src/vespa/searchcorespi/index/disk_index_stats.h @@ -0,0 +1,26 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +#pragma once + +#include "index_searchable_stats.h" +#include <vespa/vespalib/stllike/string.h> + +namespace searchcorespi { +namespace index { + +struct IDiskIndex; + +/** + * Information about a disk index usable by state explorer. 
+ */ +class DiskIndexStats : public IndexSearchableStats { + vespalib::string _indexDir; +public: + DiskIndexStats(); + DiskIndexStats(const IDiskIndex &index); + ~DiskIndexStats(); + + const vespalib::string &getIndexdir() const { return _indexDir; } +}; + +} // namespace searchcorespi::index +} // namespace searchcorespi diff --git a/searchcorespi/src/vespa/searchcorespi/index/fakeindexsearchable.h b/searchcorespi/src/vespa/searchcorespi/index/fakeindexsearchable.h index 0db49c5f2df..00ede178339 100644 --- a/searchcorespi/src/vespa/searchcorespi/index/fakeindexsearchable.h +++ b/searchcorespi/src/vespa/searchcorespi/index/fakeindexsearchable.h @@ -37,6 +37,12 @@ public: virtual search::SearchableStats getSearchableStats() const { return search::SearchableStats(); } + + virtual search::SerialNum getSerialNum() const override { return 0; } + virtual void accept(IndexSearchableVisitor &visitor) const override { + (void) visitor; + } + }; } // namespace searchcorespi diff --git a/searchcorespi/src/vespa/searchcorespi/index/iindexmaintaineroperations.h b/searchcorespi/src/vespa/searchcorespi/index/iindexmaintaineroperations.h index 2b8a3ac3965..eb5edbf3298 100644 --- a/searchcorespi/src/vespa/searchcorespi/index/iindexmaintaineroperations.h +++ b/searchcorespi/src/vespa/searchcorespi/index/iindexmaintaineroperations.h @@ -19,7 +19,7 @@ struct IIndexMaintainerOperations { /** * Creates a new memory index using the given schema. */ - virtual IMemoryIndex::SP createMemoryIndex(const search::index::Schema &schema) = 0; + virtual IMemoryIndex::SP createMemoryIndex(const search::index::Schema &schema, search::SerialNum serialNum) = 0; /** * Loads a disk index from the given directory. 
diff --git a/searchcorespi/src/vespa/searchcorespi/index/imemoryindex.h b/searchcorespi/src/vespa/searchcorespi/index/imemoryindex.h index fabaf730cb7..c8baaf9ca13 100644 --- a/searchcorespi/src/vespa/searchcorespi/index/imemoryindex.h +++ b/searchcorespi/src/vespa/searchcorespi/index/imemoryindex.h @@ -61,7 +61,7 @@ struct IMemoryIndex : public searchcorespi::IndexSearchable { /** * Commits the inserts and removes since the last commit, making them searchable. **/ - virtual void commit(OnWriteDoneType onWriteDone) = 0; + virtual void commit(OnWriteDoneType onWriteDone, search::SerialNum serialNum) = 0; /** * Flushes this memory index to disk as a disk index. diff --git a/searchcorespi/src/vespa/searchcorespi/index/index_manager_explorer.cpp b/searchcorespi/src/vespa/searchcorespi/index/index_manager_explorer.cpp index f3695f2bd52..959f5ccdc59 100644 --- a/searchcorespi/src/vespa/searchcorespi/index/index_manager_explorer.cpp +++ b/searchcorespi/src/vespa/searchcorespi/index/index_manager_explorer.cpp @@ -4,14 +4,41 @@ #include <vespa/log/log.h> LOG_SETUP(".searchcorespi.index.index_manager_explorer"); #include "index_manager_explorer.h" +#include "index_manager_stats.h" #include <vespa/vespalib/data/slime/cursor.h> using vespalib::slime::Cursor; using vespalib::slime::Inserter; +using search::SearchableStats; +using searchcorespi::index::DiskIndexStats; +using searchcorespi::index::MemoryIndexStats; namespace searchcorespi { +namespace { + +void insertDiskIndex(Cursor &arrayCursor, const DiskIndexStats &diskIndex) +{ + Cursor &diskIndexCursor = arrayCursor.addObject(); + const SearchableStats &sstats = diskIndex.getSearchableStats(); + diskIndexCursor.setLong("serialNum", diskIndex.getSerialNum()); + diskIndexCursor.setString("indexDir", diskIndex.getIndexdir()); + diskIndexCursor.setLong("sizeOnDisk", sstats.sizeOnDisk()); +} + +void insertMemoryIndex(Cursor &arrayCursor, const MemoryIndexStats &memoryIndex) +{ + Cursor &memoryIndexCursor = 
arrayCursor.addObject(); + const SearchableStats &sstats = memoryIndex.getSearchableStats(); + memoryIndexCursor.setLong("serialNum", memoryIndex.getSerialNum()); + memoryIndexCursor.setLong("docsInMemory", sstats.docsInMemory()); + memoryIndexCursor.setLong("memoryUsage", sstats.memoryUsage()); +} + +} + + IndexManagerExplorer::IndexManagerExplorer(IIndexManager::SP mgr) : _mgr(std::move(mgr)) { @@ -20,9 +47,19 @@ IndexManagerExplorer::IndexManagerExplorer(IIndexManager::SP mgr) void IndexManagerExplorer::get_state(const Inserter &inserter, bool full) const { - (void) full; Cursor &object = inserter.insertObject(); object.setLong("lastSerialNum", _mgr->getCurrentSerialNum()); + if (full) { + IndexManagerStats stats(*_mgr); + Cursor &diskIndexArrayCursor = object.setArray("diskIndexes"); + for (const auto &diskIndex : stats.getDiskIndexes()) { + insertDiskIndex(diskIndexArrayCursor, diskIndex); + } + Cursor &memoryIndexArrayCursor = object.setArray("memoryIndexes"); + for (const auto &memoryIndex : stats.getMemoryIndexes()) { + insertMemoryIndex(memoryIndexArrayCursor, memoryIndex); + } + } } } // namespace searchcorespi diff --git a/searchcorespi/src/vespa/searchcorespi/index/index_manager_stats.cpp b/searchcorespi/src/vespa/searchcorespi/index/index_manager_stats.cpp new file mode 100644 index 00000000000..035a5698c2f --- /dev/null +++ b/searchcorespi/src/vespa/searchcorespi/index/index_manager_stats.cpp @@ -0,0 +1,61 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#include <vespa/fastos/fastos.h> +#include "index_manager_stats.h" +#include "iindexmanager.h" +#include "indexsearchablevisitor.h" + + +namespace searchcorespi { + +namespace { + +class Visitor : public IndexSearchableVisitor +{ +public: + std::vector<index::DiskIndexStats> _diskIndexes; + std::vector<index::MemoryIndexStats> _memoryIndexes; + + Visitor() + : _diskIndexes(), + _memoryIndexes() + { + } + virtual void visit(const index::IDiskIndex &index) override { + _diskIndexes.emplace_back(index); + } + virtual void visit(const index::IMemoryIndex &index) override { + _memoryIndexes.emplace_back(index); + } + + void normalize() { + std::sort(_diskIndexes.begin(), _diskIndexes.end()); + std::sort(_memoryIndexes.begin(), _memoryIndexes.end()); + } +}; + +} + +IndexManagerStats::IndexManagerStats() + : _diskIndexes(), + _memoryIndexes() +{ +} + +IndexManagerStats::IndexManagerStats(const IIndexManager &indexManager) + : _diskIndexes(), + _memoryIndexes() +{ + Visitor visitor; + IndexSearchable::SP searchable(indexManager.getSearchable()); + searchable->accept(visitor); + visitor.normalize(); + _diskIndexes = std::move(visitor._diskIndexes); + _memoryIndexes = std::move(visitor._memoryIndexes); +} + +IndexManagerStats::~IndexManagerStats() +{ +} + +} // namespace searchcorespi diff --git a/searchcorespi/src/vespa/searchcorespi/index/index_manager_stats.h b/searchcorespi/src/vespa/searchcorespi/index/index_manager_stats.h new file mode 100644 index 00000000000..048124b688f --- /dev/null +++ b/searchcorespi/src/vespa/searchcorespi/index/index_manager_stats.h @@ -0,0 +1,30 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +#pragma once + +#include "disk_index_stats.h" +#include "memory_index_stats.h" + +namespace searchcorespi { + +class IIndexManager; + +/** + * Information about an index manager usable by state explorer. 
+ */ +class IndexManagerStats { + std::vector<index::DiskIndexStats> _diskIndexes; + std::vector<index::MemoryIndexStats> _memoryIndexes; +public: + IndexManagerStats(); + IndexManagerStats(const IIndexManager &indexManager); + ~IndexManagerStats(); + + const std::vector<index::DiskIndexStats> &getDiskIndexes() const { + return _diskIndexes; + } + const std::vector<index::MemoryIndexStats> &getMemoryIndexes() const { + return _memoryIndexes; + } +}; + +} // namespace searchcorespi diff --git a/searchcorespi/src/vespa/searchcorespi/index/index_searchable_stats.cpp b/searchcorespi/src/vespa/searchcorespi/index/index_searchable_stats.cpp new file mode 100644 index 00000000000..c5ce5163aac --- /dev/null +++ b/searchcorespi/src/vespa/searchcorespi/index/index_searchable_stats.cpp @@ -0,0 +1,29 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#include <vespa/fastos/fastos.h> +#include "index_searchable_stats.h" +#include "indexsearchable.h" + + +namespace searchcorespi { +namespace index { + +IndexSearchableStats::IndexSearchableStats() + : _serialNum(0), + _searchableStats() +{ +} + +IndexSearchableStats::IndexSearchableStats(const IndexSearchable &index) + : _serialNum(index.getSerialNum()), + _searchableStats(index.getSearchableStats()) +{ +} + +bool IndexSearchableStats::operator<(const IndexSearchableStats &rhs) const +{ + return _serialNum < rhs._serialNum; +} + +} // namespace searchcorespi::index +} // namespace searchcorespi diff --git a/searchcorespi/src/vespa/searchcorespi/index/index_searchable_stats.h b/searchcorespi/src/vespa/searchcorespi/index/index_searchable_stats.h new file mode 100644 index 00000000000..dd904b3b286 --- /dev/null +++ b/searchcorespi/src/vespa/searchcorespi/index/index_searchable_stats.h @@ -0,0 +1,32 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#pragma once + +#include <vespa/searchlib/common/serialnum.h> +#include <vespa/searchlib/util/searchable_stats.h> + +namespace searchcorespi { + +class IndexSearchable; + +namespace index { + +/** + * Information about a searchable index usable by state explorer. + */ +class IndexSearchableStats +{ + using SerialNum = search::SerialNum; + using SearchableStats = search::SearchableStats; + SerialNum _serialNum; + SearchableStats _searchableStats; +public: + IndexSearchableStats(); + IndexSearchableStats(const IndexSearchable &index); + bool operator<(const IndexSearchableStats &rhs) const; + SerialNum getSerialNum() const { return _serialNum; } + const SearchableStats &getSearchableStats() const { return _searchableStats; } +}; + +} // namespace searchcorespi::index +} // namespace searchcorespi diff --git a/searchcorespi/src/vespa/searchcorespi/index/indexcollection.cpp b/searchcorespi/src/vespa/searchcorespi/index/indexcollection.cpp index 0284c4a682e..653cf0a28b4 100644 --- a/searchcorespi/src/vespa/searchcorespi/index/indexcollection.cpp +++ b/searchcorespi/src/vespa/searchcorespi/index/indexcollection.cpp @@ -9,6 +9,7 @@ LOG_SETUP(".searchcorespi.index.indexcollection"); #include <vespa/searchlib/queryeval/create_blueprint_visitor_helper.h> #include <vespa/searchlib/queryeval/intermediate_blueprints.h> #include <vespa/searchlib/queryeval/leaf_blueprints.h> +#include "indexsearchablevisitor.h" using namespace search::queryeval; using namespace search::query; @@ -116,6 +117,25 @@ IndexCollection::getSearchableStats() const return stats; } +search::SerialNum +IndexCollection::getSerialNum() const +{ + search::SerialNum serialNum = 0; + for (auto &source : _sources) { + serialNum = std::max(serialNum, source.source_wrapper->getSerialNum()); + } + return serialNum; +} + + +void +IndexCollection::accept(IndexSearchableVisitor &visitor) const +{ + for (auto &source : _sources) { + source.source_wrapper->accept(visitor); + } +} + namespace { struct Mixer { diff 
--git a/searchcorespi/src/vespa/searchcorespi/index/indexcollection.h b/searchcorespi/src/vespa/searchcorespi/index/indexcollection.h index 223b36fce99..38942abc9dc 100644 --- a/searchcorespi/src/vespa/searchcorespi/index/indexcollection.h +++ b/searchcorespi/src/vespa/searchcorespi/index/indexcollection.h @@ -61,6 +61,8 @@ public: const Node &term, const IAttributeContext &attrCtx); virtual search::SearchableStats getSearchableStats() const; + virtual search::SerialNum getSerialNum() const override; + virtual void accept(IndexSearchableVisitor &visitor) const override; static ISearchableIndexCollection::UP replaceAndRenumber( const ISourceSelector::SP & selector, diff --git a/searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.cpp b/searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.cpp index 36d42e245e1..3db3875b561 100644 --- a/searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.cpp +++ b/searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.cpp @@ -117,6 +117,12 @@ public: return _index->createBlueprint(requestContext, fields, term, attrCtx); } virtual search::SearchableStats getSearchableStats() const { return _index->getSearchableStats(); } + virtual search::SerialNum getSerialNum() const override { + return _index->getSerialNum(); + } + virtual void accept(IndexSearchableVisitor &visitor) const override { + _index->accept(visitor); + } /** * Implements IDiskIndex @@ -293,7 +299,8 @@ IDiskIndex::SP IndexMaintainer::flushMemoryIndex(IMemoryIndex &memoryIndex, uint32_t indexId, uint32_t docIdLimit, - SerialNum serialNum) + SerialNum serialNum, + FixedSourceSelector::SaveInfo &saveInfo) { // Called by a flush worker thread const string flushDir = getFlushDir(indexId); @@ -302,6 +309,11 @@ IndexMaintainer::flushMemoryIndex(IMemoryIndex &memoryIndex, if (wtSchema.get() != NULL) { updateDiskIndexSchema(flushDir, *wtSchema, noSerialNumHigh); } + IndexWriteUtilities::writeSourceSelector(saveInfo, indexId, getAttrTune(), + 
_ctx.getFileHeaderContext(), + serialNum); + IndexWriteUtilities::writeSerialNum(serialNum, flushDir, + _ctx.getFileHeaderContext()); return loadDiskIndex(flushDir); } @@ -521,14 +533,8 @@ IndexMaintainer::flushMemoryIndex(FlushArgs &args, IMemoryIndex &memoryIndex = *args.old_index; Schema::SP wtSchema = memoryIndex.getWipeTimeSchema(); IDiskIndex::SP diskIndex = flushMemoryIndex(memoryIndex, args.old_absolute_id, - docIdLimit, args.flush_serial_num); - IndexWriteUtilities::writeSourceSelector(saveInfo, args.old_absolute_id, - getAttrTune(), _ctx.getFileHeaderContext(), - args.flush_serial_num); - IndexWriteUtilities::writeSerialNum(args.flush_serial_num, - getFlushDir(args.old_absolute_id), - _ctx.getFileHeaderContext()); - + docIdLimit, args.flush_serial_num, + saveInfo); // Post processing after memory index has been written to disk and // opened as disk index. args._changeGens = changeGens; @@ -799,7 +805,7 @@ IndexMaintainer::IndexMaintainer(const IndexMaintainerConfig &config, _last_fusion_id(), _next_id(), _current_index_id(), - _current_index(operations.createMemoryIndex(_schema)), + _current_index(), _current_serial_num(0), _flush_serial_num(0), _lastFlushTime(), @@ -848,6 +854,7 @@ IndexMaintainer::IndexMaintainer(const IndexMaintainerConfig &config, _selector.reset(getSourceSelector().cloneAndSubtract(ost.str(), id_diff).release()); assert(_last_fusion_id == _selector->getBaseId()); } + _current_index = operations.createMemoryIndex(_schema, _current_serial_num); _current_index_id = getNewAbsoluteId() - _last_fusion_id; assert(_current_index_id < ISourceSelector::SOURCE_LIMIT); ISearchableIndexCollection::UP sourceList(loadDiskIndexes(spec, ISearchableIndexCollection::UP(new IndexCollection(_selector)))); @@ -874,7 +881,7 @@ IndexMaintainer::initFlush(SerialNum serialNum, searchcorespi::FlushStats * stat _current_serial_num = std::max(_current_serial_num, serialNum); } - IMemoryIndex::SP new_index(_operations.createMemoryIndex(getSchema())); + 
IMemoryIndex::SP new_index(_operations.createMemoryIndex(getSchema(), _current_serial_num)); FlushArgs args; args.stats = stats; scheduleCommit(); @@ -1132,7 +1139,8 @@ IndexMaintainer::commit() // only triggered via scheduleCommit() assert(_ctx.getThreadingService().index().isCurrentThread()); LockGuard lock(_index_update_lock); - _current_index->commit(std::shared_ptr<search::IDestructorCallback>()); + _current_index->commit(std::shared_ptr<search::IDestructorCallback>(), + _current_serial_num); // caller calls _ctx.getThreadingService().sync() } @@ -1142,7 +1150,7 @@ IndexMaintainer::commit(SerialNum serialNum, OnWriteDoneType onWriteDone) assert(_ctx.getThreadingService().index().isCurrentThread()); LockGuard lock(_index_update_lock); _current_serial_num = serialNum; - _current_index->commit(onWriteDone); + _current_index->commit(onWriteDone, serialNum); } void @@ -1169,7 +1177,7 @@ void IndexMaintainer::setSchema(const Schema & schema, const Schema & fusionSchema) { assert(_ctx.getThreadingService().master().isCurrentThread()); - IMemoryIndex::SP new_index(_operations.createMemoryIndex(schema)); + IMemoryIndex::SP new_index(_operations.createMemoryIndex(schema, _current_serial_num)); SetSchemaArgs args; args._newSchema = schema; diff --git a/searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.h b/searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.h index 417f79fa6ca..39e3d116d44 100644 --- a/searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.h +++ b/searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.h @@ -185,7 +185,8 @@ class IndexMaintainer : public IIndexManager, flushMemoryIndex(IMemoryIndex &memoryIndex, uint32_t indexId, uint32_t docIdLimit, - SerialNum serialNum); + SerialNum serialNum, + search::FixedSourceSelector::SaveInfo &saveInfo); ISearchableIndexCollection::UP loadDiskIndexes(const FusionSpec &spec, ISearchableIndexCollection::UP sourceList); void replaceSource(uint32_t sourceId, const IndexSearchable::SP 
&source); diff --git a/searchcorespi/src/vespa/searchcorespi/index/indexsearchable.h b/searchcorespi/src/vespa/searchcorespi/index/indexsearchable.h index 2d4d7cd9674..89a7c8d9f07 100644 --- a/searchcorespi/src/vespa/searchcorespi/index/indexsearchable.h +++ b/searchcorespi/src/vespa/searchcorespi/index/indexsearchable.h @@ -8,9 +8,12 @@ #include <vespa/searchlib/queryeval/blueprint.h> #include <vespa/searchlib/queryeval/irequestcontext.h> #include <vespa/searchlib/util/searchable_stats.h> +#include <vespa/searchlib/common/serialnum.h> namespace searchcorespi { +class IndexSearchableVisitor; + /** * Abstract class extended by components to expose content that can be * searched by a query term. A IndexSearchable component supports searching @@ -72,6 +75,17 @@ public: * Returns the searchable stats for this index searchable. */ virtual search::SearchableStats getSearchableStats() const = 0; + + /* + * Returns the serial number for this index searchable. + */ + virtual search::SerialNum getSerialNum() const = 0; + + /* + * Calls visitor with properly downcasted argument to differentiate + * between different types of indexes (disk index or memory index). + */ + virtual void accept(IndexSearchableVisitor &visitor) const = 0; }; } // namespace searchcorespi diff --git a/searchcorespi/src/vespa/searchcorespi/index/indexsearchablevisitor.h b/searchcorespi/src/vespa/searchcorespi/index/indexsearchablevisitor.h new file mode 100644 index 00000000000..8238f916c99 --- /dev/null +++ b/searchcorespi/src/vespa/searchcorespi/index/indexsearchablevisitor.h @@ -0,0 +1,26 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +namespace searchcorespi { + +namespace index { + +struct IDiskIndex; +struct IMemoryIndex; + +} + +/* + * Interface for visiting an index searchable containing disk and + * memory indexes. 
+ */ +class IndexSearchableVisitor +{ +public: + virtual ~IndexSearchableVisitor() { } + virtual void visit(const index::IDiskIndex &index) = 0; + virtual void visit(const index::IMemoryIndex &index) = 0; +}; + +} // namespace searchcorespi diff --git a/searchcorespi/src/vespa/searchcorespi/index/memory_index_stats.cpp b/searchcorespi/src/vespa/searchcorespi/index/memory_index_stats.cpp new file mode 100644 index 00000000000..7805702fd03 --- /dev/null +++ b/searchcorespi/src/vespa/searchcorespi/index/memory_index_stats.cpp @@ -0,0 +1,26 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#include <vespa/fastos/fastos.h> +#include "memory_index_stats.h" +#include "imemoryindex.h" + + +namespace searchcorespi { +namespace index { + +MemoryIndexStats::MemoryIndexStats() + : IndexSearchableStats() +{ +} + +MemoryIndexStats::MemoryIndexStats(const IMemoryIndex &index) + : IndexSearchableStats(index) +{ +} + +MemoryIndexStats::~MemoryIndexStats() +{ +} + +} // namespace searchcorespi::index +} // namespace searchcorespi diff --git a/searchcorespi/src/vespa/searchcorespi/index/memory_index_stats.h b/searchcorespi/src/vespa/searchcorespi/index/memory_index_stats.h new file mode 100644 index 00000000000..7508ecef3cc --- /dev/null +++ b/searchcorespi/src/vespa/searchcorespi/index/memory_index_stats.h @@ -0,0 +1,22 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +#pragma once + +#include "index_searchable_stats.h" + +namespace searchcorespi { +namespace index { + +struct IMemoryIndex; + +/** + * Information about a memory index usable by state explorer. 
+ */ +class MemoryIndexStats : public IndexSearchableStats { +public: + MemoryIndexStats(); + MemoryIndexStats(const IMemoryIndex &index); + ~MemoryIndexStats(); +}; + +} // namespace searchcorespi::index +} // namespace searchcorespi diff --git a/searchcorespi/src/vespa/searchcorespi/index/warmupindexcollection.cpp b/searchcorespi/src/vespa/searchcorespi/index/warmupindexcollection.cpp index c993c79a93e..53a2a4d59ea 100644 --- a/searchcorespi/src/vespa/searchcorespi/index/warmupindexcollection.cpp +++ b/searchcorespi/src/vespa/searchcorespi/index/warmupindexcollection.cpp @@ -184,6 +184,22 @@ WarmupIndexCollection::getSearchableStats() const return _prev->getSearchableStats(); } + +search::SerialNum +WarmupIndexCollection::getSerialNum() const +{ + return std::max(_prev->getSerialNum(), _next->getSerialNum()); +} + + +void +WarmupIndexCollection::accept(IndexSearchableVisitor &visitor) const +{ + _prev->accept(visitor); + _next->accept(visitor); +} + + void WarmupIndexCollection::append(uint32_t id, const IndexSearchable::SP &source) { diff --git a/searchcorespi/src/vespa/searchcorespi/index/warmupindexcollection.h b/searchcorespi/src/vespa/searchcorespi/index/warmupindexcollection.h index a304b3b9df2..be25b81d498 100644 --- a/searchcorespi/src/vespa/searchcorespi/index/warmupindexcollection.h +++ b/searchcorespi/src/vespa/searchcorespi/index/warmupindexcollection.h @@ -49,6 +49,8 @@ public: const Node &term, const IAttributeContext &attrCtx); search::SearchableStats getSearchableStats() const override; + search::SerialNum getSerialNum() const override; + void accept(IndexSearchableVisitor &visitor) const override; // Implements ISearchableIndexCollection void append(uint32_t id, const IndexSearchable::SP &source) override; diff --git a/searchlib/src/apps/vespa-ranking-expression-analyzer/vespa-ranking-expression-analyzer.cpp b/searchlib/src/apps/vespa-ranking-expression-analyzer/vespa-ranking-expression-analyzer.cpp index df3c99b21fc..42676c715d3 100644 --- 
a/searchlib/src/apps/vespa-ranking-expression-analyzer/vespa-ranking-expression-analyzer.cpp +++ b/searchlib/src/apps/vespa-ranking-expression-analyzer/vespa-ranking-expression-analyzer.cpp @@ -191,7 +191,7 @@ struct FunctionInfo { size_t get_path_len(const TreeList &trees) const { size_t path = 0; for (const Node *tree: trees) { - InterpretedFunction ifun(DefaultTensorEngine::ref(), *tree, params.size()); + InterpretedFunction ifun(DefaultTensorEngine::ref(), *tree, params.size(), NodeTypes()); InterpretedFunction::Context ctx; for (double param: params) { ctx.add_param(param); diff --git a/searchlib/src/tests/attribute/multivaluemapping/multivaluemapping_test.cpp b/searchlib/src/tests/attribute/multivaluemapping/multivaluemapping_test.cpp index e78e180856b..48768da32c5 100644 --- a/searchlib/src/tests/attribute/multivaluemapping/multivaluemapping_test.cpp +++ b/searchlib/src/tests/attribute/multivaluemapping/multivaluemapping_test.cpp @@ -3,8 +3,6 @@ #include <vespa/log/log.h> LOG_SETUP("multivaluemapping_test"); #include <vespa/vespalib/testkit/testapp.h> -//#define DEBUG_MULTIVALUE_MAPPING -//#define LOG_MULTIVALUE_MAPPING #include <vespa/searchlib/attribute/multivaluemapping.h> #include <algorithm> #include <limits> @@ -117,7 +115,7 @@ MultiValueMappingTest::testIndex64() EXPECT_EQUAL(idx.alternative(), 0u); EXPECT_EQUAL(idx.vectorIdx(), 6u); EXPECT_EQUAL(idx.offset(), 1000u); - EXPECT_EQUAL(idx.idx(), 0x3000003e8ull); + EXPECT_EQUAL(idx.idx(), 0x6000003e8ul); } { Index64 idx(15, 1, 134217727); @@ -125,11 +123,20 @@ MultiValueMappingTest::testIndex64() EXPECT_EQUAL(idx.alternative(), 1u); EXPECT_EQUAL(idx.vectorIdx(), 31u); EXPECT_EQUAL(idx.offset(), 134217727u); - EXPECT_EQUAL(idx.idx(), 0xf87ffffffull); + EXPECT_EQUAL(idx.idx(), 0x1f07fffffful); } { - EXPECT_EQUAL(Index64::maxValues(), 1023u); + Index64 idx(3087, 1, 0xfffffffful); + EXPECT_EQUAL(idx.values(), 3087u); + EXPECT_EQUAL(idx.alternative(), 1u); + EXPECT_EQUAL(idx.vectorIdx(), (3087u << 1) + 1); + 
EXPECT_EQUAL(idx.offset(), 0xfffffffful); + EXPECT_EQUAL(idx.idx(), 0x181ffffffffful); + } + { + EXPECT_EQUAL(Index64::maxValues(), 4095u); EXPECT_EQUAL(Index64::alternativeSize(), 2u); + EXPECT_EQUAL(Index64::offsetSize(), 0x1ul << 32); } } @@ -160,9 +167,6 @@ MultiValueMappingTest::testSimpleSetAndGet() } else { EXPECT_EQUAL(idx.values(), Index::maxValues()); } -#ifdef LOG_MULTIVALUE_MAPPING - LOG(info, "------------------------------------------------------------"); -#endif } EXPECT_TRUE(!mvm.hasKey(numKeys)); @@ -221,9 +225,6 @@ MultiValueMappingTest::testChangingValueCount() // Increasing the value count for some keys for (uint32_t valueCount = 1; valueCount <= maxCount; ++valueCount) { -#ifdef LOG_MULTIVALUE_MAPPING - LOG(info, "########################### %u ##############################", valueCount); -#endif uint32_t lastValueCount = valueCount - 1; // set values for (uint32_t key = 0; key < numKeys; ++key) { @@ -271,10 +272,6 @@ MultiValueMappingTest::checkReaders(MvMapping &mvm, for (ReaderVector::iterator iter = readers.begin(); iter != readers.end(); ) { if (iter->_endGen <= mvmGen) { -#ifdef LOG_MULTIVALUE_MAPPING - LOG(info, "check and remove reader: start = %u, end = %u", - iter->_startGen, iter->_endGen); -#endif for (uint32_t key = 0; key < iter->numKeys(); ++key) { Index idx = iter->_indices[key]; uint32_t valueCount = iter->_expected[key].size(); @@ -321,11 +318,6 @@ MultiValueMappingTest::testHoldListAndGeneration() generation_t mvmGen = 0u; for (uint32_t valueCount = 1; valueCount < maxCount; ++valueCount) { -#ifdef LOG_MULTIVALUE_MAPPING - LOG(info, "#################### count(%u) - gen(%u) ####################", - valueCount, mvm.getGeneration()); -#endif - // check and remove readers checkReaders(mvm, mvmGen, readers); diff --git a/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp b/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp index ab8715a29dc..cdb9f3366a4 100644 --- 
a/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp +++ b/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp @@ -7,7 +7,6 @@ LOG_SETUP("tensorattribute_test"); #include <vespa/searchlib/attribute/attributeguard.h> #include <vespa/vespalib/tensor/tensor_factory.h> #include <vespa/vespalib/tensor/default_tensor.h> -#include <vespa/vespalib/tensor/simple/simple_tensor_builder.h> using search::attribute::TensorAttribute; using search::AttributeGuard; @@ -17,7 +16,6 @@ using vespalib::tensor::Tensor; using vespalib::tensor::TensorCells; using vespalib::tensor::TensorDimensions; using vespalib::tensor::TensorFactory; -using vespalib::tensor::SimpleTensorBuilder; namespace vespalib { namespace tensor { diff --git a/searchlib/src/tests/datastore/logdatastore_test.cpp b/searchlib/src/tests/datastore/logdatastore_test.cpp index 2d1b3c48edb..00d4568df3e 100644 --- a/searchlib/src/tests/datastore/logdatastore_test.cpp +++ b/searchlib/src/tests/datastore/logdatastore_test.cpp @@ -19,6 +19,8 @@ LOG_SETUP("datastore_test"); using document::BucketId; using namespace search::docstore; using namespace search; +using namespace vespalib::alloc; +using vespalib::DefaultAlloc; using search::index::DummyFileHeaderContext; class MyTlSyncer : public transactionlog::SyncProxy { @@ -144,7 +146,7 @@ TEST("test that DirectIOPadding works accordng to spec") { FastOS_File file("directio.test"); file.EnableDirectIO(); EXPECT_TRUE(file.OpenReadWrite()); - vespalib::AlignedHeapAlloc buf(FILE_SIZE, 4096); + Alloc buf(DefaultAlloc::create(FILE_SIZE, MemoryAllocator::HUGEPAGE_SIZE, 4096)); memset(buf.get(), 'a', buf.size()); EXPECT_EQUAL(FILE_SIZE, file.Write2(buf.get(), FILE_SIZE)); size_t padBefore(0); @@ -528,8 +530,8 @@ verifyCacheStats(CacheStats cs, size_t hits, size_t misses, size_t elements, siz EXPECT_EQUAL(hits, cs.hits); EXPECT_EQUAL(misses, cs.misses); EXPECT_EQUAL(elements, cs.elements); - EXPECT_LESS_EQUAL(memory_used, cs.memory_used + 10); // We 
allow +- 10 as visitorder and hence compressability is non-deterministic. - EXPECT_GREATER_EQUAL(memory_used+10, cs.memory_used); + EXPECT_LESS_EQUAL(memory_used, cs.memory_used + 20); // We allow +- 20 as visitorder and hence compressability is non-deterministic. + EXPECT_GREATER_EQUAL(memory_used+20, cs.memory_used); } TEST("test that the integrated visit cache works.") { @@ -543,41 +545,41 @@ TEST("test that the integrated visit cache works.") { for (size_t i(1); i <= 100; i++) { vcs.verifyRead(i); } - TEST_DO(verifyCacheStats(ds.getCacheStats(), 0, 100, 100, 19774)); + TEST_DO(verifyCacheStats(ds.getCacheStats(), 0, 100, 100, 20574)); for (size_t i(1); i <= 100; i++) { vcs.verifyRead(i); } - TEST_DO(verifyCacheStats(ds.getCacheStats(), 100, 100, 100, 19774)); // From the individual cache. + TEST_DO(verifyCacheStats(ds.getCacheStats(), 100, 100, 100, 20574)); // From the individual cache. vcs.verifyVisit({7,9,17,19,67,88}, false); - TEST_DO(verifyCacheStats(ds.getCacheStats(), 100, 100, 100, 19774)); + TEST_DO(verifyCacheStats(ds.getCacheStats(), 100, 100, 100, 20574)); vcs.verifyVisit({7,9,17,19,67,88}, true); - TEST_DO(verifyCacheStats(ds.getCacheStats(), 100, 101, 101, 20335)); + TEST_DO(verifyCacheStats(ds.getCacheStats(), 100, 101, 101, 21135)); vcs.verifyVisit({7,9,17,19,67,88}, true); - TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 101, 101, 20335)); + TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 101, 101, 21135)); vcs.rewrite(8); - TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 101, 100, 20130)); // From the individual cache. + TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 101, 100, 20922)); // From the individual cache. vcs.rewrite(7); - TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 101, 98, 19364)); // From the both caches. + TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 101, 98, 20148)); // From the both caches. 
vcs.verifyVisit({7,9,17,19,67,88}, true); - TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 102, 99, 19948)); + TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 102, 99, 20732)); vcs.verifyVisit({7,9,17,19,67,88,89}, true); - TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 103, 99, 19999)); + TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 103, 99, 20783)); vcs.rewrite(17); - TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 103, 97, 19167)); + TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 103, 97, 19943)); vcs.verifyVisit({7,9,17,19,67,88,89}, true); - TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 104, 98, 19821)); + TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 104, 98, 20587)); vcs.remove(17); - TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 104, 97, 19167)); + TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 104, 97, 19943)); vcs.verifyVisit({7,9,17,19,67,88,89}, {7,9,19,67,88,89}, true); - TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 105, 98, 19750)); + TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 105, 98, 20526)); vcs.verifyVisit({41, 42}, true); - TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 106, 99, 20044)); + TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 106, 99, 20820)); vcs.verifyVisit({43, 44}, true); - TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 107, 100, 20348)); + TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 107, 100, 21124)); vcs.verifyVisit({41, 42, 43, 44}, true); - TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 108, 99, 20168)); + TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 108, 99, 20944)); } TEST("testWriteRead") { diff --git a/searchlib/src/tests/features/beta/beta_features.cpp b/searchlib/src/tests/features/beta/beta_features.cpp index e5642f475de..1bc75d6f3bb 100644 --- a/searchlib/src/tests/features/beta/beta_features.cpp +++ b/searchlib/src/tests/features/beta/beta_features.cpp @@ -3,7 +3,6 @@ #include <vespa/log/log.h> LOG_SETUP("beta_features_test"); -#include 
<boost/tokenizer.hpp> #include <vespa/searchlib/attribute/attributeguard.h> #include <vespa/searchlib/attribute/attributemanager.h> #include <vespa/searchlib/attribute/attributevector.h> diff --git a/searchlib/src/tests/features/featurebenchmark.cpp b/searchlib/src/tests/features/featurebenchmark.cpp index 14e43fa7d47..dc9d94907b4 100644 --- a/searchlib/src/tests/features/featurebenchmark.cpp +++ b/searchlib/src/tests/features/featurebenchmark.cpp @@ -7,7 +7,6 @@ LOG_SETUP("featurebenchmark"); #include <iomanip> #include <iostream> #include <string> -#include <boost/tokenizer.hpp> #include <vespa/searchlib/attribute/attributefactory.h> #include <vespa/searchlib/attribute/attributevector.h> diff --git a/searchlib/src/tests/features/prod_features.cpp b/searchlib/src/tests/features/prod_features.cpp index 914f49ca732..c414c2598e5 100644 --- a/searchlib/src/tests/features/prod_features.cpp +++ b/searchlib/src/tests/features/prod_features.cpp @@ -4,7 +4,6 @@ LOG_SETUP("prod_features_test"); #include "prod_features.h" -#include <boost/tokenizer.hpp> #include <vespa/searchlib/attribute/attributeguard.h> #include <vespa/searchlib/attribute/attributefactory.h> #include <vespa/searchlib/attribute/attributevector.h> diff --git a/searchlib/src/tests/index/doctypebuilder/doctypebuilder_test.cpp b/searchlib/src/tests/index/doctypebuilder/doctypebuilder_test.cpp index 3980700fa6b..86a1b8f3d23 100644 --- a/searchlib/src/tests/index/doctypebuilder/doctypebuilder_test.cpp +++ b/searchlib/src/tests/index/doctypebuilder/doctypebuilder_test.cpp @@ -2,8 +2,6 @@ #include <vespa/fastos/fastos.h> #include <vespa/log/log.h> LOG_SETUP("doctypebuilder_test"); -#include <boost/algorithm/string/classification.hpp> -#include <boost/algorithm/string/split.hpp> #include <vespa/document/repo/documenttyperepo.h> #include <vespa/searchlib/index/doctypebuilder.h> #include <vespa/vespalib/testkit/testapp.h> diff --git a/searchlib/src/tests/memoryindex/memoryindex/memoryindex_test.cpp 
b/searchlib/src/tests/memoryindex/memoryindex/memoryindex_test.cpp index 7d2afc151d5..93ae4d9636b 100644 --- a/searchlib/src/tests/memoryindex/memoryindex/memoryindex_test.cpp +++ b/searchlib/src/tests/memoryindex/memoryindex/memoryindex_test.cpp @@ -376,6 +376,7 @@ TEST("requireThatNumDocsAndDocIdLimitIsReturned") TEST("requireThatWeUnderstandTheMemoryFootprint") { + constexpr size_t BASE_SIZE = 118860u; { Setup setup; Index index(setup); @@ -384,12 +385,12 @@ TEST("requireThatWeUnderstandTheMemoryFootprint") } { Index index(Setup().field("f1")); - EXPECT_EQUAL(118852u, index.index.getStaticMemoryFootprint()); + EXPECT_EQUAL(BASE_SIZE, index.index.getStaticMemoryFootprint()); EXPECT_EQUAL(index.index.getStaticMemoryFootprint(), index.index.getMemoryUsage().allocatedBytes()); } { Index index(Setup().field("f1").field("f2")); - EXPECT_EQUAL(2*118852u, index.index.getStaticMemoryFootprint()); + EXPECT_EQUAL(2 * BASE_SIZE, index.index.getStaticMemoryFootprint()); EXPECT_EQUAL(index.index.getStaticMemoryFootprint(), index.index.getMemoryUsage().allocatedBytes()); } } diff --git a/searchlib/src/tests/predicate/document_features_store_test.cpp b/searchlib/src/tests/predicate/document_features_store_test.cpp index 4baf2d03fbe..72b93dc4d08 100644 --- a/searchlib/src/tests/predicate/document_features_store_test.cpp +++ b/searchlib/src/tests/predicate/document_features_store_test.cpp @@ -166,17 +166,17 @@ TEST("require that both features and ranges are removed by 'remove'") { TEST("require that both features and ranges counts towards memory usage") { DocumentFeaturesStore features_store(10); - EXPECT_EQUAL(332u, features_store.getMemoryUsage().usedBytes()); + EXPECT_EQUAL(364u, features_store.getMemoryUsage().usedBytes()); PredicateTreeAnnotations annotations; annotations.features.push_back(PredicateHash::hash64("foo=100-199")); features_store.insert(annotations, doc_id); - EXPECT_EQUAL(340u, features_store.getMemoryUsage().usedBytes()); + EXPECT_EQUAL(372u, 
features_store.getMemoryUsage().usedBytes()); annotations.features.clear(); annotations.range_features.push_back({"foo", 100, 199}); features_store.insert(annotations, doc_id + 1); - EXPECT_EQUAL(436u, features_store.getMemoryUsage().usedBytes()); + EXPECT_EQUAL(468u, features_store.getMemoryUsage().usedBytes()); } TEST("require that DocumentFeaturesStore can be serialized") { @@ -191,7 +191,7 @@ TEST("require that DocumentFeaturesStore can be serialized") { expectHash("foo=bar", features); expectHash("foo=100-199", features); - vespalib::MMapDataBuffer buffer; + vespalib::DataBuffer buffer; features_store.serialize(buffer); DocumentFeaturesStore features_store2(buffer); @@ -206,17 +206,17 @@ TEST("require that serialization cleans up wordstore") { PredicateTreeAnnotations annotations; annotations.range_features.push_back({"foo", 100, 199}); features_store.insert(annotations, doc_id); - EXPECT_EQUAL(428u, features_store.getMemoryUsage().usedBytes()); + EXPECT_EQUAL(460u, features_store.getMemoryUsage().usedBytes()); annotations.range_features.push_back({"bar", 100, 199}); features_store.insert(annotations, doc_id + 1); - EXPECT_EQUAL(720u, features_store.getMemoryUsage().usedBytes()); + EXPECT_EQUAL(800u, features_store.getMemoryUsage().usedBytes()); features_store.remove(doc_id + 1); - EXPECT_EQUAL(672u, features_store.getMemoryUsage().usedBytes()); + EXPECT_EQUAL(752u, features_store.getMemoryUsage().usedBytes()); - vespalib::MMapDataBuffer buffer; + vespalib::DataBuffer buffer; features_store.serialize(buffer); DocumentFeaturesStore features_store2(buffer); - EXPECT_EQUAL(428u, features_store2.getMemoryUsage().usedBytes()); + EXPECT_EQUAL(460u, features_store2.getMemoryUsage().usedBytes()); } diff --git a/searchlib/src/tests/predicate/predicate_index_test.cpp b/searchlib/src/tests/predicate/predicate_index_test.cpp index b22c80294d0..e4d9aeef1f0 100644 --- a/searchlib/src/tests/predicate/predicate_index_test.cpp +++ 
b/searchlib/src/tests/predicate/predicate_index_test.cpp @@ -277,7 +277,7 @@ TEST("require that PredicateIndex can be (de)serialized") { } index.commit(); - vespalib::MMapDataBuffer buffer; + vespalib::DataBuffer buffer; index.serialize(buffer); uint32_t doc_id_limit; DocIdLimitFinder finder(doc_id_limit); @@ -323,7 +323,7 @@ TEST("require that DocumentFeaturesStore is restored on deserialization") { EXPECT_FALSE(index.getIntervalIndex().lookup(hash).valid()); indexFeature(index, doc_id, min_feature, {{hash, interval}}, {{hash2, bounds}}); - vespalib::MMapDataBuffer buffer; + vespalib::DataBuffer buffer; index.serialize(buffer); uint32_t doc_id_limit; DocIdLimitFinder finder(doc_id_limit); diff --git a/searchlib/src/tests/predicate/simple_index_test.cpp b/searchlib/src/tests/predicate/simple_index_test.cpp index d9cbd473ebf..59e4fc53c05 100644 --- a/searchlib/src/tests/predicate/simple_index_test.cpp +++ b/searchlib/src/tests/predicate/simple_index_test.cpp @@ -24,15 +24,13 @@ struct MyData { }; struct MyDataSerializer : PostingSerializer<MyData> { - void serialize(const MyData &data, - vespalib::MMapDataBuffer& buffer) const { + void serialize(const MyData &data, vespalib::DataBuffer& buffer) const { buffer.writeInt32(data.data); } }; struct MyDataDeserializer : PostingDeserializer<MyData> { - MyData deserialize(vespalib::MMapDataBuffer& buffer) { - return {buffer.readInt32()}; + MyData deserialize(vespalib::DataBuffer& buffer) { return {buffer.readInt32()}; } }; @@ -168,7 +166,7 @@ TEST_FF("require that SimpleIndex can be serialized and deserialized.", Fixture, f1.addPosting(key, id, {id}); } f1.commit(); - vespalib::MMapDataBuffer buffer; + vespalib::DataBuffer buffer; f1.index().serialize(buffer, MyDataSerializer()); MyObserver observer; MyDataDeserializer deserializer; diff --git a/searchlib/src/tests/query/querybuilder_test.cpp b/searchlib/src/tests/query/querybuilder_test.cpp index b64a46e9b18..818324ab3b8 100644 --- 
a/searchlib/src/tests/query/querybuilder_test.cpp +++ b/searchlib/src/tests/query/querybuilder_test.cpp @@ -429,12 +429,12 @@ TEST("require that Invalid Trees Cannot Be Built") { QueryBuilder<SimpleQueryNodeTypes> builder; builder.addAnd(1); ASSERT_TRUE(!builder.build().get()); - EXPECT_EQUAL("Trying to build incomplete query tree.", builder.error()); + EXPECT_EQUAL("QueryBuilderBase::build: QueryBuilder got invalid node structure. _nodes are not empty.", builder.error()); // Adding a node after build() and before reset() is a no-op. builder.addStringTerm(str[0], view[0], id[0], weight[0]); ASSERT_TRUE(!builder.build().get()); - EXPECT_EQUAL("Trying to build incomplete query tree.", builder.error()); + EXPECT_EQUAL("QueryBuilderBase::build: QueryBuilder got invalid node structure. _nodes are not empty.", builder.error()); builder.reset(); EXPECT_TRUE(builder.error().empty()); @@ -444,12 +444,16 @@ TEST("require that Invalid Trees Cannot Be Built") { builder.addStringTerm(str[0], view[0], id[0], weight[0]); builder.addStringTerm(str[1], view[1], id[1], weight[1]); ASSERT_TRUE(!builder.build().get()); - EXPECT_EQUAL("QueryBuilder got invalid node structure.", builder.error()); + EXPECT_EQUAL("QueryBuilderBase::addCompleteNode: QueryBuilder got invalid node structure." + " Incomming node is 'search::query::SimpleStringTerm', while root is non-null('search::query::SimpleAnd')", + builder.error()); // Adding an intermediate node after build() is also a no-op. builder.addAnd(1); ASSERT_TRUE(!builder.build().get()); - EXPECT_EQUAL("QueryBuilder got invalid node structure.", builder.error()); + EXPECT_EQUAL("QueryBuilderBase::addCompleteNode: QueryBuilder got invalid node structure." 
+ " Incomming node is 'search::query::SimpleStringTerm', while root is non-null('search::query::SimpleAnd')", + builder.error()); } TEST("require that Term Index Can Be Added") { @@ -610,6 +614,43 @@ TEST("require that empty intermediate node can be added") { EXPECT_EQUAL(0u, and_node->getChildren().size()); } +TEST("test query parsing error") { + const char * STACK = + "\001\002\001\003\000\005\002\004\001\034F\001\002\004term\004\004term\002dx\004\004term\002ifD\002\004term\001xD\003\004term\002dxE\004\004term\001\060F\005\002\004term" + "\004\004term\006radius\004\004term\002ifD\006\004term\001xD\a\004term\004sizeE\b\004term\001\060D\t\004term\001xF\n\002\004term\004\004term\002dx\004\004term\002ifD\v\004term" + "\001xD\f\004term\004sizeE\r\004term\001\060D\016\004term\002dxD\017\004term\004sizeE\020\004term\001\060F\021\002\004term\004\004term\006radius\004\004term\002ifD\022\004term" + "\001yD\023\004term\001yF\024\002\004term\004\004term\002dy\004\004term\002ifD\025\004term\001yD\026\004term\002dyE\027\004term\001\060F\030\002\004term\004\004term\006radius" + "\004\004term\002ifD\031\004term\001yD\032\004term\004sizeE\033\004term\001\061\004\001 F\034\002\004term\004\004term\001\061\004\004term\001xF\035\002\004term\004\004term" + "\001\061\004\004term\001xF\036\002\004term\004\004term\001\061\004\004term\001y\002\004\001\034F\037\002\016term_variation\004\016term_variation\002dx\004\016term_variation" + "\002ifD \016term_variation\001xD!\016term_variation\002dxE\"\016term_variation\001\060F#\002\016term_variation\004\016term_variation\006radius\004\016term_variation" + "\002ifD$\016term_variation\001xD%\016term_variation\004sizeE&\016term_variation\001\060D'\016term_variation\001xF(\002\016term_variation\004\016term_variation" + "\002dx\004\016term_variation\002ifD)\016term_variation\001xD*\016term_variation\004sizeE+\016term_variation\001\060D,\016term_variation\002dxD-\016term_variation\004size" + 
"E.\016term_variation\001\060F/\002\016term_variation\004\016term_variation\006radius\004\016term_variation\002ifD0\016term_variation\001yD1\016term_variation" + "\001yF2\002\016term_variation\004\016term_variation\002dy\004\016term_variation\002ifD3\016term_variation\001yD4\016term_variation\002dyE5\016term_variation" + "\001\060F6\002\016term_variation\004\016term_variation\006radius\004\016term_variation\002ifD7\016term_variation\001yD8\016term_variation\004sizeE9\016term_variation" + "\001\061\004\001 F:\002\016term_variation\004\016term_variation\001\061\004\016term_variation\001xF;\002\016term_variation\004\016term_variation\001\061\004\016term_variation" + "\001xF<\002\016term_variation\004\016term_variation\001\061\004\016term_variation\001yD=\000\tvariation\002\004\001\034F>\002\004term\004\004term\002dx\004\004term\002ifD?\004term" + "\001xD\200@\004term\002dxE\200A\004term\001\060F\200B\002\004term\004\004term\006radius\004\004term\002ifD\200C\004term\001xD\200D\004term\004sizeE\200E\004term\001\060D\200F\004term" + "\001xF\200G\002\004term\004\004term\002dx\004\004term\002ifD\200H\004term\001xD\200I\004term\004sizeE\200J\004term\001\060D\200K\004term\002dxD\200L\004term\004sizeE\200M\004term" + "\001\060F\200N\002\004term\004\004term\006radius\004\004term\002ifD\200O\004term\001yD\200P\004term\001yF\200Q\002\004term\004\004term\002dy\004\004term\002ifD\200R\004term" + "\001yD\200S\004term\002dyE\200T\004term\001\060F\200U\002\004term\004\004term\006radius\004\004term\002ifD\200V\004term\001yD\200W\004term\004sizeE\200X\004term" + "\001\061\004\001 F\200Y\002\004term\004\004term\001\061\004\004term\001xF\200Z\002\004term\004\004term\001\061\004\004term\001xF\200[\002\004term\004\004term\001\061\004\004term" + "\001y\002\004\001\034F\200\\\002\016term_variation\004\016term_variation\002dx\004\016term_variation\002ifD\200]\016term_variation\001xD\200^\016term_variation" + 
"\002dxE\200_\016term_variation\001\060F\200`\002\016term_variation\004\016term_variation\006radius\004\016term_variation\002ifD\200a\016term_variation\001xD\200b\016term_variation" + "\004sizeE\200c\016term_variation\001\060D\200d\016term_variation\001xF\200e\002\016term_variation\004\016term_variation\002dx\004\016term_variation\002ifD\200f\016term_variation" + "\001xD\200g\016term_variation\004sizeE\200h\016term_variation\001\060D\200i\016term_variation\002dxD\200j\016term_variation\004sizeE\200k\016term_variation" + "\001\060F\200l\002\016term_variation\004\016term_variation\006radius\004\016term_variation\002ifD\200m\016term_variation\001yD\200n\016term_variation\001yF\200o\002\016term_variation" + "\004\016term_variation\002dy\004\016term_variation\002ifD\200p\016term_variation\001yD\200q\016term_variation\002dyE\200r\016term_variation\001\060F\200s\002\016term_variation" + "\004\016term_variation\006radius\004\016term_variation\002ifD\200t\016term_variation\001yD\200u\016term_variation\004sizeE\200v\016term_variation" + "\001\061\004\001 F\200w\002\016term_variation\004\016term_variation\001\061\004\016term_variation\001xF\200x\002\016term_variation\004\016term_variation\001\061\004\016term_variation" + "\001xF\200y\002\016term_variation\004\016term_variation\001\061\004\016term_variation\001yĀz\n\vsource_lang\002jaĀ{\n\vtarget_lang\002en\000\002Ā|\v\alicense" + "\017countrycode_allĀ}\v\alicense\016countrycode_tw"; + string stackDump(STACK, 2936); + SimpleQueryStackDumpIterator iterator(stackDump); + Node::UP new_node = QueryTreeCreator<SimpleQueryNodeTypes>::create(iterator); + EXPECT_FALSE(new_node); +} + } // namespace TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/searchlib/src/tests/stackdumpiterator/stackdumpiteratortest.cpp b/searchlib/src/tests/stackdumpiterator/stackdumpiteratortest.cpp index 19ce69550f7..326b2c0b280 100644 --- a/searchlib/src/tests/stackdumpiterator/stackdumpiteratortest.cpp +++ 
b/searchlib/src/tests/stackdumpiterator/stackdumpiteratortest.cpp @@ -137,10 +137,6 @@ StackDumpIteratorTest::ShowResult(int testNo, unsigned int expected) { unsigned int results = 0; - const char *idx_ptr; - const char *term_ptr; - size_t idx_len; - size_t term_len; int num = 0; @@ -149,16 +145,16 @@ StackDumpIteratorTest::ShowResult(int testNo, printf("%03d: ", testNo); while (actual.next()) { - actual.getIndexName(&idx_ptr, &idx_len); - actual.getTerm(&term_ptr, &term_len); + vespalib::stringref idx = actual.getIndexName(); + vespalib::stringref term = actual.getTerm(); #if 0 printf("StackItem #%d: %d %d '%.*s:%.*s'\n", actual.getNum(), actual.getType(), actual.getArity(), - idx_len, idx_ptr, - term_len, term_ptr); + idx.size(), idx.c_str(), + term.size(), term.c_str()); #endif item = correct.Pop(); @@ -178,12 +174,12 @@ StackDumpIteratorTest::ShowResult(int testNo, delete item; break; } - if (strncmp(item->_indexName.c_str(), idx_ptr, idx_len) != 0) { + if (strncmp(item->_indexName.c_str(), idx.c_str(), idx.size()) != 0) { results |= ITERATOR_ERROR_WRONG_INDEX; delete item; break; } - if (strncmp(item->_term.c_str(), term_ptr, term_len) != 0) { + if (strncmp(item->_term.c_str(), term.c_str(), term.size()) != 0) { results |= ITERATOR_ERROR_WRONG_TERM; delete item; break; diff --git a/searchlib/src/vespa/searchlib/attribute/changevector.h b/searchlib/src/vespa/searchlib/attribute/changevector.h index a714e502588..90a8ff7e2d4 100644 --- a/searchlib/src/vespa/searchlib/attribute/changevector.h +++ b/searchlib/src/vespa/searchlib/attribute/changevector.h @@ -140,7 +140,7 @@ template <typename T> class ChangeVectorT : public ChangeVectorBase { private: typedef vespalib::hash_map<uint32_t, uint32_t> Map; - typedef vespalib::Array<T, vespalib::DefaultAlloc> Vector; + typedef vespalib::Array<T> Vector; public: ChangeVectorT() : _tail(0) { } class const_iterator { diff --git a/searchlib/src/vespa/searchlib/attribute/enumstore.h 
b/searchlib/src/vespa/searchlib/attribute/enumstore.h index 0f0675248a7..18bb676644e 100644 --- a/searchlib/src/vespa/searchlib/attribute/enumstore.h +++ b/searchlib/src/vespa/searchlib/attribute/enumstore.h @@ -154,7 +154,7 @@ public: uint32_t _refCount; }; - typedef vespalib::Array<UniqueEntry, vespalib::DefaultAlloc> Uniques; + typedef vespalib::Array<UniqueEntry> Uniques; private: Uniques _uniques; uint64_t _bufferSize; diff --git a/searchlib/src/vespa/searchlib/attribute/enumstorebase.h b/searchlib/src/vespa/searchlib/attribute/enumstorebase.h index 3b8b9823d87..4c2d35d645f 100644 --- a/searchlib/src/vespa/searchlib/attribute/enumstorebase.h +++ b/searchlib/src/vespa/searchlib/attribute/enumstorebase.h @@ -34,8 +34,8 @@ class EnumStoreComparatorWrapper; typedef btree::DataStoreT<btree::AlignedEntryRefT<31, 4> > EnumStoreDataStoreType; typedef EnumStoreDataStoreType::RefType EnumStoreIndex; -typedef vespalib::Array<EnumStoreIndex, vespalib::DefaultAlloc> EnumStoreIndexVector; -typedef vespalib::Array<uint32_t, vespalib::DefaultAlloc> EnumStoreEnumVector; +typedef vespalib::Array<EnumStoreIndex> EnumStoreIndexVector; +typedef vespalib::Array<uint32_t> EnumStoreEnumVector; typedef btree::BTreeTraits<32, 32, 7, true> EnumTreeTraits; diff --git a/searchlib/src/vespa/searchlib/attribute/iattributefilewriter.h b/searchlib/src/vespa/searchlib/attribute/iattributefilewriter.h index ec0fbf8e6a4..d2d5cb86cf5 100644 --- a/searchlib/src/vespa/searchlib/attribute/iattributefilewriter.h +++ b/searchlib/src/vespa/searchlib/attribute/iattributefilewriter.h @@ -16,7 +16,7 @@ class BufferWriter; class IAttributeFileWriter { public: - using BufferBuf = vespalib::MMapDataBuffer; + using BufferBuf = vespalib::DataBuffer; using Buffer = std::unique_ptr<BufferBuf>; virtual ~IAttributeFileWriter() = default; diff --git a/searchlib/src/vespa/searchlib/attribute/loadedenumvalue.h b/searchlib/src/vespa/searchlib/attribute/loadedenumvalue.h index 47b7eab1b83..749247b0e78 100644 --- 
a/searchlib/src/vespa/searchlib/attribute/loadedenumvalue.h +++ b/searchlib/src/vespa/searchlib/attribute/loadedenumvalue.h @@ -94,7 +94,7 @@ public: } }; -typedef vespalib::Array<LoadedEnumAttribute, vespalib::DefaultAlloc> LoadedEnumAttributeVector; +typedef vespalib::Array<LoadedEnumAttribute> LoadedEnumAttributeVector; /** diff --git a/searchlib/src/vespa/searchlib/attribute/loadednumericvalue.cpp b/searchlib/src/vespa/searchlib/attribute/loadednumericvalue.cpp index bea214ed8bd..1fb2434167f 100644 --- a/searchlib/src/vespa/searchlib/attribute/loadednumericvalue.cpp +++ b/searchlib/src/vespa/searchlib/attribute/loadednumericvalue.cpp @@ -12,9 +12,7 @@ namespace attribute template <typename T> void -sortLoadedByValue(SequentialReadModifyWriteVector<LoadedNumericValue<T>, - vespalib::DefaultAlloc> & - loaded) +sortLoadedByValue(SequentialReadModifyWriteVector<LoadedNumericValue<T>> & loaded) { ShiftBasedRadixSorter<LoadedNumericValue<T>, typename LoadedNumericValue<T>::ValueRadix, @@ -29,9 +27,7 @@ sortLoadedByValue(SequentialReadModifyWriteVector<LoadedNumericValue<T>, template <typename T> void -sortLoadedByDocId(SequentialReadModifyWriteVector<LoadedNumericValue<T>, - vespalib::DefaultAlloc> & - loaded) +sortLoadedByDocId(SequentialReadModifyWriteVector<LoadedNumericValue<T>> & loaded) { ShiftBasedRadixSorter<LoadedNumericValue<T>, typename LoadedNumericValue<T>::DocRadix, @@ -46,75 +42,51 @@ sortLoadedByDocId(SequentialReadModifyWriteVector<LoadedNumericValue<T>, template void -sortLoadedByValue(SequentialReadModifyWriteVector<LoadedNumericValue<int8_t>, - vespalib::DefaultAlloc> & - loaded); +sortLoadedByValue(SequentialReadModifyWriteVector<LoadedNumericValue<int8_t>> & loaded); template void -sortLoadedByValue(SequentialReadModifyWriteVector<LoadedNumericValue<int16_t>, - vespalib::DefaultAlloc> & - loaded); +sortLoadedByValue(SequentialReadModifyWriteVector<LoadedNumericValue<int16_t>> & loaded); template void 
-sortLoadedByValue(SequentialReadModifyWriteVector<LoadedNumericValue<int32_t>, - vespalib::DefaultAlloc> & - loaded); +sortLoadedByValue(SequentialReadModifyWriteVector<LoadedNumericValue<int32_t>> & loaded); template void -sortLoadedByValue(SequentialReadModifyWriteVector<LoadedNumericValue<int64_t>, - vespalib::DefaultAlloc> & - loaded); +sortLoadedByValue(SequentialReadModifyWriteVector<LoadedNumericValue<int64_t>> & loaded); template void -sortLoadedByValue(SequentialReadModifyWriteVector<LoadedNumericValue<float>, - vespalib::DefaultAlloc> & - loaded); +sortLoadedByValue(SequentialReadModifyWriteVector<LoadedNumericValue<float>> & loaded); template void -sortLoadedByValue(SequentialReadModifyWriteVector<LoadedNumericValue<double>, - vespalib::DefaultAlloc> & - loaded); +sortLoadedByValue(SequentialReadModifyWriteVector<LoadedNumericValue<double>> & loaded); template void -sortLoadedByDocId(SequentialReadModifyWriteVector<LoadedNumericValue<int8_t>, - vespalib::DefaultAlloc> & - loaded); +sortLoadedByDocId(SequentialReadModifyWriteVector<LoadedNumericValue<int8_t>> & loaded); template void -sortLoadedByDocId(SequentialReadModifyWriteVector<LoadedNumericValue<int16_t>, - vespalib::DefaultAlloc> & - loaded); +sortLoadedByDocId(SequentialReadModifyWriteVector<LoadedNumericValue<int16_t>> & loaded); template void -sortLoadedByDocId(SequentialReadModifyWriteVector<LoadedNumericValue<int32_t>, - vespalib::DefaultAlloc> & - loaded); +sortLoadedByDocId(SequentialReadModifyWriteVector<LoadedNumericValue<int32_t>> & loaded); template void -sortLoadedByDocId(SequentialReadModifyWriteVector<LoadedNumericValue<int64_t>, - vespalib::DefaultAlloc> & - loaded); +sortLoadedByDocId(SequentialReadModifyWriteVector<LoadedNumericValue<int64_t>> & loaded); template void -sortLoadedByDocId(SequentialReadModifyWriteVector<LoadedNumericValue<float>, - vespalib::DefaultAlloc> & - loaded); +sortLoadedByDocId(SequentialReadModifyWriteVector<LoadedNumericValue<float>> & loaded); template 
void -sortLoadedByDocId(SequentialReadModifyWriteVector<LoadedNumericValue<double>, - vespalib::DefaultAlloc> & - loaded); +sortLoadedByDocId(SequentialReadModifyWriteVector<LoadedNumericValue<double>> & loaded); diff --git a/searchlib/src/vespa/searchlib/attribute/loadednumericvalue.h b/searchlib/src/vespa/searchlib/attribute/loadednumericvalue.h index a8855ba5f15..23b6f02d2fa 100644 --- a/searchlib/src/vespa/searchlib/attribute/loadednumericvalue.h +++ b/searchlib/src/vespa/searchlib/attribute/loadednumericvalue.h @@ -6,12 +6,9 @@ #include <vespa/searchlib/util/fileutil.h> #include "loadedvalue.h" +namespace search { -namespace search -{ - -namespace attribute -{ +namespace attribute { /** * Temporary representation of enumerated attribute loaded from non-enumerated @@ -21,20 +18,12 @@ namespace attribute template <typename T> struct LoadedNumericValue : public LoadedValue<T> { - LoadedNumericValue() - : LoadedValue<T>() - { - } + LoadedNumericValue() : LoadedValue<T>() { } - class ValueCompare : public std::binary_function<LoadedNumericValue<T>, - LoadedNumericValue<T>, - bool> + class ValueCompare : public std::binary_function<LoadedNumericValue<T>, LoadedNumericValue<T>, bool> { public: - bool - operator()(const LoadedNumericValue<T> &x, - const LoadedNumericValue<T> &y) const - { + bool operator()(const LoadedNumericValue<T> &x, const LoadedNumericValue<T> &y) const { return x < y; } }; @@ -42,9 +31,7 @@ struct LoadedNumericValue : public LoadedValue<T> class ValueRadix { public: - uint64_t - operator()(const LoadedValue<T> &v) const - { + uint64_t operator()(const LoadedValue<T> &v) const { return vespalib::convertForSort<T, true>::convert(v.getValue()); } }; @@ -53,15 +40,11 @@ struct LoadedNumericValue : public LoadedValue<T> template <typename T> void -sortLoadedByValue(SequentialReadModifyWriteVector<LoadedNumericValue<T>, - vespalib::DefaultAlloc> & - loaded); +sortLoadedByValue(SequentialReadModifyWriteVector<LoadedNumericValue<T>> & loaded); template 
<typename T> void -sortLoadedByDocId(SequentialReadModifyWriteVector<LoadedNumericValue<T>, - vespalib::DefaultAlloc> & - loaded); +sortLoadedByDocId(SequentialReadModifyWriteVector<LoadedNumericValue<T>> & loaded); } // namespace attribute diff --git a/searchlib/src/vespa/searchlib/attribute/loadedstringvalue.cpp b/searchlib/src/vespa/searchlib/attribute/loadedstringvalue.cpp index 335abb799b0..478b7a5e7a9 100644 --- a/searchlib/src/vespa/searchlib/attribute/loadedstringvalue.cpp +++ b/searchlib/src/vespa/searchlib/attribute/loadedstringvalue.cpp @@ -3,18 +3,17 @@ #include <vespa/fastos/fastos.h> #include "loadedstringvalue.h" +using vespalib::Array; +using vespalib::alloc::MMapAllocFactory; -namespace search -{ +namespace search { -namespace attribute -{ +namespace attribute { void sortLoadedByValue(LoadedStringVectorReal &loaded) { - vespalib::Array<unsigned, vespalib::MMapAlloc> - radixScratchPad(loaded.size()); + Array<unsigned> radixScratchPad(loaded.size(), MMapAllocFactory::create()); for(size_t i(0), m(loaded.size()); i < m; i++) { loaded[i].prepareRadixSort(); } diff --git a/searchlib/src/vespa/searchlib/attribute/loadedstringvalue.h b/searchlib/src/vespa/searchlib/attribute/loadedstringvalue.h index 87e2574bdb6..15a29096e20 100644 --- a/searchlib/src/vespa/searchlib/attribute/loadedstringvalue.h +++ b/searchlib/src/vespa/searchlib/attribute/loadedstringvalue.h @@ -78,7 +78,7 @@ typedef RadixSortable<LoadedValue<const char *> > LoadedStringValue; typedef SequentialReadModifyWriteInterface<LoadedStringValue> LoadedStringVector; -typedef SequentialReadModifyWriteVector<LoadedStringValue, vespalib::DefaultAlloc> +typedef SequentialReadModifyWriteVector<LoadedStringValue> LoadedStringVectorReal; diff --git a/searchlib/src/vespa/searchlib/attribute/multinumericenumattribute.h b/searchlib/src/vespa/searchlib/attribute/multinumericenumattribute.h index 8ef3d28549b..fae23b72dba 100644 --- a/searchlib/src/vespa/searchlib/attribute/multinumericenumattribute.h +++ 
b/searchlib/src/vespa/searchlib/attribute/multinumericenumattribute.h @@ -29,7 +29,7 @@ protected: typedef typename B::BaseClass::largeint_t largeint_t; typedef typename B::BaseClass::LoadedNumericValueT LoadedNumericValueT; typedef typename B::BaseClass::LoadedVector LoadedVector; - typedef SequentialReadModifyWriteVector<LoadedNumericValueT, vespalib::DefaultAlloc> LoadedVectorR; + typedef SequentialReadModifyWriteVector<LoadedNumericValueT> LoadedVectorR; typedef typename B::BaseClass::Weighted Weighted; typedef typename B::BaseClass::WeightedInt WeightedInt; typedef typename B::BaseClass::WeightedFloat WeightedFloat; diff --git a/searchlib/src/vespa/searchlib/attribute/multivaluemapping.cpp b/searchlib/src/vespa/searchlib/attribute/multivaluemapping.cpp index e8e21073323..6c031116e5e 100644 --- a/searchlib/src/vespa/searchlib/attribute/multivaluemapping.cpp +++ b/searchlib/src/vespa/searchlib/attribute/multivaluemapping.cpp @@ -2,12 +2,13 @@ #include <vespa/fastos/fastos.h> #include <vespa/log/log.h> -LOG_SETUP(".searchlib.attribute.multivaluemapping"); #include "multivaluemapping.h" #include "multivaluemapping.hpp" #include "attributevector.h" #include "loadedenumvalue.h" +LOG_SETUP(".searchlib.attribute.multivaluemapping"); + namespace search { using vespalib::GenerationHeldBase; @@ -43,8 +44,7 @@ MultiValueMappingBaseBase:: computeNewSize(size_t used, size_t dead, size_t needed, size_t maxSize) { float growRatio = 1.5f; - size_t newSize = static_cast<size_t> - ((used - dead + needed) * growRatio); + size_t newSize = static_cast<size_t>((used - dead + needed) * growRatio); if (newSize <= maxSize) return newSize; newSize = (used - dead + needed) + 1000000; @@ -54,14 +54,14 @@ computeNewSize(size_t used, size_t dead, size_t needed, size_t maxSize) return 0; } -MultiValueMappingBaseBase::Histogram::Histogram(size_t maxValues) : +MultiValueMappingBaseBase::Histogram::Histogram(uint32_t maxValues) : _maxValues(maxValues), _histogram() { } 
MultiValueMappingBaseBase::Histogram -MultiValueMappingBaseBase::getEmptyHistogram(size_t maxValues) const +MultiValueMappingBaseBase::getEmptyHistogram(uint32_t maxValues) const { return Histogram(maxValues); } @@ -81,7 +81,7 @@ MultiValueMappingBaseBase::getHistogram(AttributeVector::ReaderBase &reader) void -MultiValueMappingBaseBase::clearPendingCompact(void) +MultiValueMappingBaseBase::clearPendingCompact() { if (!_pendingCompact || _pendingCompactVectorVector || !_pendingCompactSingleVector.empty()) @@ -109,7 +109,7 @@ public: } virtual - ~MultiValueMappingHeldVector(void) + ~MultiValueMappingHeldVector() { _mvmb.doneHoldVector(_idx); } @@ -119,11 +119,6 @@ public: template <typename I> void MultiValueMappingBase<I>::doneHoldVector(Index idx) { -#ifdef LOG_MULTIVALUE_MAPPING - LOG(info, - "free vector: idx.values() = %u, idx.alternative() = %u", - idx.values(), idx.alternative()); -#endif clearVector(idx); if (idx.values() < Index::maxValues()) { _singleVectorsStatus[idx.vectorIdx()] = FREE; @@ -139,13 +134,13 @@ MultiValueMappingBase<I>::getMemoryUsage() const { MemoryUsage retval = _indices.getMemoryUsage(); - for (uint32_t i = 0; i < _singleVectorsStatus.size(); ++i) { + for (size_t i = 0; i < _singleVectorsStatus.size(); ++i) { if (_singleVectorsStatus[i] == HOLD) continue; const MemoryUsage & memUsage(getSingleVectorUsage(i)); retval.merge(memUsage); } - for (uint32_t i = 0; i < _vectorVectorsStatus.size(); ++i) { + for (size_t i = 0; i < _vectorVectorsStatus.size(); ++i) { if (_vectorVectorsStatus[i] == HOLD) continue; const MemoryUsage & memUsage(getVectorVectorUsage(i)); @@ -160,12 +155,12 @@ AddressSpace MultiValueMappingBase<I>::getAddressSpaceUsage() const { size_t addressSpaceUsed = 0; - for (uint32_t i = 0; i < _singleVectorsStatus.size(); ++i) { + for (size_t i = 0; i < _singleVectorsStatus.size(); ++i) { if (_singleVectorsStatus[i] == ACTIVE) { addressSpaceUsed = std::max(addressSpaceUsed, getSingleVectorAddressSpaceUsed(i)); } } - for 
(uint32_t i = 0; i < _vectorVectorsStatus.size(); ++i) { + for (size_t i = 0; i < _vectorVectorsStatus.size(); ++i) { if (_vectorVectorsStatus[i] == ACTIVE) { addressSpaceUsed = std::max(addressSpaceUsed, getVectorVectorAddressSpaceUsed(i)); } diff --git a/searchlib/src/vespa/searchlib/attribute/multivaluemapping.h b/searchlib/src/vespa/searchlib/attribute/multivaluemapping.h index 3134f826774..e29aa196f24 100644 --- a/searchlib/src/vespa/searchlib/attribute/multivaluemapping.h +++ b/searchlib/src/vespa/searchlib/attribute/multivaluemapping.h @@ -34,58 +34,25 @@ public: : _idx(0) { _idx += static_cast<T>(values_) << (NUM_ALT_BITS+NUM_OFFSET_BITS); - _idx += static_cast<T>((alternative_) & - ((1<<NUM_ALT_BITS) - 1)) << NUM_OFFSET_BITS; + _idx += static_cast<T>((alternative_) & ((1<<NUM_ALT_BITS) - 1)) << NUM_OFFSET_BITS; _idx += offset_; } - uint32_t - values(void) const - { - return _idx >> (NUM_ALT_BITS+NUM_OFFSET_BITS); - } - - uint32_t - alternative(void) const - { - return (_idx >> NUM_OFFSET_BITS) & ((1<<NUM_ALT_BITS) - 1); - } + uint32_t values() const { return _idx >> (NUM_ALT_BITS+NUM_OFFSET_BITS); } + uint32_t alternative() const { return (_idx >> NUM_OFFSET_BITS) & ((1<<NUM_ALT_BITS) - 1); } // values and alternative combined - uint32_t - vectorIdx(void) const - { - return _idx >> NUM_OFFSET_BITS; - } - - uint32_t offset(void) const - { - return (_idx & ((1u << NUM_OFFSET_BITS) - 1)); - } - - T idx() const { return _idx; } - - static uint32_t - maxValues(void) - { - return (1 << NUM_VALUE_BITS) - 1; - } + uint32_t vectorIdx() const { return _idx >> NUM_OFFSET_BITS; } + uint64_t offset() const { return (_idx & ((1ul << NUM_OFFSET_BITS) - 1)); } + T idx() const { return _idx; } - static uint32_t - alternativeSize(void) - { - return 1 << NUM_ALT_BITS; - } - - static T - offsetSize(void) - { - return 1 << (NUM_OFFSET_BITS); - } + static uint32_t maxValues() { return (1 << NUM_VALUE_BITS) - 1; } + static uint32_t alternativeSize() { return 1 << NUM_ALT_BITS; } 
+ static uint64_t offsetSize() { return 1ul << (NUM_OFFSET_BITS); } }; typedef Index<uint32_t, 27,4,1> Index32; -typedef Index<uint64_t, 31,10,1> Index64; +typedef Index<uint64_t, 32,12,1> Index64; template <typename T, typename I> struct MVMTemplateArg { @@ -103,33 +70,21 @@ public: _dead(0), _wantCompact(false), _usage() - { - } + { } - uint32_t used() const { return _used; } - uint32_t dead() const { return _dead; } + size_t used() const { return _used; } + size_t dead() const { return _dead; } void incUsed(uint32_t inc) { _used += inc; } void incDead(uint32_t inc) { _dead += inc; } - - void - setWantCompact(void) - { - _wantCompact = true; - } - - bool - getWantCompact(void) const - { - return _wantCompact; - } - + void setWantCompact() { _wantCompact = true; } + bool getWantCompact() const { return _wantCompact; } MemoryUsage & getUsage() { return _usage; } const MemoryUsage & getUsage() const { return _usage; } protected: void reset() { _used = 0; _dead = 0; } private: - uint32_t _used; - uint32_t _dead; + size_t _used; + size_t _dead; bool _wantCompact; MemoryUsage _usage; }; @@ -141,13 +96,13 @@ public: class Histogram { private: - typedef vespalib::hash_map<uint32_t, uint32_t> HistogramM; + typedef vespalib::hash_map<uint32_t, size_t> HistogramM; public: typedef HistogramM::const_iterator const_iterator; - Histogram(size_t maxValues); - uint32_t & operator [] (uint32_t i) { return _histogram[std::min(i, _maxValues)]; } + Histogram(uint32_t maxValues); + size_t & operator [] (uint32_t i) { return _histogram[std::min(i, _maxValues)]; } const_iterator begin() const { return _histogram.begin(); } - const_iterator end() const { return _histogram.end(); } + const_iterator end() const { return _histogram.end(); } private: uint32_t _maxValues; HistogramM _histogram; @@ -164,7 +119,7 @@ protected: }; typedef AttributeVector::generation_t generation_t; - typedef vespalib::Array<VectorStatus> StatusVector; + typedef std::vector<VectorStatus> StatusVector; typedef 
vespalib::GenerationHolder GenerationHolder; // active -> hold @@ -177,7 +132,7 @@ protected: std::set<uint32_t> _pendingCompactSingleVector; bool _pendingCompactVectorVector; bool _pendingCompact; - Histogram getEmptyHistogram(size_t maxValues) const; + Histogram getEmptyHistogram(uint32_t maxValues) const; virtual const MemoryUsage & getSingleVectorUsage(size_t i) const = 0; virtual const MemoryUsage & getVectorVectorUsage(size_t i) const = 0; virtual size_t getSingleVectorAddressSpaceUsed(size_t i) const = 0; @@ -192,22 +147,15 @@ public: Histogram getHistogram(AttributeVector::ReaderBase & reader) const; size_t getTotalValueCnt() const { return _totalValueCnt; } static void failNewSize(uint64_t minNewSize, uint64_t maxSize); + void clearPendingCompact(); - void - clearPendingCompact(void); + static size_t computeNewSize(size_t used, size_t dead, size_t needed, size_t maxSize); - static size_t - computeNewSize(size_t used, size_t dead, size_t needed, size_t maxSize); - - void - transferHoldLists(generation_t generation) - { + void transferHoldLists(generation_t generation) { _genHolder.transferHoldLists(generation); } - void - trimHoldLists(generation_t firstUsed) - { + void trimHoldLists(generation_t firstUsed) { _genHolder.trimHoldLists(firstUsed); } }; @@ -236,73 +184,38 @@ private: virtual void clearVector(Index idx) = 0; public: - using IndexCopyVector = vespalib::Array<Index, vespalib::DefaultAlloc>; + using IndexCopyVector = vespalib::Array<Index>; - void - doneHoldVector(Index idx); + void doneHoldVector(Index idx); - virtual Histogram getEmptyHistogram() const override { + Histogram getEmptyHistogram() const override { return MultiValueMappingBaseBase::getEmptyHistogram(Index::maxValues()); } - virtual MemoryUsage getMemoryUsage() const override; - + MemoryUsage getMemoryUsage() const override; AddressSpace getAddressSpaceUsage() const; + size_t getNumKeys() const { return _indices.size(); } + size_t getCapacityKeys() const { return _indices.capacity(); 
} - size_t getNumKeys(void) const - { - return _indices.size(); - } - - size_t getCapacityKeys(void) const - { - return _indices.capacity(); - } - - IndexCopyVector - getIndicesCopy() const - { + IndexCopyVector getIndicesCopy() const { uint32_t size = _committedDocIdLimit; assert(size <= _indices.size()); - return std::move(IndexCopyVector(&_indices[0], &_indices[0] + size)); + return IndexCopyVector(&_indices[0], &_indices[0] + size); } - bool - hasKey(uint32_t key) const - { - return key < _indices.size(); - } - - bool - hasReaderKey(uint32_t key) const - { + bool hasReaderKey(uint32_t key) const { return key < _committedDocIdLimit && key < _indices.size(); } - bool - isFull(void) const - { - return _indices.isFull(); - } - - static size_t - maxValues(void) - { - return Index::maxValues(); - } - - void - addKey(uint32_t & key); - - void - shrinkKeys(uint32_t newSize); - - void - clearDocs(uint32_t lidLow, uint32_t lidLimit, AttributeVector &v); - + bool hasKey(uint32_t key) const { return key < _indices.size(); } + bool isFull() const { return _indices.isFull(); } + void addKey(uint32_t & key); + void shrinkKeys(uint32_t newSize); + void clearDocs(uint32_t lidLow, uint32_t lidLimit, AttributeVector &v); void holdElem(Index idx, size_t size); - virtual void doneHoldElem(Index idx) = 0; + + static size_t maxValues() { return Index::maxValues(); } }; extern template class MultiValueMappingBase<multivalue::Index32>; @@ -314,37 +227,33 @@ class MultiValueMappingFallbackVectorHold { V _hold; public: - MultiValueMappingFallbackVectorHold(size_t size, - V &rhs) + MultiValueMappingFallbackVectorHold(size_t size, V &rhs) : vespalib::GenerationHeldBase(size), _hold() { _hold.swap(rhs); } - virtual - ~MultiValueMappingFallbackVectorHold(void) - { - } + virtual ~MultiValueMappingFallbackVectorHold() { } }; template <typename VT> -class MultiValueMappingVector : public vespalib::Array<VT, vespalib::DefaultAlloc>, +class MultiValueMappingVector : public vespalib::Array<VT>, 
public MultiValueMappingVectorBaseBase { public: - typedef vespalib::Array<VT, vespalib::DefaultAlloc> VectorBase; + typedef vespalib::Array<VT> VectorBase; typedef MultiValueMappingFallbackVectorHold<VectorBase> FallBackHold; MultiValueMappingVector(); - MultiValueMappingVector(uint32_t n); + MultiValueMappingVector(size_t n); MultiValueMappingVector(const MultiValueMappingVector & rhs); MultiValueMappingVector & operator=(const MultiValueMappingVector & rhs); ~MultiValueMappingVector(); - void reset(uint32_t n); - uint32_t remaining() const { return this->size() - used(); } + void reset(size_t n); + size_t remaining() const { return this->size() - used(); } void swapVector(MultiValueMappingVector & rhs); vespalib::GenerationHeldBase::UP @@ -391,7 +300,7 @@ private: virtual const MemoryUsage & getVectorVectorUsage(size_t i) const override; virtual size_t getSingleVectorAddressSpaceUsed(size_t i) const override; virtual size_t getVectorVectorAddressSpaceUsed(size_t i) const override; - void initVectors(uint32_t initSize); + void initVectors(size_t initSize); void initVectors(const Histogram & initCapacity); bool getValidIndex(Index & newIdx, uint32_t numValues); @@ -420,7 +329,7 @@ private: vec.incDead(numValues); vec.getUsage().incDeadBytes(numValues * sizeof(T)); } - void swapVector(SingleVector & vec, uint32_t initSize) { + void swapVector(SingleVector & vec, size_t initSize) { SingleVector(initSize).swapVector(vec); vec.getUsage().setAllocatedBytes(initSize * sizeof(T)); } @@ -433,7 +342,7 @@ private: void incDead(VectorVector & vec) { vec.incDead(1); } - void swapVector(VectorVector & vec, uint32_t initSize) { + void swapVector(VectorVector & vec, size_t initSize) { VectorVector(initSize).swapVector(vec); vec.getUsage().setAllocatedBytes(initSize * sizeof(VectorBase)); } @@ -443,13 +352,13 @@ public: MultiValueMappingT(uint32_t &committedDocIdLimit, const GrowStrategy & gs = GrowStrategy()); MultiValueMappingT(uint32_t &committedDocIdLimit, - uint32_t numKeys, 
uint32_t initSize = 0, + uint32_t numKeys, size_t initSize = 0, const GrowStrategy & gs = GrowStrategy()); MultiValueMappingT(uint32_t &committedDocIdLimit, uint32_t numKeys, const Histogram & initCapacity, const GrowStrategy & gs = GrowStrategy()); ~MultiValueMappingT(); - void reset(uint32_t numKeys, uint32_t initSize = 0); + void reset(uint32_t numKeys, size_t initSize = 0); void reset(uint32_t numKeys, const Histogram & initCapacity); uint32_t get(uint32_t key, std::vector<T> & buffer) const; template <typename BufferType> @@ -498,11 +407,6 @@ public: bool hasWeights); virtual void doneHoldElem(Index idx) override; - -#ifdef DEBUG_MULTIVALUE_MAPPING - void printContent() const; - void printVectorVectors() const; -#endif }; //----------------------------------------------------------------------------- @@ -521,7 +425,7 @@ MultiValueMappingVector<VT>::~MultiValueMappingVector() } template <typename VT> -MultiValueMappingVector<VT>::MultiValueMappingVector(uint32_t n) +MultiValueMappingVector<VT>::MultiValueMappingVector(size_t n) : VectorBase(), MultiValueMappingVectorBaseBase() { @@ -549,7 +453,7 @@ MultiValueMappingVector<VT>::operator=(const MultiValueMappingVector & rhs) template <typename VT> void -MultiValueMappingVector<VT>::reset(uint32_t n) +MultiValueMappingVector<VT>::reset(size_t n) { this->resize(n); MultiValueMappingVectorBaseBase::reset(); @@ -586,9 +490,9 @@ MultiValueMappingVector<VT>::fallbackResize(uint64_t newSize) template <typename T, typename I> void -MultiValueMappingT<T, I>::initVectors(uint32_t initSize) +MultiValueMappingT<T, I>::initVectors(size_t initSize) { - for (uint32_t i = 0; i < this->_singleVectorsStatus.size(); ++i) { + for (size_t i = 0; i < this->_singleVectorsStatus.size(); ++i) { if (i % Index::alternativeSize() == 0) { swapVector(_singleVectors[i], initSize); this->_singleVectorsStatus[i] = MultiValueMappingBaseBase::ACTIVE; @@ -597,7 +501,7 @@ MultiValueMappingT<T, I>::initVectors(uint32_t initSize) 
this->_singleVectorsStatus[i] = MultiValueMappingBaseBase::FREE; } } - for (uint32_t i = 0; i < this->_vectorVectorsStatus.size(); ++i) { + for (size_t i = 0; i < this->_vectorVectorsStatus.size(); ++i) { if (i % Index::alternativeSize() == 0) { swapVector(_vectorVectors[i], initSize); this->_vectorVectorsStatus[i] = MultiValueMappingBaseBase::ACTIVE; @@ -612,23 +516,17 @@ template <typename T, typename I> void MultiValueMappingT<T, I>::initVectors(const Histogram &initCapacity) { - for (typename Histogram::const_iterator it(initCapacity.begin()), mt(initCapacity.end()); it != mt; ++it) { - uint32_t valueCnt = it->first; - uint64_t numEntries = it->second; + for (const auto & entry : initCapacity) { + uint32_t valueCnt = entry.first; + uint64_t numEntries = entry.second; if (valueCnt != 0 && valueCnt < Index::maxValues()) { uint64_t maxSize = Index::offsetSize() * valueCnt; - if (maxSize > std::numeric_limits<uint32_t>::max()) { - maxSize = std::numeric_limits<uint32_t>::max(); - maxSize -= (maxSize % valueCnt); - } if (numEntries * valueCnt > maxSize) { failNewSize(numEntries * valueCnt, maxSize); } swapVector(_singleVectors[valueCnt * 2], valueCnt * numEntries); } else if (valueCnt == Index::maxValues()) { uint64_t maxSize = Index::offsetSize(); - if (maxSize > std::numeric_limits<uint32_t>::max()) - maxSize = std::numeric_limits<uint32_t>::max(); if (numEntries > maxSize) { failNewSize(numEntries, maxSize); } @@ -651,7 +549,7 @@ MultiValueMappingT<T, I>::getValidIndex(Index &newIdx, uint32_t numValues) return false; } - uint32_t used = active.first->used(); + size_t used = active.first->used(); assert(used % numValues == 0); incUsed(*active.first, numValues); newIdx = Index(active.second.values(), active.second.alternative(), @@ -664,7 +562,7 @@ MultiValueMappingT<T, I>::getValidIndex(Index &newIdx, uint32_t numValues) return false; } - uint32_t used = active.first->used(); + size_t used = active.first->used(); incUsed(*active.first, numValues); 
(*active.first)[used].resize(numValues); newIdx = Index(active.second.values(), active.second.alternative(), @@ -687,9 +585,6 @@ compactSingleVector(SingleVectorPtr &activeVector, SingleVectorPtr freeVector = getSingleVector(valueCnt, MultiValueMappingBaseBase::FREE); if (freeVector.first == NULL) { -#ifdef LOG_MULTIVALUE_MAPPING - LOG(warning, "did not find any free '%u-vector'", valueCnt); -#endif uint64_t dead = activeVector.first->dead(); uint64_t fallbackNewSize = newSize + dead * valueCnt + 1024 * valueCnt; if (fallbackNewSize > maxSize) @@ -709,21 +604,11 @@ compactSingleVector(SingleVectorPtr &activeVector, return; } swapVector(*freeVector.first, newSize); -#ifdef LOG_MULTIVALUE_MAPPING - LOG(info, - "compacting from '%u-vector(%u)' " - "(s = %u, u = %u, d = %u) to " - "'%u-vector(%u)' (s = %u)", - valueCnt, activeVector.second.alternative(), - activeVector.first->size(), - activeVector.first->used() , activeVector.first->dead(), - valueCnt, freeVector.second.alternative(), newSize); -#endif uint32_t activeVectorIdx = activeVector.second.vectorIdx(); - for (uint32_t i = 0; i < this->_indices.size(); ++i) { + for (size_t i = 0; i < this->_indices.size(); ++i) { Index & idx = this->_indices[i]; if (activeVectorIdx == idx.vectorIdx()) { - for (uint32_t j = idx.offset() * idx.values(), + for (uint64_t j = idx.offset() * idx.values(), k = freeVector.first->used(); j < (idx.offset() + 1) * idx.values() && k < freeVector.first->used() + valueCnt; ++j, ++k) @@ -759,9 +644,6 @@ compactVectorVector(VectorVectorPtr &activeVector, VectorVectorPtr freeVector = getVectorVector(MultiValueMappingBaseBase::FREE); if (freeVector.first == NULL) { -#ifdef LOG_MULTIVALUE_MAPPING - LOG(error, "did not find any free vectorvector"); -#endif uint64_t dead = activeVector.first->dead(); uint64_t fallbackNewSize = newSize + dead + 1024; if (fallbackNewSize > maxSize) @@ -780,24 +662,15 @@ compactVectorVector(VectorVectorPtr &activeVector, return; } swapVector(*freeVector.first, 
newSize); -#ifdef LOG_MULTIVALUE_MAPPING - LOG(info, - "compacting from 'vectorvector(%u)' " - "(s = %u, u = %u, d = %u) to " - "'vectorvector(%u)' (s = %u)", - activeVector.second.alternative(), activeVector.first->size(), - activeVector.first->used(), activeVector.first->dead(), - freeVector.second.alternative(), newSize); -#endif uint32_t activeVectorIdx = activeVector.second.vectorIdx(); - for (uint32_t i = 0; i < this->_indices.size(); ++i) { + for (size_t i = 0; i < this->_indices.size(); ++i) { Index & idx = this->_indices[i]; if (activeVectorIdx == idx.vectorIdx()) { - uint32_t activeOffset = idx.offset(); - uint32_t vecSize = (*activeVector.first)[activeOffset].size(); - uint32_t freeOffset = freeVector.first->used(); + uint64_t activeOffset = idx.offset(); + uint64_t vecSize = (*activeVector.first)[activeOffset].size(); + uint64_t freeOffset = freeVector.first->used(); (*freeVector.first)[freeOffset].resize(vecSize); - for (uint32_t j = 0; j < vecSize; ++j) { + for (uint64_t j = 0; j < vecSize; ++j) { (*freeVector.first)[freeOffset][j] = (*activeVector.first)[activeOffset][j]; } @@ -820,7 +693,7 @@ typename MultiValueMappingT<T, I>::SingleVectorPtr MultiValueMappingT<T, I>::getSingleVector(uint32_t numValues, VectorStatus status) { - for (uint32_t i = numValues * Index::alternativeSize(); + for (size_t i = numValues * Index::alternativeSize(); i < (numValues + 1) * Index::alternativeSize(); ++i) { if (this->_singleVectorsStatus[i] == status) { @@ -837,7 +710,7 @@ template <typename T, typename I> typename MultiValueMappingT<T, I>::VectorVectorPtr MultiValueMappingT<T, I>::getVectorVector(VectorStatus status) { - for (uint32_t i = 0; i < _vectorVectors.size(); ++i) { + for (size_t i = 0; i < _vectorVectors.size(); ++i) { if (this->_vectorVectorsStatus[i] == status) { return VectorVectorPtr(&_vectorVectors[i], Index(Index::maxValues(), i, 0)); @@ -875,7 +748,7 @@ MultiValueMappingT<T, I>::MultiValueMappingT(uint32_t &committedDocIdLimit, template <typename 
T, typename I> MultiValueMappingT<T, I>::MultiValueMappingT(uint32_t &committedDocIdLimit, uint32_t numKeys, - uint32_t initSize, + size_t initSize, const GrowStrategy & gs) : MultiValueMappingBase<I>(committedDocIdLimit, numKeys, gs), _singleVectors((Index::maxValues()) * Index::alternativeSize()), @@ -905,7 +778,7 @@ MultiValueMappingT<T, I>::~MultiValueMappingT() template <typename T, typename I> void -MultiValueMappingT<T, I>::reset(uint32_t numKeys, uint32_t initSize) +MultiValueMappingT<T, I>::reset(uint32_t numKeys, size_t initSize) { MultiValueMappingBase<I>::reset(numKeys); initVectors(initSize); @@ -941,7 +814,7 @@ MultiValueMappingT<T, I>::get(uint32_t key, uint32_t available = idx.values(); uint32_t num2Read = std::min(available, sz); const SingleVector & vec = _singleVectors[idx.vectorIdx()]; - for (uint32_t i = 0, j = idx.offset() * idx.values(); + for (uint64_t i = 0, j = idx.offset() * idx.values(); i < num2Read && j < (idx.offset() + 1) * idx.values(); ++i, ++j) { buffer[i] = static_cast<BufferType>(vec[j]); } @@ -970,7 +843,7 @@ MultiValueMappingT<T, I>::get(uint32_t key, uint32_t index, T & value) const if (index >= idx.values()) { return false; } - uint32_t offset = idx.offset() * idx.values() + index; + uint64_t offset = idx.offset() * idx.values() + index; value = _singleVectors[idx.vectorIdx()][offset]; return true; } else { @@ -1020,40 +893,20 @@ MultiValueMappingT<T, I>::set(uint32_t key, if (!getValidIndex(newIdx, numValues)) { abort(); } -#ifdef LOG_MULTIVALUE_MAPPING - LOG(info, - "newIdx: values = %u, alternative = %u, offset = %u", - newIdx.values(), newIdx.alternative(), newIdx.offset()); -#endif if (newIdx.values() != 0 && newIdx.values() < Index::maxValues()) { SingleVector & vec = _singleVectors[newIdx.vectorIdx()]; - for (uint32_t i = newIdx.offset() * newIdx.values(), j = 0; + for (uint64_t i = newIdx.offset() * newIdx.values(), j = 0; i < (newIdx.offset() + 1) * newIdx.values() && j < numValues; ++i, ++j) { vec[i] = values[j]; } 
-#ifdef LOG_MULTIVALUE_MAPPING - LOG(info, - "inserted in '%u-vector(%u)': " - "key = %u, size = %u, used = %u, dead = %u, offset = %u", - newIdx.values(), newIdx.alternative(), - key, vec.size(), - vec.used(), vec.dead(), newIdx.offset() * newIdx.values()); -#endif } else if (newIdx.values() == Index::maxValues()) { VectorVector & vec = _vectorVectors[newIdx.alternative()]; for (uint32_t i = 0; i < numValues; ++i) { vec[newIdx.offset()][i] = values[i]; } -#ifdef LOG_MULTIVALUE_MAPPING - LOG(info, - "inserted %u values in 'vector-vector(%u)': " - "key = %u, size = %u, used = %u, dead = %u, offset = %u", - numValues, newIdx.alternative(), - key, vec.size(), vec.used(), vec.dead(), newIdx.offset()); -#endif } std::atomic_thread_fence(std::memory_order_release); @@ -1065,25 +918,12 @@ MultiValueMappingT<T, I>::set(uint32_t key, SingleVector & vec = _singleVectors[oldIdx.vectorIdx()]; incDead(vec, oldIdx.values()); this->decValueCnt(oldIdx.values()); -#ifdef LOG_MULTIVALUE_MAPPING - LOG(info, - "mark space dead in '%u-vector(%u)': " - "size = %u, used = %u, dead = %u", - oldIdx.values(), oldIdx.alternative(), - vec.size(), vec.used(), vec.dead()); -#endif } else if (oldIdx.values() == Index::maxValues()) { VectorVector & vec = _vectorVectors[oldIdx.alternative()]; uint32_t oldNumValues = vec[oldIdx.offset()].size(); incDead(vec); this->decValueCnt(oldNumValues); holdElem(oldIdx, sizeof(VectorBase) + sizeof(T) * oldNumValues); -#ifdef LOG_MULTIVALUE_MAPPING - LOG(info, - "mark space dead in 'vector-vector(%u)': " - "size = %u, used = %u, dead = %u", - oldIdx.alternative(), vec.size(), vec.used(), vec.dead()); -#endif } } @@ -1109,7 +949,7 @@ MultiValueMappingT<T, I>::replace(uint32_t key, if (currIdx.values() != 0 && currIdx.values() < Index::maxValues()) { SingleVector & vec = _singleVectors[currIdx.vectorIdx()]; - for (uint32_t i = currIdx.offset() * currIdx.values(), j = 0; + for (uint64_t i = currIdx.offset() * currIdx.values(), j = 0; i < (currIdx.offset() + 1) * 
currIdx.values() && j < numValues; ++i, ++j) { @@ -1216,9 +1056,9 @@ MultiValueMappingT<T, I>::enoughCapacity(const Histogram & capacityNeeded) { if (_pendingCompact) return false; - for (typename Histogram::const_iterator it(capacityNeeded.begin()), mt(capacityNeeded.end()); it != mt; ++it) { - uint32_t valueCnt = it->first; - uint64_t numEntries = it->second; + for (const auto & entry : capacityNeeded) { + uint32_t valueCnt = entry.first; + uint64_t numEntries = entry.second; if (valueCnt < Index::maxValues()) { SingleVectorPtr active = getSingleVector(valueCnt, MultiValueMappingBaseBase::ACTIVE); @@ -1240,24 +1080,18 @@ template <typename T, typename I> void MultiValueMappingT<T, I>::performCompaction(Histogram & capacityNeeded) { -#ifdef LOG_MULTIVALUE_MAPPING - LOG(info, "performCompaction()"); -#endif if (_pendingCompact) { // Further populate histogram to ensure pending compaction being done. - for (std::set<uint32_t>::const_iterator - pit(_pendingCompactSingleVector.begin()), - pmt(_pendingCompactSingleVector.end()); - pit != pmt; ++pit) { - (void) capacityNeeded[*pit]; + for (uint32_t value : _pendingCompactSingleVector) { + (void) capacityNeeded[value]; } if (_pendingCompactVectorVector) { (void) capacityNeeded[Index::maxValues()]; } } - for (typename Histogram::const_iterator it(capacityNeeded.begin()), mt(capacityNeeded.end()); it != mt; ++it) { - uint32_t valueCnt = it->first; - uint64_t numEntries = it->second; + for (const auto & entry : capacityNeeded) { + uint32_t valueCnt = entry.first; + uint64_t numEntries = entry.second; if (valueCnt != 0 && valueCnt < Index::maxValues()) { SingleVectorPtr active = getSingleVector(valueCnt, MultiValueMappingBaseBase::ACTIVE); @@ -1266,10 +1100,6 @@ MultiValueMappingT<T, I>::performCompaction(Histogram & capacityNeeded) _pendingCompactSingleVector.find(valueCnt) != _pendingCompactSingleVector.end()) { uint64_t maxSize = Index::offsetSize() * valueCnt; - if (maxSize > std::numeric_limits<uint32_t>::max()) { - 
maxSize = std::numeric_limits<uint32_t>::max(); - maxSize -= (maxSize % valueCnt); - } uint64_t newSize = this->computeNewSize(active.first->used(), active.first->dead(), valueCnt * numEntries, @@ -1284,8 +1114,6 @@ MultiValueMappingT<T, I>::performCompaction(Histogram & capacityNeeded) if (active.first->remaining() < numEntries || _pendingCompactVectorVector) { uint64_t maxSize = Index::offsetSize(); - if (maxSize > std::numeric_limits<uint32_t>::max()) - maxSize = std::numeric_limits<uint32_t>::max(); uint64_t newSize = this->computeNewSize(active.first->used(), active.first->dead(), numEntries, @@ -1298,41 +1126,6 @@ MultiValueMappingT<T, I>::performCompaction(Histogram & capacityNeeded) assert(!_pendingCompact); } -#ifdef DEBUG_MULTIVALUE_MAPPING -template <typename T, typename I> -void -MultiValueMappingT<T, I>::printContent() const -{ - for (uint32_t key = 0; key < this->_indices.size(); ++key) { - std::vector<T> buffer(getValueCount(key)); - get(key, buffer); - std::cout << "key = " << key << ", count = " << - getValueCount(key) << ": "; - for (uint32_t i = 0; i < buffer.size(); ++i) { - std::cout << buffer[i] << ", "; - } - std::cout << '\n'; - } -} - -template <typename T, typename I> -void -MultiValueMappingT<T, I>::printVectorVectors() const -{ - for (uint32_t i = 0; i < _vectorVectors.size(); ++i) { - std::cout << "Alternative " << i << '\n'; - for (uint32_t j = 0; j < _vectorVectors[i].size(); ++j) { - std::cout << "Vector " << j << ": ["; - uint32_t size = _vectorVectors[i][j].size(); - for (uint32_t k = 0; k < size; ++k) { - std::cout << _vectorVectors[i][j][k] << ", "; - } - std::cout << "]\n"; - } - } -} -#endif - extern template class MultiValueMappingFallbackVectorHold< MultiValueMappingVector<multivalue::Value<int8_t> >::VectorBase >; extern template class MultiValueMappingFallbackVectorHold< @@ -1495,4 +1288,3 @@ extern template class MultiValueMappingT< multivalue::Index64>; } // namespace search - diff --git 
a/searchlib/src/vespa/searchlib/attribute/multivaluemapping.hpp b/searchlib/src/vespa/searchlib/attribute/multivaluemapping.hpp index a1e06ee4759..9a42a708b5a 100644 --- a/searchlib/src/vespa/searchlib/attribute/multivaluemapping.hpp +++ b/searchlib/src/vespa/searchlib/attribute/multivaluemapping.hpp @@ -5,7 +5,6 @@ namespace search { - template <typename T, typename I> template <typename V, class Saver> uint32_t @@ -36,8 +35,9 @@ MultiValueMappingT<T, I>::fillMapped(AttributeVector::ReaderBase &attrReader, indices.push_back(T(map[e], weight)); saver.save(e, doc, vci, weight); } - if (maxvc < indices.size()) + if (maxvc < indices.size()) { maxvc = indices.size(); + } set(doc, indices); } assert(di == numValues); @@ -45,6 +45,5 @@ MultiValueMappingT<T, I>::fillMapped(AttributeVector::ReaderBase &attrReader, return maxvc; } - } // namespace search diff --git a/searchlib/src/vespa/searchlib/attribute/postingchange.h b/searchlib/src/vespa/searchlib/attribute/postingchange.h index b4abe0c7dd2..d728fa89fb0 100644 --- a/searchlib/src/vespa/searchlib/attribute/postingchange.h +++ b/searchlib/src/vespa/searchlib/attribute/postingchange.h @@ -18,7 +18,7 @@ template <typename P> class PostingChange { public: - typedef vespalib::Array<P, vespalib::DefaultAlloc> A; + typedef vespalib::Array<P> A; typedef std::vector<uint32_t> R; A _additions; R _removals; diff --git a/searchlib/src/vespa/searchlib/attribute/postinglistattribute.cpp b/searchlib/src/vespa/searchlib/attribute/postinglistattribute.cpp index 2d79e80142a..847071cf4f4 100644 --- a/searchlib/src/vespa/searchlib/attribute/postinglistattribute.cpp +++ b/searchlib/src/vespa/searchlib/attribute/postinglistattribute.cpp @@ -237,8 +237,7 @@ handleFillPostings(LoadedVector &loaded) uint32_t docIdLimit = _attr.getNumDocs(); _postingList.resizeBitVectors(docIdLimit, docIdLimit); if ( ! 
loaded.empty() ) { - vespalib::Array<typename LoadedVector::Type, - vespalib::DefaultAlloc> similarValues; + vespalib::Array<typename LoadedVector::Type> similarValues; typename LoadedVector::Type v = loaded.read(); LoadedValueType prev = v.getValue(); for(size_t i(0), m(loaded.size()); i < m; i++, loaded.next()) { diff --git a/searchlib/src/vespa/searchlib/attribute/predicate_attribute.cpp b/searchlib/src/vespa/searchlib/attribute/predicate_attribute.cpp index a0693755666..620188d9630 100644 --- a/searchlib/src/vespa/searchlib/attribute/predicate_attribute.cpp +++ b/searchlib/src/vespa/searchlib/attribute/predicate_attribute.cpp @@ -13,7 +13,7 @@ LOG_SETUP(".predicate_attribute"); using document::Predicate; using document::PredicateFieldValue; -using vespalib::MMapDataBuffer; +using vespalib::DataBuffer; using namespace search::predicate; namespace search { @@ -181,7 +181,7 @@ bool PredicateAttribute::onLoad() FileUtil::LoadedBuffer::UP loaded_buffer = loadDAT(); char *rawBuffer = const_cast<char *>(static_cast<const char *>(loaded_buffer->buffer())); size_t size = loaded_buffer->size(); - MMapDataBuffer buffer(rawBuffer, size); + DataBuffer buffer(rawBuffer, size); buffer.moveFreeToData(size); const GenericHeader &header = loaded_buffer->getHeader(); diff --git a/searchlib/src/vespa/searchlib/attribute/singleenumattribute.h b/searchlib/src/vespa/searchlib/attribute/singleenumattribute.h index ad0cc2a98a3..4768928603b 100644 --- a/searchlib/src/vespa/searchlib/attribute/singleenumattribute.h +++ b/searchlib/src/vespa/searchlib/attribute/singleenumattribute.h @@ -24,8 +24,7 @@ protected: typedef vespalib::GenerationHolder GenerationHolder; public: - using EnumIndexCopyVector = vespalib::Array<EnumIndex, - vespalib::DefaultAlloc>; + using EnumIndexCopyVector = vespalib::Array<EnumIndex>; EnumStoreBase::Index getEnumIndex(DocId docId) const { return _enumIndices[docId]; } EnumHandle getE(DocId doc) const { return _enumIndices[doc].ref(); } diff --git 
a/searchlib/src/vespa/searchlib/attribute/singlenumericenumattribute.h b/searchlib/src/vespa/searchlib/attribute/singlenumericenumattribute.h index 3793431f75b..125bf5587f3 100644 --- a/searchlib/src/vespa/searchlib/attribute/singlenumericenumattribute.h +++ b/searchlib/src/vespa/searchlib/attribute/singlenumericenumattribute.h @@ -30,7 +30,7 @@ protected: typedef typename B::BaseClass::generation_t generation_t; typedef typename B::BaseClass::LoadedNumericValueT LoadedNumericValueT; typedef typename B::BaseClass::LoadedVector LoadedVector; - typedef SequentialReadModifyWriteVector<LoadedNumericValueT, vespalib::DefaultAlloc> LoadedVectorR; + typedef SequentialReadModifyWriteVector<LoadedNumericValueT> LoadedVectorR; typedef typename SingleValueEnumAttribute<B>::EnumStore EnumStore; typedef typename SingleValueEnumAttributeBase::EnumIndex EnumIndex; diff --git a/searchlib/src/vespa/searchlib/attribute/stringbase.h b/searchlib/src/vespa/searchlib/attribute/stringbase.h index a70cc6ecfab..fde8e3847fc 100644 --- a/searchlib/src/vespa/searchlib/attribute/stringbase.h +++ b/searchlib/src/vespa/searchlib/attribute/stringbase.h @@ -19,7 +19,7 @@ class StringEntryType; class StringAttribute : public AttributeVector { public: - typedef vespalib::Array<uint32_t, vespalib::DefaultAlloc> OffsetVector; + typedef vespalib::Array<uint32_t> OffsetVector; typedef const char * LoadedValueType; typedef EnumStoreBase::Index EnumIndex; typedef EnumStoreBase::IndexVector EnumIndexVector; diff --git a/searchlib/src/vespa/searchlib/attribute/tensorattribute.cpp b/searchlib/src/vespa/searchlib/attribute/tensorattribute.cpp index 6e504d8cfec..2a8d2202ad0 100644 --- a/searchlib/src/vespa/searchlib/attribute/tensorattribute.cpp +++ b/searchlib/src/vespa/searchlib/attribute/tensorattribute.cpp @@ -6,6 +6,7 @@ #include <vespa/vespalib/tensor/tensor.h> #include "tensorattributesaver.h" +using vespalib::eval::ValueType; using vespalib::tensor::Tensor; using vespalib::tensor::TensorMapper; @@ 
-36,10 +37,19 @@ public: }; Tensor::UP -createEmptyTensor(const TensorMapper &mapper) +createEmptyTensor(const TensorMapper *mapper) { vespalib::tensor::DefaultTensor::builder builder; - return mapper.map(*builder.build()); + if (mapper != nullptr) { + return mapper->map(*builder.build()); + } + return builder.build(); +} + +bool +shouldCreateMapper(const ValueType &tensorType) +{ + return tensorType.is_tensor() && !tensorType.dimensions().empty(); } } @@ -52,9 +62,12 @@ TensorAttribute::TensorAttribute(const vespalib::stringref &baseFileName, cfg.getGrowStrategy().getDocsGrowDelta(), getGenerationHolder()), _tensorStore(), - _tensorMapper(cfg.tensorType()), + _tensorMapper(), _compactGeneration(0) { + if (shouldCreateMapper(cfg.tensorType())) { + _tensorMapper = std::make_unique<TensorMapper>(cfg.tensorType()); + } } @@ -172,8 +185,7 @@ TensorAttribute::setTensor(DocId docId, const Tensor &tensor) updateUncommittedDocIdLimit(docId); // TODO: Handle generic tensor attribute in a better way ? RefType ref = _tensorStore.setTensor( - getConfig().tensorType().is_tensor() ? - *_tensorMapper.map(tensor) : tensor); + (_tensorMapper ? *_tensorMapper->map(tensor) : tensor)); // TODO: validate if following fence is sufficient. 
std::atomic_thread_fence(std::memory_order_release); // TODO: Check if refVector must consist of std::atomic<RefType> @@ -197,7 +209,7 @@ TensorAttribute::getTensor(DocId docId) const Tensor::UP TensorAttribute::getEmptyTensor() const { - return createEmptyTensor(_tensorMapper); + return createEmptyTensor(_tensorMapper.get()); } void diff --git a/searchlib/src/vespa/searchlib/attribute/tensorattribute.h b/searchlib/src/vespa/searchlib/attribute/tensorattribute.h index 27294cb5527..ec7f41bca18 100644 --- a/searchlib/src/vespa/searchlib/attribute/tensorattribute.h +++ b/searchlib/src/vespa/searchlib/attribute/tensorattribute.h @@ -24,12 +24,12 @@ private: RefVector _refVector; // docId -> ref in data store for serialized tensor TensorStore _tensorStore; // data store for serialized tensors - vespalib::tensor::TensorMapper _tensorMapper; // mapper to our tensor type + std::unique_ptr<vespalib::tensor::TensorMapper> _tensorMapper; // mapper to our tensor type uint64_t _compactGeneration; // Generation when last compact occurred void compactWorst(); public: - using RefCopyVector = vespalib::Array<RefType, vespalib::DefaultAlloc>; + using RefCopyVector = vespalib::Array<RefType>; using Tensor = vespalib::tensor::Tensor; TensorAttribute(const vespalib::stringref &baseFileName, const Config &cfg); ~TensorAttribute(); diff --git a/searchlib/src/vespa/searchlib/btree/btreenodeallocator.h b/searchlib/src/vespa/searchlib/btree/btreenodeallocator.h index 53e949bacbe..3029c287027 100644 --- a/searchlib/src/vespa/searchlib/btree/btreenodeallocator.h +++ b/searchlib/src/vespa/searchlib/btree/btreenodeallocator.h @@ -46,8 +46,8 @@ private: NodeStore _nodeStore; - typedef vespalib::Array<BTreeNode::Ref, vespalib::DefaultAlloc> RefVector; - typedef vespalib::Array<BTreeRootBaseType *, vespalib::DefaultAlloc> BTreeRootBaseTypeVector; + typedef vespalib::Array<BTreeNode::Ref> RefVector; + typedef vespalib::Array<BTreeRootBaseType *> BTreeRootBaseTypeVector; // Nodes that might not be 
frozen. RefVector _internalToFreeze; diff --git a/searchlib/src/vespa/searchlib/btree/bufferstate.cpp b/searchlib/src/vespa/searchlib/btree/bufferstate.cpp index 21f548187ee..36e63300034 100644 --- a/searchlib/src/vespa/searchlib/btree/bufferstate.cpp +++ b/searchlib/src/vespa/searchlib/btree/bufferstate.cpp @@ -3,12 +3,12 @@ #include "bufferstate.h" #include <limits> -namespace search -{ +using vespalib::DefaultAlloc; +using vespalib::alloc::Alloc; -namespace btree -{ +namespace search { +namespace btree { BufferTypeBase::BufferTypeBase(uint32_t clusterSize, uint32_t minClusters, @@ -125,9 +125,8 @@ BufferState::BufferState(void) _typeId(0), _clusterSize(0), _compacting(false), - _buffer() + _buffer(DefaultAlloc::create()) { - _buffer.reset(new Alloc()); } @@ -150,7 +149,7 @@ BufferState::onActive(uint32_t bufferId, uint32_t typeId, void *&buffer) { assert(buffer == NULL); - assert(_buffer->get() == NULL); + assert(_buffer.get() == NULL); assert(_state == FREE); assert(_typeHandler == NULL); assert(_allocElems == 0); @@ -163,15 +162,14 @@ BufferState::onActive(uint32_t bufferId, uint32_t typeId, assert(_freeListList == NULL || _freeListList->_head != this); size_t initialSizeNeeded = 0; - if (bufferId == 0) + if (bufferId == 0) { initialSizeNeeded = typeHandler->getClusterSize(); - size_t allocClusters = - typeHandler->calcClustersToAlloc(initialSizeNeeded + sizeNeeded, - maxClusters); + } + size_t allocClusters = typeHandler->calcClustersToAlloc(initialSizeNeeded + sizeNeeded, maxClusters); size_t allocSize = allocClusters * typeHandler->getClusterSize(); assert(allocSize >= initialSizeNeeded + sizeNeeded); - _buffer.reset(new Alloc(allocSize * typeHandler->elementSize())); - buffer = _buffer->get(); + _buffer.create(allocSize * typeHandler->elementSize()).swap(_buffer); + buffer = _buffer.get(); typeHandler->onActive(&_usedElems); assert(buffer != NULL); _allocElems = allocSize; @@ -212,13 +210,13 @@ BufferState::onHold(void) void BufferState::onFree(void 
*&buffer) { - assert(buffer == _buffer->get()); + assert(buffer == _buffer.get()); assert(_state == HOLD); assert(_typeHandler != NULL); assert(_deadElems <= _usedElems); assert(_holdElems == _usedElems - _deadElems); _typeHandler->destroyElements(buffer, _usedElems); - Alloc().swap(*_buffer); + DefaultAlloc::create().swap(_buffer); _typeHandler->onFree(_usedElems); buffer = NULL; _usedElems = 0; @@ -334,13 +332,12 @@ BufferState::fallbackResize(uint64_t newSize, size_t allocSize = allocClusters * _typeHandler->getClusterSize(); assert(allocSize >= newSize); assert(allocSize > _allocElems); - Alloc::UP newBuffer(std::make_unique<Alloc> - (allocSize * _typeHandler->elementSize())); - _typeHandler->fallbackCopy(newBuffer->get(), buffer, _usedElems); - holdBuffer.swap(*_buffer); + Alloc newBuffer = _buffer.create(allocSize * _typeHandler->elementSize()); + _typeHandler->fallbackCopy(newBuffer.get(), buffer, _usedElems); + holdBuffer.swap(_buffer); std::atomic_thread_fence(std::memory_order_release); _buffer = std::move(newBuffer); - buffer = _buffer->get(); + buffer = _buffer.get(); _allocElems = allocSize; std::atomic_thread_fence(std::memory_order_release); } diff --git a/searchlib/src/vespa/searchlib/btree/bufferstate.h b/searchlib/src/vespa/searchlib/btree/bufferstate.h index 3c7a3557952..bbb8f77d5ef 100644 --- a/searchlib/src/vespa/searchlib/btree/bufferstate.h +++ b/searchlib/src/vespa/searchlib/btree/bufferstate.h @@ -13,14 +13,8 @@ namespace search { namespace btree { - class BufferTypeBase { -private: - BufferTypeBase(const BufferTypeBase &rhs); - - BufferTypeBase & - operator=(const BufferTypeBase &rhs); protected: uint32_t _clusterSize; // Number of elements in an allocation unit uint32_t _minClusters; // Minimum number of clusters to allocate @@ -32,47 +26,21 @@ protected: const size_t *_lastUsedElems; // used elements in last active buffer public: - BufferTypeBase(uint32_t clusterSize, - uint32_t minClusters, - uint32_t maxClusters); - - virtual - 
~BufferTypeBase(void); - - virtual void - destroyElements(void *buffer, size_t numElements) = 0; - - virtual void - fallbackCopy(void *newBuffer, - const void *oldBuffer, - size_t numElements) = 0; - - virtual void - cleanInitialElements(void *buffer) = 0; - - virtual size_t - elementSize(void) const = 0; - - virtual void - cleanHold(void *buffer, uint64_t offset, uint64_t len) = 0; - - uint32_t - getClusterSize(void) const - { - return _clusterSize; - } - - void - flushLastUsed(void); - - void - onActive(const size_t *usedElems); - - void - onHold(const size_t *usedElems); - - virtual void - onFree(size_t usedElems); + + BufferTypeBase(const BufferTypeBase &rhs) = delete; + BufferTypeBase & operator=(const BufferTypeBase &rhs) = delete; + BufferTypeBase(uint32_t clusterSize, uint32_t minClusters, uint32_t maxClusters); + virtual ~BufferTypeBase(); + virtual void destroyElements(void *buffer, size_t numElements) = 0; + virtual void fallbackCopy(void *newBuffer, const void *oldBuffer, size_t numElements) = 0; + virtual void cleanInitialElements(void *buffer) = 0; + virtual size_t elementSize() const = 0; + virtual void cleanHold(void *buffer, uint64_t offset, uint64_t len) = 0; + uint32_t getClusterSize() const { return _clusterSize; } + void flushLastUsed(); + void onActive(const size_t *usedElems); + void onHold(const size_t *usedElems); + virtual void onFree(size_t usedElems); /** * Calculate number of clusters to allocate for new buffer. 
@@ -82,9 +50,7 @@ public: * * @return number of clusters to allocate for new buffer */ - virtual size_t - calcClustersToAlloc(size_t sizeNeeded, - uint64_t clusterRefSize) const; + virtual size_t calcClustersToAlloc(size_t sizeNeeded, uint64_t clusterRefSize) const; uint32_t getActiveBuffers() const { return _activeBuffers; } }; @@ -93,41 +59,21 @@ public: template <typename EntryType> class BufferType : public BufferTypeBase { -private: - BufferType(const BufferType &rhs); - - BufferType & - operator=(const BufferType &rhs); public: EntryType _emptyEntry; - BufferType(uint32_t clusterSize, - uint32_t minClusters, - uint32_t maxClusters) + BufferType(const BufferType &rhs) = delete; + BufferType & operator=(const BufferType &rhs) = delete; + BufferType(uint32_t clusterSize, uint32_t minClusters, uint32_t maxClusters) : BufferTypeBase(clusterSize, minClusters, maxClusters), _emptyEntry() - { - } - - virtual void - destroyElements(void *buffer, size_t numElements); + { } - virtual void - fallbackCopy(void *newBuffer, - const void *oldBuffer, - size_t numElements); - - virtual void - cleanInitialElements(void *buffer); - - virtual void - cleanHold(void *buffer, uint64_t offset, uint64_t len); - - virtual size_t - elementSize(void) const - { - return sizeof(EntryType); - } + void destroyElements(void *buffer, size_t numElements) override; + void fallbackCopy(void *newBuffer, const void *oldBuffer, size_t numElements) override; + void cleanInitialElements(void *buffer) override; + void cleanHold(void *buffer, uint64_t offset, uint64_t len) override; + size_t elementSize() const override { return sizeof(EntryType); } }; @@ -186,25 +132,20 @@ BufferType<EntryType>::cleanHold(void *buffer, uint64_t offset, uint64_t len) class BufferState { public: - typedef vespalib::DefaultAlloc Alloc; + typedef vespalib::alloc::Alloc Alloc; class FreeListList { public: BufferState *_head; - FreeListList(void) - : _head(NULL) - { - } - - ~FreeListList(void); + FreeListList() : _head(NULL) 
{ } + ~FreeListList(); }; - typedef vespalib::Array<EntryRef, vespalib::DefaultAlloc> FreeList; + typedef vespalib::Array<EntryRef> FreeList; - enum State - { + enum State { FREE, ACTIVE, HOLD @@ -224,18 +165,17 @@ public: BufferState *_prevHasFree; BufferTypeBase *_typeHandler; - uint32_t _typeId; - uint32_t _clusterSize; - bool _compacting; + uint32_t _typeId; + uint32_t _clusterSize; + bool _compacting; /* * TODO: Check if per-buffer free lists are useful, or if *compaction should always be used to free up whole buffers. */ - BufferState(void); - - ~BufferState(void); + BufferState(); + ~BufferState(); /** * Transition from FREE to ACTIVE state. @@ -250,21 +190,17 @@ public: */ void onActive(uint32_t bufferId, uint32_t typeId, BufferTypeBase *typeHandler, - size_t sizeNeeded, - size_t maxSize, - void *&buffer); + size_t sizeNeeded, size_t maxSize, void *&buffer); /** * Transition from ACTIVE to HOLD state. */ - void - onHold(void); + void onHold(); /** * Transition from HOLD to FREE state. */ - void - onFree(void *&buffer); + void onFree(void *&buffer); /** * Set list of buffer states with nonempty free lists. @@ -272,115 +208,57 @@ public: * @param freeListList List of buffer states. If NULL then free lists * are disabled. */ - void - setFreeListList(FreeListList *freeListList); + void setFreeListList(FreeListList *freeListList); /** * Add buffer state to list of buffer states with nonempty free lists. */ - void - addToFreeListList(void); + void addToFreeListList(); /** * Remove buffer state from list of buffer states with nonempty free lists. */ - void - removeFromFreeListList(void); + void removeFromFreeListList(); /** * Disable hold of elements, just mark then as dead without * cleanup. Typically used when tearing down data structure in a * controlled manner. */ - void - disableElemHoldList(void); + void disableElemHoldList(); /** * Pop element from free list. 
*/ - EntryRef - popFreeList(void) - { + EntryRef popFreeList() { EntryRef ret = _freeList.back(); _freeList.pop_back(); - if (_freeList.empty()) + if (_freeList.empty()) { removeFromFreeListList(); + } _deadElems -= _clusterSize; return ret; } - - size_t - size(void) const - { - return _usedElems; - } - - size_t - capacity(void) const - { - return _allocElems; - } - - size_t - remaining(void) const - { - return _allocElems - _usedElems; - } - - void - pushed_back(uint64_t len) - { - _usedElems += len; - } - - void - cleanHold(void *buffer, uint64_t offset, uint64_t len) - { - _typeHandler->cleanHold(buffer, offset, len); - } - - void - dropBuffer(void *&buffer); - - uint32_t - getTypeId(void) const - { - return _typeId; - } - - uint32_t - getClusterSize(void) const - { - return _clusterSize; - } - + size_t size() const { return _usedElems; } + size_t capacity() const { return _allocElems; } + size_t remaining() const { return _allocElems - _usedElems; } + void pushed_back(uint64_t len) { _usedElems += len; } + void cleanHold(void *buffer, uint64_t offset, uint64_t len) { _typeHandler->cleanHold(buffer, offset, len); } + void dropBuffer(void *&buffer); + uint32_t getTypeId() const { return _typeId; } + uint32_t getClusterSize() const { return _clusterSize; } uint64_t getDeadElems() const { return _deadElems; } - - bool - getCompacting(void) const - { - return _compacting; - } - - void - setCompacting(void) - { - _compacting = true; - } - - void - fallbackResize(uint64_t newSize, - size_t maxClusters, - void *&buffer, - Alloc &holdBuffer); + bool getCompacting() const { return _compacting; } + void setCompacting() { _compacting = true; } + void fallbackResize(uint64_t newSize, size_t maxClusters, void *&buffer, Alloc &holdBuffer); bool isActive(uint32_t typeId) const { return ((_state == ACTIVE) && (_typeId == typeId)); } private: - Alloc::UP _buffer; + Alloc _buffer; }; diff --git a/searchlib/src/vespa/searchlib/btree/datastorebase.h 
b/searchlib/src/vespa/searchlib/btree/datastorebase.h index 0c44b485d18..f948ada3d2f 100644 --- a/searchlib/src/vespa/searchlib/btree/datastorebase.h +++ b/searchlib/src/vespa/searchlib/btree/datastorebase.h @@ -57,7 +57,7 @@ protected: } }; - typedef vespalib::Array<ElemHold1ListElem, vespalib::DefaultAlloc> ElemHold1List; + typedef vespalib::Array<ElemHold1ListElem> ElemHold1List; typedef std::deque<ElemHold2ListElem> ElemHold2List; class FallbackHold : public vespalib::GenerationHeldBase diff --git a/searchlib/src/vespa/searchlib/common/allocatedbitvector.cpp b/searchlib/src/vespa/searchlib/common/allocatedbitvector.cpp index 59d190b2b50..255ee64633d 100644 --- a/searchlib/src/vespa/searchlib/common/allocatedbitvector.cpp +++ b/searchlib/src/vespa/searchlib/common/allocatedbitvector.cpp @@ -5,20 +5,20 @@ #include <vespa/fastos/fastos.h> #include "allocatedbitvector.h" -namespace search -{ +namespace search { using vespalib::nbostream; using vespalib::GenerationHeldBase; using vespalib::GenerationHeldAlloc; using vespalib::GenerationHolder; +using vespalib::DefaultAlloc; void AllocatedBitVector::alloc() { uint32_t words = capacityWords(); words += (-words & 15); // Pad to 64 byte alignment const size_t sz(words * sizeof(Word)); - Alloc(sz).swap(_alloc); + DefaultAlloc::create(sz).swap(_alloc); assert(_alloc.size()/sizeof(Word) >= words); // Clear padding memset(static_cast<char *>(_alloc.get()) + sizeBytes(), 0, sz - sizeBytes()); diff --git a/searchlib/src/vespa/searchlib/common/allocatedbitvector.h b/searchlib/src/vespa/searchlib/common/allocatedbitvector.h index 8a52a07e29b..df1de516156 100644 --- a/searchlib/src/vespa/searchlib/common/allocatedbitvector.h +++ b/searchlib/src/vespa/searchlib/common/allocatedbitvector.h @@ -17,8 +17,7 @@ class BitVectorTest; class AllocatedBitVector : public BitVector { public: - typedef vespalib::AutoAlloc<0x800000, 0x1000> Alloc; - + using Alloc = vespalib::alloc::Alloc; /** * Class constructor specifying size but not 
content. New bitvector * is cleared. diff --git a/searchlib/src/vespa/searchlib/common/bitvector.cpp b/searchlib/src/vespa/searchlib/common/bitvector.cpp index 25edae290de..441351ab724 100644 --- a/searchlib/src/vespa/searchlib/common/bitvector.cpp +++ b/searchlib/src/vespa/searchlib/common/bitvector.cpp @@ -14,6 +14,7 @@ using vespalib::make_string; using vespalib::IllegalArgumentException; using vespalib::hwaccelrated::IAccelrated; using vespalib::Optimized; +using vespalib::DefaultAlloc; namespace { @@ -31,8 +32,7 @@ void verifyContains(const search::BitVector & a, const search::BitVector & b) } ///////////////////////////////// -namespace search -{ +namespace search { using vespalib::nbostream; using vespalib::GenerationHeldBase; @@ -323,7 +323,7 @@ BitVector::create(Index numberOfElements, size_t vectorsize = getFileBytes(numberOfElements); file.DirectIOPadding(offset, vectorsize, padbefore, padafter); assert((padbefore & (getAlignment() - 1)) == 0); - AllocatedBitVector::Alloc alloc(padbefore + vectorsize + padafter); + AllocatedBitVector::Alloc alloc = DefaultAlloc::create(padbefore + vectorsize + padafter, 0x1000000, 0x1000); void * alignedBuffer = alloc.get(); file.ReadBuf(alignedBuffer, alloc.size(), offset - padbefore); bv.reset(new AllocatedBitVector(numberOfElements, std::move(alloc), padbefore)); diff --git a/searchlib/src/vespa/searchlib/common/bitvector.h b/searchlib/src/vespa/searchlib/common/bitvector.h index 70864c938d3..f5c40a734e9 100644 --- a/searchlib/src/vespa/searchlib/common/bitvector.h +++ b/searchlib/src/vespa/searchlib/common/bitvector.h @@ -36,13 +36,15 @@ private: static Init _initializer; }; -class BitVector : protected BitWord, private vespalib::noncopyable +class BitVector : protected BitWord { public: typedef BitWord::Index Index; typedef vespalib::GenerationHolder GenerationHolder; typedef vespalib::GenerationHeldBase GenerationHeldBase; typedef std::unique_ptr<BitVector> UP; + BitVector(const BitVector &) = delete; + BitVector& 
operator = (const BitVector &) = delete; virtual ~BitVector() { } bool operator == (const BitVector &right) const; const void * getStart() const { return _words; } diff --git a/searchlib/src/vespa/searchlib/common/partialbitvector.cpp b/searchlib/src/vespa/searchlib/common/partialbitvector.cpp index b3472abe89a..59130ac7ec9 100644 --- a/searchlib/src/vespa/searchlib/common/partialbitvector.cpp +++ b/searchlib/src/vespa/searchlib/common/partialbitvector.cpp @@ -5,13 +5,13 @@ #include <vespa/fastos/fastos.h> #include "partialbitvector.h" -///////////////////////////////// -namespace search -{ +using vespalib::DefaultAlloc; + +namespace search { PartialBitVector::PartialBitVector(Index start, Index end) : BitVector(), - _alloc(numActiveBytes(start, end)) + _alloc(DefaultAlloc::create(numActiveBytes(start, end), 0x1000000, 0x1000)) { init(_alloc.get(), start, end); clear(); diff --git a/searchlib/src/vespa/searchlib/common/partialbitvector.h b/searchlib/src/vespa/searchlib/common/partialbitvector.h index 94facc9512a..c6c52b35808 100644 --- a/searchlib/src/vespa/searchlib/common/partialbitvector.h +++ b/searchlib/src/vespa/searchlib/common/partialbitvector.h @@ -16,8 +16,6 @@ namespace search { class PartialBitVector : public BitVector { public: - typedef vespalib::AutoAlloc<0x800000, 0x1000> Alloc; - /** * Class constructor specifying startindex and endindex. * Allocated area is zeroed. 
@@ -31,7 +29,7 @@ public: virtual ~PartialBitVector(void); private: - Alloc _alloc; + vespalib::alloc::Alloc _alloc; }; } // namespace search diff --git a/searchlib/src/vespa/searchlib/common/rcuvector.h b/searchlib/src/vespa/searchlib/common/rcuvector.h index 9c5954848c4..3c9274e3e94 100644 --- a/searchlib/src/vespa/searchlib/common/rcuvector.h +++ b/searchlib/src/vespa/searchlib/common/rcuvector.h @@ -44,7 +44,7 @@ class RcuVectorBase "Value type must be trivially destructible"); protected: - typedef vespalib::Array<T, vespalib::DefaultAlloc> Array; + typedef vespalib::Array<T> Array; typedef vespalib::GenerationHandler::generation_t generation_t; typedef vespalib::GenerationHolder GenerationHolder; Array _data; diff --git a/searchlib/src/vespa/searchlib/common/resultset.cpp b/searchlib/src/vespa/searchlib/common/resultset.cpp index ac69680b427..cdbfebdc1a9 100644 --- a/searchlib/src/vespa/searchlib/common/resultset.cpp +++ b/searchlib/src/vespa/searchlib/common/resultset.cpp @@ -3,15 +3,16 @@ // Copyright (C) 2003 Overture Services Norway AS #include <vespa/fastos/fastos.h> -#include <vespa/log/log.h> -LOG_SETUP(""); - #include <vespa/searchlib/common/resultset.h> #include <vespa/searchlib/common/bitvector.h> -namespace search -{ +using vespalib::DefaultAlloc; +using vespalib::alloc::Alloc; + +namespace search { +//Above 32M we hand back to the OS directly. 
+constexpr size_t MMAP_LIMIT = 0x2000000; ResultSet::ResultSet(void) : _elemsUsedInRankedHitsArray(0u), @@ -50,11 +51,9 @@ void ResultSet::allocArray(unsigned int arrayAllocated) { if (arrayAllocated > 0) { - ArrayAlloc n(arrayAllocated * sizeof(RankedHit)); - _rankedHitsArray.swap(n); + DefaultAlloc::create(arrayAllocated * sizeof(RankedHit), MMAP_LIMIT).swap(_rankedHitsArray); } else { - ArrayAlloc n; - _rankedHitsArray.swap(n); + Alloc().swap(_rankedHitsArray); } _rankedHitsArrayAllocElements = arrayAllocated; _elemsUsedInRankedHitsArray = 0; @@ -100,7 +99,7 @@ ResultSet::mergeWithBitOverflow(void) uint32_t bidx = bitVector->getFirstTrueBit(); uint32_t actualHits = getNumHits(); - ArrayAlloc newHitsAlloc(actualHits*sizeof(RankedHit)); + Alloc newHitsAlloc = DefaultAlloc::create(actualHits*sizeof(RankedHit), MMAP_LIMIT); RankedHit *newHitsArray = static_cast<RankedHit *>(newHitsAlloc.get()); RankedHit * tgtA = newHitsArray; diff --git a/searchlib/src/vespa/searchlib/common/resultset.h b/searchlib/src/vespa/searchlib/common/resultset.h index 4489654d0a5..46129b1fb57 100644 --- a/searchlib/src/vespa/searchlib/common/resultset.h +++ b/searchlib/src/vespa/searchlib/common/resultset.h @@ -13,14 +13,12 @@ namespace search class ResultSet { private: - // Everything above 8m we return to OS. 
- typedef vespalib::AutoAlloc<0x800000> ArrayAlloc; ResultSet& operator=(const ResultSet &); unsigned int _elemsUsedInRankedHitsArray; unsigned int _rankedHitsArrayAllocElements; - BitVector::UP _bitOverflow; - ArrayAlloc _rankedHitsArray; + BitVector::UP _bitOverflow; + vespalib::alloc::Alloc _rankedHitsArray; public: typedef std::unique_ptr<ResultSet> UP; diff --git a/searchlib/src/vespa/searchlib/common/sortresults.cpp b/searchlib/src/vespa/searchlib/common/sortresults.cpp index 485c0d550b9..76109f3ec42 100644 --- a/searchlib/src/vespa/searchlib/common/sortresults.cpp +++ b/searchlib/src/vespa/searchlib/common/sortresults.cpp @@ -17,8 +17,12 @@ using search::common::SortInfo; using search::attribute::IAttributeContext; using search::attribute::IAttributeVector; +using vespalib::DefaultAlloc; + namespace { +constexpr size_t MMAP_LIMIT = 0x2000000; + template<typename T> class RadixHelper { @@ -498,7 +502,7 @@ FastS_SortSpec::sortResults(RankedHit a[], uint32_t n, uint32_t topn) } else if (_method == 1) { std::sort(sortData, sortData + n, StdSortDataCompare(&_binarySortData[0])); } else { - vespalib::Array<uint32_t, Alloc> radixScratchPad(n); + vespalib::Array<uint32_t> radixScratchPad(n, DefaultAlloc::create(0, MMAP_LIMIT)); search::radix_sort(SortDataRadix(&_binarySortData[0]), StdSortDataCompare(&_binarySortData[0]), SortDataEof(), 1, sortData, n, &radixScratchPad[0], 0, 96, topn); } for (uint32_t i(0), m(_sortDataArray.size()); i < m; ++i) { diff --git a/searchlib/src/vespa/searchlib/common/sortresults.h b/searchlib/src/vespa/searchlib/common/sortresults.h index 712150bc2c5..53b0c4b0e8d 100644 --- a/searchlib/src/vespa/searchlib/common/sortresults.h +++ b/searchlib/src/vespa/searchlib/common/sortresults.h @@ -83,7 +83,7 @@ public: //----------------------------------------------------------------------------- -class FastS_SortSpec : public FastS_IResultSorter, public vespalib::noncopyable +class FastS_SortSpec : public FastS_IResultSorter { private: friend 
class MultilevelSortTest; @@ -119,9 +119,8 @@ public: private: typedef std::vector<VectorRef> VectorRefList; - typedef vespalib::AutoAlloc<0x800000> Alloc; - typedef vespalib::Array<uint8_t, Alloc> BinarySortData; - typedef vespalib::Array<SortData, Alloc> SortDataArray; + typedef vespalib::Array<uint8_t> BinarySortData; + typedef vespalib::Array<SortData> SortDataArray; using ConverterFactory = search::common::ConverterFactory; vespalib::Doom _doom; const ConverterFactory & _ucaFactory; @@ -136,6 +135,8 @@ private: uint8_t * realloc(uint32_t n, size_t & variableWidth, uint32_t & available, uint32_t & dataSize, uint8_t *mySortData); public: + FastS_SortSpec(const FastS_SortSpec &) = delete; + FastS_SortSpec & operator = (const FastS_SortSpec &) = delete; FastS_SortSpec(const vespalib::Doom & doom, const ConverterFactory & ucaFactory, int method=2); virtual ~FastS_SortSpec(); @@ -150,8 +151,7 @@ public: void copySortData(uint32_t offset, uint32_t n, uint32_t *idx, char *buf); void freeSortData(); bool hasSortData() const; - void initWithoutSorting(const search::RankedHit * hits, - uint32_t hitCnt); + void initWithoutSorting(const search::RankedHit * hits, uint32_t hitCnt); static int Compare(const FastS_SortSpec *self, const SortData &a, const SortData &b); }; diff --git a/searchlib/src/vespa/searchlib/diskindex/diskindex.cpp b/searchlib/src/vespa/searchlib/diskindex/diskindex.cpp index 8cc12c88463..6d693bb6f42 100644 --- a/searchlib/src/vespa/searchlib/diskindex/diskindex.cpp +++ b/searchlib/src/vespa/searchlib/diskindex/diskindex.cpp @@ -47,8 +47,10 @@ DiskIndex::DiskIndex(const vespalib::string &indexDir, size_t cacheSize) _bitVectorDicts(), _dicts(), _tuneFileSearch(), - _cache(*this, cacheSize) + _cache(*this, cacheSize), + _size(0) { + calculateSize(); } bool @@ -322,11 +324,11 @@ DiskIndex::readBitVector(const LookupResult &lookupRes) const } -uint64_t -DiskIndex::getSize() const +void +DiskIndex::calculateSize() { search::DirectoryTraverse 
dirt(_indexDir.c_str()); - return dirt.GetTreeSize(); + _size = dirt.GetTreeSize(); } diff --git a/searchlib/src/vespa/searchlib/diskindex/diskindex.h b/searchlib/src/vespa/searchlib/diskindex/diskindex.h index 840f4c32738..3a40bb92f8b 100644 --- a/searchlib/src/vespa/searchlib/diskindex/diskindex.h +++ b/searchlib/src/vespa/searchlib/diskindex/diskindex.h @@ -78,6 +78,9 @@ private: std::vector<std::unique_ptr<index::DictionaryFileRandRead>> _dicts; TuneFileSearch _tuneFileSearch; Cache _cache; + uint64_t _size; + + void calculateSize(); bool loadSchema(void); @@ -159,8 +162,7 @@ public: * Get the size on disk of this index. * @return the size of the index. */ - uint64_t - getSize() const; + uint64_t getSize() const { return _size; } const index::Schema & getSchema(void) const diff --git a/searchlib/src/vespa/searchlib/diskindex/docidmapper.h b/searchlib/src/vespa/searchlib/diskindex/docidmapper.h index 43e1ea44b89..f150e8636a5 100644 --- a/searchlib/src/vespa/searchlib/diskindex/docidmapper.h +++ b/searchlib/src/vespa/searchlib/diskindex/docidmapper.h @@ -12,7 +12,7 @@ class BitVector; namespace diskindex { -typedef vespalib::Array<uint8_t, vespalib::DefaultAlloc> SelectorArray; +typedef vespalib::Array<uint8_t> SelectorArray; class DocIdMapping { diff --git a/searchlib/src/vespa/searchlib/diskindex/indexbuilder.cpp b/searchlib/src/vespa/searchlib/diskindex/indexbuilder.cpp index a88ce029814..0ec75cfbdf2 100644 --- a/searchlib/src/vespa/searchlib/diskindex/indexbuilder.cpp +++ b/searchlib/src/vespa/searchlib/diskindex/indexbuilder.cpp @@ -152,9 +152,9 @@ public: } }; - typedef vespalib::Array<FHWordDocFieldFeatures, vespalib::DefaultAlloc> FHWordDocFieldFeaturesVector; - typedef vespalib::Array<FHWordDocElementFeatures, vespalib::DefaultAlloc> FHWordDocElementFeaturesVector; - typedef vespalib::Array<FHWordDocElementWordPosFeatures, vespalib::DefaultAlloc> FHWordDocElementWordPosFeaturesVector; + typedef vespalib::Array<FHWordDocFieldFeatures> 
FHWordDocFieldFeaturesVector; + typedef vespalib::Array<FHWordDocElementFeatures> FHWordDocElementFeaturesVector; + typedef vespalib::Array<FHWordDocElementWordPosFeatures> FHWordDocElementWordPosFeaturesVector; FHWordDocFieldFeaturesVector _wdff; FHWordDocElementFeaturesVector _wdfef; diff --git a/searchlib/src/vespa/searchlib/diskindex/wordnummapper.h b/searchlib/src/vespa/searchlib/diskindex/wordnummapper.h index a1a72757f22..d12e0211b94 100644 --- a/searchlib/src/vespa/searchlib/diskindex/wordnummapper.h +++ b/searchlib/src/vespa/searchlib/diskindex/wordnummapper.h @@ -14,7 +14,7 @@ class WordNumMapper; class WordNumMapping { - typedef vespalib::Array<uint64_t, vespalib::DefaultAlloc> Array; + typedef vespalib::Array<uint64_t> Array; static uint64_t noWordNumHigh(void) diff --git a/searchlib/src/vespa/searchlib/docstore/compacter.cpp b/searchlib/src/vespa/searchlib/docstore/compacter.cpp index 6a57b4fc791..d787f934efc 100644 --- a/searchlib/src/vespa/searchlib/docstore/compacter.cpp +++ b/searchlib/src/vespa/searchlib/docstore/compacter.cpp @@ -24,7 +24,7 @@ BucketCompacter::BucketCompacter(size_t maxSignificantBucketBits, const Compress _bucketizer(bucketizer), _writeCount(0), _lock(), - _backingMemory(0x40000000, &_lock), + _backingMemory(vespalib::DefaultAlloc::create(0x40000000), &_lock), _tmpStore(), _lidGuard(ds.getLidReadGuard()), _bucketizerGuard(bucketizer.getGuard()), diff --git a/searchlib/src/vespa/searchlib/docstore/documentstore.h b/searchlib/src/vespa/searchlib/docstore/documentstore.h index d44c94fe7f8..7ca3b98d525 100644 --- a/searchlib/src/vespa/searchlib/docstore/documentstore.h +++ b/searchlib/src/vespa/searchlib/docstore/documentstore.h @@ -173,7 +173,7 @@ private: _compressedSize(rhs._compressedSize), _uncompressedSize(rhs._uncompressedSize), _compression(rhs._compression), - _buf(rhs.size()) + _buf(vespalib::DefaultAlloc::create(rhs.size())) { memcpy(get(), rhs.get(), size()); } @@ -213,7 +213,7 @@ private: size_t _compressedSize; size_t 
_uncompressedSize; document::CompressionConfig::Type _compression; - vespalib::DefaultAlloc _buf; + vespalib::alloc::Alloc _buf; }; class BackingStore { public: diff --git a/searchlib/src/vespa/searchlib/docstore/filechunk.cpp b/searchlib/src/vespa/searchlib/docstore/filechunk.cpp index 957249aee66..f76d12ed7aa 100644 --- a/searchlib/src/vespa/searchlib/docstore/filechunk.cpp +++ b/searchlib/src/vespa/searchlib/docstore/filechunk.cpp @@ -293,7 +293,7 @@ public: } }; -typedef vespalib::Array<TmpChunkMeta, vespalib::DefaultAlloc> TmpChunkMetaV; +typedef vespalib::Array<TmpChunkMeta> TmpChunkMetaV; namespace { @@ -326,7 +326,7 @@ FileChunk::erase() } size_t -FileChunk::updateLidMap(ISetLid & ds, uint64_t serialNum) +FileChunk::updateLidMap(const LockGuard & guard, ISetLid & ds, uint64_t serialNum) { size_t sz(0); assert(_chunkInfo.empty()); @@ -391,7 +391,7 @@ FileChunk::updateLidMap(ISetLid & ds, uint64_t serialNum) bucketMap.recordLid(bucketId); globalBucketMap.recordLid(bucketId); } - ds.setLid(lidMeta.getLid(), LidInfo(getFileId().getId(), _chunkInfo.size(), lidMeta.size())); + ds.setLid(guard, lidMeta.getLid(), LidInfo(getFileId().getId(), _chunkInfo.size(), lidMeta.size())); incEntries(); _addedBytes += adjustSize(lidMeta.size()); } diff --git a/searchlib/src/vespa/searchlib/docstore/filechunk.h b/searchlib/src/vespa/searchlib/docstore/filechunk.h index 20b29d7fbb3..27cccc59290 100644 --- a/searchlib/src/vespa/searchlib/docstore/filechunk.h +++ b/searchlib/src/vespa/searchlib/docstore/filechunk.h @@ -66,14 +66,15 @@ typedef std::vector<LidInfoWithLid> LidInfoWithLidV; class ISetLid { public: + using LockGuard = vespalib::LockGuard; virtual ~ISetLid() { } - virtual void setLid(uint32_t lid, const LidInfo & lm) = 0; + virtual void setLid(const LockGuard & guard, uint32_t lid, const LidInfo & lm) = 0; }; class IGetLid { public: - typedef vespalib::GenerationHandler::Guard Guard; + using Guard = vespalib::GenerationHandler::Guard; virtual ~IGetLid() { } virtual 
LidInfo getLid(Guard & guard, uint32_t lid) const = 0; @@ -185,6 +186,7 @@ private: class FileChunk { public: + using LockGuard = vespalib::LockGuard; class NameId { public: explicit NameId(size_t id) : _id(id) { } @@ -220,7 +222,7 @@ public: FileChunk(FileId fileId, NameId nameId, const vespalib::string & baseName, const TuneFileSummary & tune, const IBucketizer * bucketizer, bool skipCrcOnRead); virtual ~FileChunk(); - virtual size_t updateLidMap(ISetLid & lidMap, uint64_t serialNum); + virtual size_t updateLidMap(const LockGuard & guard, ISetLid & lidMap, uint64_t serialNum); virtual ssize_t read(uint32_t lid, SubChunkId chunk, vespalib::DataBuffer & buffer) const; virtual void read(LidInfoWithLidV::const_iterator begin, size_t count, IBufferVisitor & visitor) const; void remove(uint32_t lid, uint32_t size); @@ -343,7 +345,7 @@ protected: ssize_t read(uint32_t lid, SubChunkId chunkId, const ChunkInfo & chunkInfo, vespalib::DataBuffer & buffer) const; void read(LidInfoWithLidV::const_iterator begin, size_t count, ChunkInfo ci, IBufferVisitor & visitor) const; - typedef vespalib::Array<ChunkInfo, vespalib::DefaultAlloc> ChunkInfoVector; + typedef vespalib::Array<ChunkInfo> ChunkInfoVector; const IBucketizer * _bucketizer; size_t _addedBytes; TuneFileSummary _tune; diff --git a/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp b/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp index b2434a1dd3b..732e844c757 100644 --- a/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp +++ b/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp @@ -27,6 +27,7 @@ using std::runtime_error; using document::BucketId; using docstore::StoreByBucket; using docstore::BucketCompacter; +using namespace std::literals; LogDataStore::LogDataStore(vespalib::ThreadStackExecutorBase &executor, const vespalib::string &dirName, @@ -89,8 +90,9 @@ void LogDataStore::updateLidMap() { uint64_t lastSerialNum(0); + LockGuard guard(_updateLock); for (FileChunk::UP & fc : _fileChunks) { 
- fc->updateLidMap(*this, lastSerialNum); + fc->updateLidMap(guard, *this, lastSerialNum); lastSerialNum = fc->getLastPersistedSerialNum(); } } @@ -165,7 +167,7 @@ LogDataStore::write(LockGuard guard, WriteableFileChunk & destination, uint64_t serialNum, uint32_t lid, const void * buffer, size_t len) { LidInfo lm = destination.append(serialNum, lid, buffer, len); - setLid(lid, lm); + setLid(guard, lid, lm); if (destination.getFileId() == getActiveFileId(guard)) { requireSpace(guard, destination); } @@ -463,21 +465,32 @@ void LogDataStore::compactFile(FileId fileId) flushFileAndWait(guard, compactTo, 0); compactTo.freeze(); } + compacter.reset(); - std::this_thread::sleep_for(std::chrono::seconds(10));; + std::this_thread::sleep_for(1s); + uint64_t currentGeneration; + { + LockGuard guard(_updateLock); + currentGeneration = _genHandler.getCurrentGeneration(); + _genHandler.incGeneration(); + } + FileChunk::UP toDie; for (;;) { LockGuard guard(_updateLock); - if (_holdFileChunks[fc->getFileId().getId()] == 0u) { - toDie = std::move(fc); - break; + _genHandler.updateFirstUsedGeneration(); + if (currentGeneration < _genHandler.getFirstUsedGeneration()) { + if (_holdFileChunks[fc->getFileId().getId()] == 0u) { + toDie = std::move(fc); + break; + } } guard.unlock(); /* * Wait for requireSpace() and flush() methods to leave chunk * alone. 
*/ - std::this_thread::sleep_for(std::chrono::seconds(1));; + std::this_thread::sleep_for(1s);; } toDie->erase(); LockGuard guard(_updateLock); @@ -846,8 +859,9 @@ LogDataStore::scanDir(const vespalib::string &dir, const vespalib::string &suffi } void -LogDataStore::setLid(uint32_t lid, const LidInfo & meta) +LogDataStore::setLid(const LockGuard & guard, uint32_t lid, const LidInfo & meta) { + (void) guard; if (lid < _lidInfo.size()) { _genHandler.updateFirstUsedGeneration(); _lidInfo.removeOldGenerations(_genHandler.getFirstUsedGeneration()); diff --git a/searchlib/src/vespa/searchlib/docstore/logdatastore.h b/searchlib/src/vespa/searchlib/docstore/logdatastore.h index 58bdab1de8f..abb0c536fe2 100644 --- a/searchlib/src/vespa/searchlib/docstore/logdatastore.h +++ b/searchlib/src/vespa/searchlib/docstore/logdatastore.h @@ -206,7 +206,7 @@ private: void waitForUnblock(); // Implements ISetLid API - void setLid(uint32_t lid, const LidInfo & lm) override; + void setLid(const LockGuard & guard, uint32_t lid, const LidInfo & lm) override; void compactWorst(); void compactFile(FileId chunkId); diff --git a/searchlib/src/vespa/searchlib/docstore/storebybucket.h b/searchlib/src/vespa/searchlib/docstore/storebybucket.h index a7144596d0b..b6a25ef1dff 100644 --- a/searchlib/src/vespa/searchlib/docstore/storebybucket.h +++ b/searchlib/src/vespa/searchlib/docstore/storebybucket.h @@ -60,7 +60,7 @@ private: uint32_t _chunkId; uint32_t _lid; }; - using IndexVector = vespalib::Array<Index, vespalib::DefaultAlloc>; + using IndexVector = vespalib::Array<Index>; uint64_t _chunkSerial; Chunk::UP _current; std::map<uint64_t, IndexVector> _where; diff --git a/searchlib/src/vespa/searchlib/docstore/visitcache.cpp b/searchlib/src/vespa/searchlib/docstore/visitcache.cpp index 43a8c1b33aa..89bfb2bfae4 100644 --- a/searchlib/src/vespa/searchlib/docstore/visitcache.cpp +++ b/searchlib/src/vespa/searchlib/docstore/visitcache.cpp @@ -5,6 +5,13 @@ namespace search { namespace docstore { +using 
vespalib::ConstBufferRef; +using vespalib::LockGuard; +using vespalib::DataBuffer; +using vespalib::alloc::Alloc; +using vespalib::alloc::MemoryAllocator; +using vespalib::DefaultAlloc; + KeySet::KeySet(uint32_t key) : _keys() { @@ -24,7 +31,7 @@ KeySet::contains(const KeySet &rhs) const { BlobSet::BlobSet() : _positions(), - _buffer() + _buffer(DefaultAlloc::create(0, 16 * MemoryAllocator::HUGEPAGE_SIZE), 0) { } namespace { @@ -35,25 +42,25 @@ size_t getBufferSize(const BlobSet::Positions & p) { } -BlobSet::BlobSet(const Positions & positions, vespalib::DefaultAlloc && buffer) : +BlobSet::BlobSet(const Positions & positions, Alloc && buffer) : _positions(positions), _buffer(std::move(buffer), getBufferSize(_positions)) { } void -BlobSet::append(uint32_t lid, vespalib::ConstBufferRef blob) { +BlobSet::append(uint32_t lid, ConstBufferRef blob) { _positions.emplace_back(lid, getBufferSize(_positions), blob.size()); _buffer.write(blob.c_str(), blob.size()); } -vespalib::ConstBufferRef +ConstBufferRef BlobSet::get(uint32_t lid) const { - vespalib::ConstBufferRef buf; + ConstBufferRef buf; for (LidPosition pos : _positions) { if (pos.lid() == lid) { - buf = vespalib::ConstBufferRef(_buffer.c_str() + pos.offset(), pos.size()); + buf = ConstBufferRef(_buffer.c_str() + pos.offset(), pos.size()); break; } } @@ -73,8 +80,8 @@ CompressedBlobSet::CompressedBlobSet(const document::CompressionConfig &compress _buffer() { if ( ! 
_positions.empty() ) { - vespalib::DataBuffer compressed; - vespalib::ConstBufferRef org = uncompressed.getBuffer(); + DataBuffer compressed; + ConstBufferRef org = uncompressed.getBuffer(); _compression = document::compress(compression, org, compressed, false); _buffer.resize(compressed.getDataLen()); memcpy(_buffer, compressed.getData(), compressed.getDataLen()); @@ -84,9 +91,10 @@ CompressedBlobSet::CompressedBlobSet(const document::CompressionConfig &compress BlobSet CompressedBlobSet::getBlobSet() const { - vespalib::DataBuffer uncompressed; + // These are frequent lage allocations that are to expensive to mmap. + DataBuffer uncompressed(0, 1, DefaultAlloc::create(0, 16 * MemoryAllocator::HUGEPAGE_SIZE)); if ( ! _positions.empty() ) { - document::decompress(_compression, getBufferSize(_positions), vespalib::ConstBufferRef(_buffer.c_str(), _buffer.size()), uncompressed, false); + document::decompress(_compression, getBufferSize(_positions), ConstBufferRef(_buffer.c_str(), _buffer.size()), uncompressed, false); } return BlobSet(_positions, uncompressed.stealBuffer()); } @@ -103,14 +111,14 @@ public: VisitCollector() : _blobSet() { } - void visit(uint32_t lid, vespalib::ConstBufferRef buf) override; + void visit(uint32_t lid, ConstBufferRef buf) override; const BlobSet & getBlobSet() const { return _blobSet; } private: BlobSet _blobSet; }; void -VisitCollector::visit(uint32_t lid, vespalib::ConstBufferRef buf) { +VisitCollector::visit(uint32_t lid, ConstBufferRef buf) { if (buf.size() > 0) { _blobSet.append(lid, buf); } @@ -133,7 +141,7 @@ VisitCache::VisitCache(IDataStore &store, size_t cacheSize, const document::Comp } VisitCache::Cache::IdSet -VisitCache::Cache::findSetsContaining(const vespalib::LockGuard &, const KeySet & keys) const { +VisitCache::Cache::findSetsContaining(const LockGuard &, const KeySet & keys) const { IdSet found; for (uint32_t subKey : keys.getKeys()) { const auto foundLid = _lid2Id.find(subKey); @@ -160,7 +168,7 @@ 
VisitCache::Cache::readSet(const KeySet & key) } void -VisitCache::Cache::locateAndInvalidateOtherSubsets(const vespalib::LockGuard & cacheGuard, const KeySet & keys) +VisitCache::Cache::locateAndInvalidateOtherSubsets(const LockGuard & cacheGuard, const KeySet & keys) { // Due to the implementation of insert where the global lock is released and the fact // that 2 overlapping keysets kan have different keys and use different ValueLock diff --git a/searchlib/src/vespa/searchlib/docstore/visitcache.h b/searchlib/src/vespa/searchlib/docstore/visitcache.h index fb1b61310f8..518280bcc09 100644 --- a/searchlib/src/vespa/searchlib/docstore/visitcache.h +++ b/searchlib/src/vespa/searchlib/docstore/visitcache.h @@ -47,7 +47,7 @@ public: using Positions = std::vector<LidPosition>; BlobSet(); - BlobSet(const Positions & positions, vespalib::DefaultAlloc && buffer); + BlobSet(const Positions & positions, vespalib::alloc::Alloc && buffer); void append(uint32_t lid, vespalib::ConstBufferRef blob); void remove(uint32_t lid); const Positions & getPositions() const { return _positions; } diff --git a/searchlib/src/vespa/searchlib/docstore/writeablefilechunk.cpp b/searchlib/src/vespa/searchlib/docstore/writeablefilechunk.cpp index c6d95ad79ad..3c7f5d283f7 100644 --- a/searchlib/src/vespa/searchlib/docstore/writeablefilechunk.cpp +++ b/searchlib/src/vespa/searchlib/docstore/writeablefilechunk.cpp @@ -128,9 +128,9 @@ WriteableFileChunk::~WriteableFileChunk() } size_t -WriteableFileChunk::updateLidMap(ISetLid & ds, uint64_t serialNum) +WriteableFileChunk::updateLidMap(const LockGuard & guard, ISetLid & ds, uint64_t serialNum) { - size_t sz = FileChunk::updateLidMap(ds, serialNum); + size_t sz = FileChunk::updateLidMap(guard, ds, serialNum); _nextChunkId = _chunkInfo.size(); _active.reset( new Chunk(_nextChunkId++, Chunk::Config(_config.getMaxChunkBytes(), _config.getMaxChunkEntries()))); _serialNum = getLastPersistedSerialNum(); diff --git 
a/searchlib/src/vespa/searchlib/docstore/writeablefilechunk.h b/searchlib/src/vespa/searchlib/docstore/writeablefilechunk.h index 97c6ad8d711..9ab07cd4935 100644 --- a/searchlib/src/vespa/searchlib/docstore/writeablefilechunk.h +++ b/searchlib/src/vespa/searchlib/docstore/writeablefilechunk.h @@ -72,7 +72,7 @@ public: size_t getDiskFootprint() const override; size_t getMemoryFootprint() const override; size_t getMemoryMetaFootprint() const override; - size_t updateLidMap(ISetLid & lidMap, uint64_t serialNum) override; + size_t updateLidMap(const LockGuard & guard, ISetLid & lidMap, uint64_t serialNum) override; void waitForDiskToCatchUpToNow() const; void flushPendingChunks(uint64_t serialNum); virtual DataStoreFileChunkStats getStats() const override; diff --git a/searchlib/src/vespa/searchlib/engine/searchreply.h b/searchlib/src/vespa/searchlib/engine/searchreply.h index 692806114ee..0e7ab43e66d 100644 --- a/searchlib/src/vespa/searchlib/engine/searchreply.h +++ b/searchlib/src/vespa/searchlib/engine/searchreply.h @@ -56,7 +56,7 @@ public: search::HitRank maxRank; std::vector<uint32_t> sortIndex; std::vector<char> sortData; - vespalib::Array<char, vespalib::DefaultAlloc> groupResult; + vespalib::Array<char> groupResult; bool useCoverage; Coverage coverage; bool useWideHits; diff --git a/searchlib/src/vespa/searchlib/features/dotproductfeature.cpp b/searchlib/src/vespa/searchlib/features/dotproductfeature.cpp index 51385a0b816..4035b0964af 100644 --- a/searchlib/src/vespa/searchlib/features/dotproductfeature.cpp +++ b/searchlib/src/vespa/searchlib/features/dotproductfeature.cpp @@ -3,8 +3,6 @@ #include <vespa/fastos/fastos.h> #include <vespa/log/log.h> LOG_SETUP(".features.dotproduct"); -#include <boost/algorithm/string/split.hpp> -#include <boost/algorithm/string/classification.hpp> #include <vespa/searchcommon/attribute/attributecontent.h> #include <vespa/searchlib/fef/properties.h> diff --git a/searchlib/src/vespa/searchlib/features/item_raw_score_feature.cpp 
b/searchlib/src/vespa/searchlib/features/item_raw_score_feature.cpp index a4dfc4821df..cf980940c82 100644 --- a/searchlib/src/vespa/searchlib/features/item_raw_score_feature.cpp +++ b/searchlib/src/vespa/searchlib/features/item_raw_score_feature.cpp @@ -63,7 +63,7 @@ ItemRawScoreBlueprint::createExecutor(const IQueryEnvironment &queryEnv) const } ItemRawScoreBlueprint::HandleVector -ItemRawScoreBlueprint::resolve(const search::fef::IQueryEnvironment &env, +ItemRawScoreBlueprint::resolve(const IQueryEnvironment &env, const vespalib::string &label) { HandleVector handles; diff --git a/searchlib/src/vespa/searchlib/features/item_raw_score_feature.h b/searchlib/src/vespa/searchlib/features/item_raw_score_feature.h index 10a6c30611d..d6a1914761e 100644 --- a/searchlib/src/vespa/searchlib/features/item_raw_score_feature.h +++ b/searchlib/src/vespa/searchlib/features/item_raw_score_feature.h @@ -4,57 +4,56 @@ #include <vespa/searchlib/fef/blueprint.h> #include <vespa/searchlib/fef/featureexecutor.h> -#include <vespa/vespalib/stllike/smallvector.h> namespace search { namespace features { -class ItemRawScoreExecutor : public search::fef::FeatureExecutor +class ItemRawScoreExecutor : public fef::FeatureExecutor { public: - typedef std::vector<search::fef::TermFieldHandle> HandleVector; + typedef std::vector<fef::TermFieldHandle> HandleVector; private: HandleVector _handles; public: ItemRawScoreExecutor(HandleVector handles) : FeatureExecutor(), _handles(handles) {} - virtual void execute(search::fef::MatchData &data); + virtual void execute(fef::MatchData &data); }; -class SimpleItemRawScoreExecutor : public search::fef::FeatureExecutor +class SimpleItemRawScoreExecutor : public fef::FeatureExecutor { private: - search::fef::TermFieldHandle _handle; + fef::TermFieldHandle _handle; public: - SimpleItemRawScoreExecutor(search::fef::TermFieldHandle handle) + SimpleItemRawScoreExecutor(fef::TermFieldHandle handle) : FeatureExecutor(), _handle(handle) {} - virtual void 
execute(search::fef::MatchData &data); + virtual void execute(fef::MatchData &data); }; //----------------------------------------------------------------------------- -class ItemRawScoreBlueprint : public search::fef::Blueprint +class ItemRawScoreBlueprint : public fef::Blueprint { private: - typedef std::vector<search::fef::TermFieldHandle> HandleVector; + typedef std::vector<fef::TermFieldHandle> HandleVector; vespalib::string _label; public: ItemRawScoreBlueprint() : Blueprint("itemRawScore"), _label() {} - virtual void visitDumpFeatures(const search::fef::IIndexEnvironment &, - search::fef::IDumpFeatureVisitor &) const {} - virtual search::fef::Blueprint::UP createInstance() const { + virtual void visitDumpFeatures(const fef::IIndexEnvironment &, + fef::IDumpFeatureVisitor &) const {} + virtual fef::Blueprint::UP createInstance() const { return Blueprint::UP(new ItemRawScoreBlueprint()); } - virtual search::fef::ParameterDescriptions getDescriptions() const { - return search::fef::ParameterDescriptions().desc().string(); + virtual fef::ParameterDescriptions getDescriptions() const { + return fef::ParameterDescriptions().desc().string(); } - virtual bool setup(const search::fef::IIndexEnvironment &env, - const search::fef::ParameterList ¶ms); - virtual search::fef::FeatureExecutor::LP - createExecutor(const search::fef::IQueryEnvironment &env) const; + virtual bool setup(const fef::IIndexEnvironment &env, + const fef::ParameterList ¶ms); + virtual fef::FeatureExecutor::LP + createExecutor(const fef::IQueryEnvironment &env) const; - static HandleVector resolve(const search::fef::IQueryEnvironment &env, + static HandleVector resolve(const fef::IQueryEnvironment &env, const vespalib::string &label); }; diff --git a/searchlib/src/vespa/searchlib/features/rankingexpressionfeature.cpp b/searchlib/src/vespa/searchlib/features/rankingexpressionfeature.cpp index 6c90d6d4148..0f2f22b4ef3 100644 --- a/searchlib/src/vespa/searchlib/features/rankingexpressionfeature.cpp +++ 
b/searchlib/src/vespa/searchlib/features/rankingexpressionfeature.cpp @@ -172,7 +172,7 @@ RankingExpressionBlueprint::setup(const fef::IIndexEnvironment &env, if (do_compile) { _compile_token = CompileCache::compile(rank_function, PassParams::ARRAY); } else { - _interpreted_function.reset(new InterpretedFunction(DefaultTensorEngine::ref(), rank_function)); + _interpreted_function.reset(new InterpretedFunction(DefaultTensorEngine::ref(), rank_function, node_types)); } } FeatureType output_type = do_compile diff --git a/searchlib/src/vespa/searchlib/fef/test/as_tensor.cpp b/searchlib/src/vespa/searchlib/fef/test/as_tensor.cpp index 4f604bb9f89..d9f23cb097f 100644 --- a/searchlib/src/vespa/searchlib/fef/test/as_tensor.cpp +++ b/searchlib/src/vespa/searchlib/fef/test/as_tensor.cpp @@ -9,6 +9,7 @@ #include <vespa/vespalib/eval/function.h> #include <iostream> +using vespalib::eval::NodeTypes; using vespalib::eval::Function; using vespalib::eval::ValueType; using vespalib::tensor::DefaultTensorEngine; @@ -19,7 +20,7 @@ namespace fef { namespace test { AsTensor::AsTensor(const vespalib::string &expr) - : ifun(DefaultTensorEngine::ref(), Function::parse(expr)), ctx(), result(&ifun.eval(ctx)) + : ifun(DefaultTensorEngine::ref(), Function::parse(expr), NodeTypes()), ctx(), result(&ifun.eval(ctx)) { ASSERT_TRUE(result->is_tensor()); tensor = static_cast<const Tensor *>(result->as_tensor()); diff --git a/searchlib/src/vespa/searchlib/grouping/collect.h b/searchlib/src/vespa/searchlib/grouping/collect.h index f2bdf014826..63b0950c460 100644 --- a/searchlib/src/vespa/searchlib/grouping/collect.h +++ b/searchlib/src/vespa/searchlib/grouping/collect.h @@ -7,8 +7,11 @@ namespace search { namespace grouping { -class Collect : public vespalib::noncopyable +class Collect { +public: + Collect(const Collect &) = delete; + Collect & operator = (const Collect &) = delete; protected: Collect(const aggregation::Group & protoType); ~Collect(); diff --git 
a/searchlib/src/vespa/searchlib/grouping/groupingengine.h b/searchlib/src/vespa/searchlib/grouping/groupingengine.h index 00187a0c818..32524f3ae73 100644 --- a/searchlib/src/vespa/searchlib/grouping/groupingengine.h +++ b/searchlib/src/vespa/searchlib/grouping/groupingengine.h @@ -7,11 +7,13 @@ namespace search { namespace grouping { -class GroupingEngine : private vespalib::noncopyable +class GroupingEngine { public: typedef std::vector<GroupEngine *> GroupEngines; public: + GroupingEngine(const GroupingEngine &) = delete; + GroupingEngine & operator = (const GroupingEngine &) = delete; GroupingEngine(aggregation::Grouping & request); GroupingEngine(vespalib::nbostream & request, bool oldWay); ~GroupingEngine(); diff --git a/searchlib/src/vespa/searchlib/grouping/sketch.h b/searchlib/src/vespa/searchlib/grouping/sketch.h index 0a475a9e805..e336955f337 100644 --- a/searchlib/src/vespa/searchlib/grouping/sketch.h +++ b/searchlib/src/vespa/searchlib/grouping/sketch.h @@ -226,19 +226,16 @@ decompress_buckets_from(char *buffer, uint32_t size) { memcpy(bucket, buffer, BUCKET_COUNT); } else { vespalib::ConstBufferRef compressed(buffer, size); - vespalib::DataBuffer uncompressed(reinterpret_cast<char *>(&bucket[0]), - BUCKET_COUNT); - document::decompress(document::CompressionConfig::LZ4, BUCKET_COUNT, - compressed, uncompressed, false); + vespalib::DataBuffer uncompressed(reinterpret_cast<char *>(&bucket[0]), BUCKET_COUNT); + document::decompress(document::CompressionConfig::LZ4, BUCKET_COUNT, compressed, uncompressed, false); } } template <int BucketBits, typename HashT> void NormalSketch<BucketBits, HashT>:: serialize(vespalib::Serializer &os) const { - vespalib::DefaultAlloc backing(LZ4_compressBound(BUCKET_COUNT)); + vespalib::alloc::Alloc backing(vespalib::DefaultAlloc::create(LZ4_compressBound(BUCKET_COUNT))); char * compress_array(static_cast<char *>(backing.get())); - uint32_t size = - compress_buckets_into(compress_array, backing.size()); + uint32_t size = 
compress_buckets_into(compress_array, backing.size()); os << BUCKET_COUNT << size; for (size_t i = 0; i < size; ++i) { os << static_cast<uint8_t>(compress_array[i]); diff --git a/searchlib/src/vespa/searchlib/memoryindex/fieldinverter.h b/searchlib/src/vespa/searchlib/memoryindex/fieldinverter.h index 1f72c8e62b4..47d1031d546 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/fieldinverter.h +++ b/searchlib/src/vespa/searchlib/memoryindex/fieldinverter.h @@ -98,7 +98,7 @@ private: FieldInverter &operator=(const FieldInverter &) = delete; FieldInverter &operator=(const FieldInverter &&) = delete; - typedef vespalib::Array<char, vespalib::DefaultAlloc> WordBuffer; + typedef vespalib::Array<char> WordBuffer; class ElemInfo { diff --git a/searchlib/src/vespa/searchlib/memoryindex/memoryfieldindex.cpp b/searchlib/src/vespa/searchlib/memoryindex/memoryfieldindex.cpp index 88b718e1860..ba0c8e95efa 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/memoryfieldindex.cpp +++ b/searchlib/src/vespa/searchlib/memoryindex/memoryfieldindex.cpp @@ -173,7 +173,7 @@ MemoryFieldIndex::dump(search::index::IndexBuilder & indexBuilder) vespalib::stringref word; FeatureStore::DecodeContextCooked decoder(NULL); DocIdAndFeatures features; - vespalib::Array<uint32_t, vespalib::DefaultAlloc> wordMap(_numUniqueWords + 1, 0); + vespalib::Array<uint32_t> wordMap(_numUniqueWords + 1, 0); _featureStore.setupForField(_fieldId, decoder); for (DictionaryTree::Iterator itr = _dict.begin(); itr.valid(); ++itr) { const WordKey & wk = itr.getKey(); diff --git a/searchlib/src/vespa/searchlib/parsequery/simplequerystack.cpp b/searchlib/src/vespa/searchlib/parsequery/simplequerystack.cpp index 146b4aeeff4..06b92a429d8 100644 --- a/searchlib/src/vespa/searchlib/parsequery/simplequerystack.cpp +++ b/searchlib/src/vespa/searchlib/parsequery/simplequerystack.cpp @@ -14,6 +14,8 @@ #include <vespa/vespalib/objects/nbostream.h> #include <vespa/searchlib/parsequery/simplequerystack.h> 
+LOG_SETUP(".search.simplequerystack"); + using vespalib::make_vespa_string; namespace search { @@ -319,6 +321,7 @@ SimpleQueryStack::StackbufToString(const vespalib::stringref &theBuf) } default: + LOG(error, "Unhandled type %d", type); abort(); } } diff --git a/searchlib/src/vespa/searchlib/parsequery/stackdumpiterator.h b/searchlib/src/vespa/searchlib/parsequery/stackdumpiterator.h index 451ea226d86..4ccd7660a0c 100644 --- a/searchlib/src/vespa/searchlib/parsequery/stackdumpiterator.h +++ b/searchlib/src/vespa/searchlib/parsequery/stackdumpiterator.h @@ -84,6 +84,9 @@ public: SimpleQueryStackDumpIterator(const vespalib::stringref &buf); ~SimpleQueryStackDumpIterator(); + vespalib::stringref getStack() const { return vespalib::stringref(_buf, _bufLen); } + size_t getPosition() const { return _currPos - _buf; } + /** * Moves to the next item in the buffer. * @@ -149,16 +152,8 @@ public: query::PredicateQueryTerm::UP getPredicateQueryTerm() { return std::move(_predicate_query_term); } - /** - * Get the type of the current item. - * @return the type. - */ - void getIndexName(const char **buf, size_t *buflen) const { *buf = _currIndexName; *buflen = _currIndexNameLen; } - /** - * Get the type of the current item. - * @return the type. 
- */ - void getTerm(const char **buf, size_t *buflen) const { *buf = _currTerm; *buflen = _currTermLen; } + vespalib::stringref getIndexName() const { return vespalib::stringref(_currIndexName, _currIndexNameLen); } + vespalib::stringref getTerm() const { return vespalib::stringref(_currTerm, _currTermLen); } }; } diff --git a/searchlib/src/vespa/searchlib/predicate/document_features_store.cpp b/searchlib/src/vespa/searchlib/predicate/document_features_store.cpp index 5e5e1617c10..c371b4be991 100644 --- a/searchlib/src/vespa/searchlib/predicate/document_features_store.cpp +++ b/searchlib/src/vespa/searchlib/predicate/document_features_store.cpp @@ -16,7 +16,7 @@ LOG_SETUP(".document_features_store"); using search::btree::BTreeNoLeafData; using search::btree::EntryRef; -using vespalib::MMapDataBuffer; +using vespalib::DataBuffer; using vespalib::stringref; using std::unordered_map; using std::vector; @@ -44,7 +44,7 @@ DocumentFeaturesStore::DocumentFeaturesStore(uint32_t arity) namespace { template <typename KeyComp, typename WordIndex> -void deserializeWords(MMapDataBuffer &buffer, +void deserializeWords(DataBuffer &buffer, memoryindex::WordStore &word_store, WordIndex &word_index, vector<EntryRef> &word_refs) { @@ -63,7 +63,7 @@ void deserializeWords(MMapDataBuffer &buffer, } template <typename RangeFeaturesMap> -void deserializeRanges(MMapDataBuffer &buffer, vector<EntryRef> &word_refs, +void deserializeRanges(DataBuffer &buffer, vector<EntryRef> &word_refs, RangeFeaturesMap &ranges, size_t &num_ranges) { typedef typename RangeFeaturesMap::mapped_type::value_type Range; uint32_t ranges_size = buffer.readInt32(); @@ -84,7 +84,7 @@ void deserializeRanges(MMapDataBuffer &buffer, vector<EntryRef> &word_refs, } template <typename DocumentFeaturesMap> -void deserializeDocs(MMapDataBuffer &buffer, DocumentFeaturesMap &docs, +void deserializeDocs(DataBuffer &buffer, DocumentFeaturesMap &docs, size_t &num_features) { uint32_t docs_size = buffer.readInt32(); for (uint32_t 
i = 0; i < docs_size; ++i) { @@ -100,7 +100,7 @@ void deserializeDocs(MMapDataBuffer &buffer, DocumentFeaturesMap &docs, } } // namespace -DocumentFeaturesStore::DocumentFeaturesStore(MMapDataBuffer &buffer) +DocumentFeaturesStore::DocumentFeaturesStore(DataBuffer &buffer) : DocumentFeaturesStore(0) { _arity = buffer.readInt16(); @@ -238,7 +238,7 @@ void findUsedWords(const RangeFeaturesMap &ranges, } } -void serializeWords(MMapDataBuffer &buffer, const vector<EntryRef> &word_list, +void serializeWords(DataBuffer &buffer, const vector<EntryRef> &word_list, const memoryindex::WordStore &word_store) { buffer.writeInt32(word_list.size()); for (const auto &word_ref : word_list) { @@ -250,7 +250,7 @@ void serializeWords(MMapDataBuffer &buffer, const vector<EntryRef> &word_list, } template <typename RangeFeaturesMap> -void serializeRanges(MMapDataBuffer &buffer, RangeFeaturesMap &ranges, +void serializeRanges(DataBuffer &buffer, RangeFeaturesMap &ranges, unordered_map<uint32_t, uint32_t> &word_map) { buffer.writeInt32(ranges.size()); for (const auto &range_features_entry : ranges) { @@ -265,7 +265,7 @@ void serializeRanges(MMapDataBuffer &buffer, RangeFeaturesMap &ranges, } template <typename DocumentFeaturesMap> -void serializeDocs(MMapDataBuffer &buffer, DocumentFeaturesMap &docs) { +void serializeDocs(DataBuffer &buffer, DocumentFeaturesMap &docs) { buffer.writeInt32(docs.size()); for (const auto &doc_features_entry : docs) { buffer.writeInt32(doc_features_entry.first); // doc id @@ -277,7 +277,7 @@ void serializeDocs(MMapDataBuffer &buffer, DocumentFeaturesMap &docs) { } } // namespace -void DocumentFeaturesStore::serialize(MMapDataBuffer &buffer) const { +void DocumentFeaturesStore::serialize(DataBuffer &buffer) const { vector<EntryRef> word_list; unordered_map<uint32_t, uint32_t> word_map; diff --git a/searchlib/src/vespa/searchlib/predicate/document_features_store.h b/searchlib/src/vespa/searchlib/predicate/document_features_store.h index 314e7347f27..8d910f8506c 
100644 --- a/searchlib/src/vespa/searchlib/predicate/document_features_store.h +++ b/searchlib/src/vespa/searchlib/predicate/document_features_store.h @@ -72,7 +72,7 @@ public: typedef std::unordered_set<uint64_t> FeatureSet; DocumentFeaturesStore(uint32_t arity); - DocumentFeaturesStore(vespalib::MMapDataBuffer &buffer); + DocumentFeaturesStore(vespalib::DataBuffer &buffer); ~DocumentFeaturesStore(); void insert(uint64_t featureId, uint32_t docId); @@ -81,7 +81,7 @@ public: void remove(uint32_t docId); search::MemoryUsage getMemoryUsage() const; - void serialize(vespalib::MMapDataBuffer &buffer) const; + void serialize(vespalib::DataBuffer &buffer) const; }; } // namespace predicate diff --git a/searchlib/src/vespa/searchlib/predicate/predicate_index.cpp b/searchlib/src/vespa/searchlib/predicate/predicate_index.cpp index 81533c36951..6e597d55f3b 100644 --- a/searchlib/src/vespa/searchlib/predicate/predicate_index.cpp +++ b/searchlib/src/vespa/searchlib/predicate/predicate_index.cpp @@ -12,7 +12,7 @@ LOG_SETUP(".predicate_index"); using search::btree::EntryRef; -using vespalib::MMapDataBuffer; +using vespalib::DataBuffer; using std::vector; namespace search { @@ -63,7 +63,7 @@ class IntervalSerializer : public PostingSerializer<EntryRef> { public: IntervalSerializer(const PredicateIntervalStore &store) : _store(store) {} virtual void serialize(const EntryRef &ref, - vespalib::MMapDataBuffer &buffer) const { + vespalib::DataBuffer &buffer) const { uint32_t size; IntervalT single_buf; const IntervalT *interval = _store.get(ref, size, &single_buf); @@ -81,7 +81,7 @@ class IntervalDeserializer : public PostingDeserializer<EntryRef> { PredicateIntervalStore &_store; public: IntervalDeserializer(PredicateIntervalStore &store) : _store(store) {} - virtual EntryRef deserialize(vespalib::MMapDataBuffer &buffer) { + virtual EntryRef deserialize(vespalib::DataBuffer &buffer) { std::vector<IntervalT> intervals; size_t size = buffer.readInt16(); for (uint32_t i = 0; i < size; 
++i) { @@ -95,7 +95,7 @@ public: PredicateIndex::PredicateIndex(GenerationHandler &generation_handler, GenerationHolder &genHolder, const DocIdLimitProvider &limit_provider, - const SimpleIndexConfig &simple_index_config, MMapDataBuffer &buffer, + const SimpleIndexConfig &simple_index_config, DataBuffer &buffer, SimpleIndexDeserializeObserver<> & observer, uint32_t version) : _arity(0), _generation_handler(generation_handler), @@ -125,7 +125,7 @@ PredicateIndex::PredicateIndex(GenerationHandler &generation_handler, Generation commit(); } -void PredicateIndex::serialize(MMapDataBuffer &buffer) const { +void PredicateIndex::serialize(DataBuffer &buffer) const { _features_store.serialize(buffer); buffer.writeInt16(_arity); buffer.writeInt32(_zero_constraint_docs.size()); diff --git a/searchlib/src/vespa/searchlib/predicate/predicate_index.h b/searchlib/src/vespa/searchlib/predicate/predicate_index.h index d2e5c1f268e..3673f4378ad 100644 --- a/searchlib/src/vespa/searchlib/predicate/predicate_index.h +++ b/searchlib/src/vespa/searchlib/predicate/predicate_index.h @@ -82,10 +82,10 @@ public: // The observer can be used to gain some insight into what has been added to the index.. 
PredicateIndex(GenerationHandler &generation_handler, GenerationHolder &genHolder, const DocIdLimitProvider &limit_provider, - const SimpleIndexConfig &simple_index_config, vespalib::MMapDataBuffer &buffer, + const SimpleIndexConfig &simple_index_config, vespalib::DataBuffer &buffer, SimpleIndexDeserializeObserver<> & observer, uint32_t version); - void serialize(vespalib::MMapDataBuffer &buffer) const; + void serialize(vespalib::DataBuffer &buffer) const; void onDeserializationCompleted(); void indexEmptyDocument(uint32_t doc_id); diff --git a/searchlib/src/vespa/searchlib/predicate/predicate_interval.h b/searchlib/src/vespa/searchlib/predicate/predicate_interval.h index fede659582a..3a288c4aedb 100644 --- a/searchlib/src/vespa/searchlib/predicate/predicate_interval.h +++ b/searchlib/src/vespa/searchlib/predicate/predicate_interval.h @@ -16,10 +16,10 @@ struct Interval { Interval() : interval(0) {} Interval(uint32_t interval_) : interval(interval_) {} - void serialize(vespalib::MMapDataBuffer &buffer) const { + void serialize(vespalib::DataBuffer &buffer) const { buffer.writeInt32(interval); } - static Interval deserialize(vespalib::MMapDataBuffer &buffer) { + static Interval deserialize(vespalib::DataBuffer &buffer) { return Interval{buffer.readInt32()}; } bool operator==(const Interval &other) const { @@ -42,11 +42,11 @@ struct IntervalWithBounds { IntervalWithBounds() : interval(0), bounds(0) {} IntervalWithBounds(uint32_t interval_, uint32_t bounds_) : interval(interval_), bounds(bounds_) {} - void serialize(vespalib::MMapDataBuffer &buffer) const { + void serialize(vespalib::DataBuffer &buffer) const { buffer.writeInt32(interval); buffer.writeInt32(bounds); } - static IntervalWithBounds deserialize(vespalib::MMapDataBuffer &buffer) { + static IntervalWithBounds deserialize(vespalib::DataBuffer &buffer) { uint32_t interval = buffer.readInt32(); uint32_t bounds = buffer.readInt32(); return IntervalWithBounds{interval, bounds}; diff --git 
a/searchlib/src/vespa/searchlib/predicate/simple_index.h b/searchlib/src/vespa/searchlib/predicate/simple_index.h index be6fc098682..b61f31f79a0 100644 --- a/searchlib/src/vespa/searchlib/predicate/simple_index.h +++ b/searchlib/src/vespa/searchlib/predicate/simple_index.h @@ -31,13 +31,13 @@ template <typename Posting> struct PostingSerializer { virtual ~PostingSerializer() {} virtual void serialize(const Posting &posting, - vespalib::MMapDataBuffer &buffer) const = 0; + vespalib::DataBuffer &buffer) const = 0; }; template <typename Posting> struct PostingDeserializer { virtual ~PostingDeserializer() {} - virtual Posting deserialize(vespalib::MMapDataBuffer &buffer) = 0; + virtual Posting deserialize(vespalib::DataBuffer &buffer) = 0; }; struct DocIdLimitProvider { @@ -195,9 +195,9 @@ public: : _generation_holder(generation_holder), _config(config), _limit_provider(provider) {} ~SimpleIndex(); - void serialize(vespalib::MMapDataBuffer &buffer, + void serialize(vespalib::DataBuffer &buffer, const PostingSerializer<Posting> &serializer) const; - void deserialize(vespalib::MMapDataBuffer &buffer, + void deserialize(vespalib::DataBuffer &buffer, PostingDeserializer<Posting> &deserializer, SimpleIndexDeserializeObserver<Key, DocId> &observer, uint32_t version); diff --git a/searchlib/src/vespa/searchlib/predicate/simple_index.hpp b/searchlib/src/vespa/searchlib/predicate/simple_index.hpp index 10ba3e79a02..ca597708735 100644 --- a/searchlib/src/vespa/searchlib/predicate/simple_index.hpp +++ b/searchlib/src/vespa/searchlib/predicate/simple_index.hpp @@ -69,7 +69,7 @@ SimpleIndex<Posting, Key, DocId>::~SimpleIndex() { template <typename Posting, typename Key, typename DocId> void SimpleIndex<Posting, Key, DocId>::serialize( - vespalib::MMapDataBuffer &buffer, + vespalib::DataBuffer &buffer, const PostingSerializer<Posting> &serializer) const { assert(sizeof(Key) <= sizeof(uint64_t)); assert(sizeof(DocId) <= sizeof(uint32_t)); @@ -90,7 +90,7 @@ void SimpleIndex<Posting, 
Key, DocId>::serialize( template <typename Posting, typename Key, typename DocId> void SimpleIndex<Posting, Key, DocId>::deserialize( - vespalib::MMapDataBuffer &buffer, + vespalib::DataBuffer &buffer, PostingDeserializer<Posting> &deserializer, SimpleIndexDeserializeObserver<Key, DocId> &observer, uint32_t version) { typename Dictionary::Builder builder(_dictionary.getAllocator()); diff --git a/searchlib/src/vespa/searchlib/query/querynode.cpp b/searchlib/src/vespa/searchlib/query/querynode.cpp index c9428e513d6..17df761af83 100644 --- a/searchlib/src/vespa/searchlib/query/querynode.cpp +++ b/searchlib/src/vespa/searchlib/query/querynode.cpp @@ -61,10 +61,7 @@ QueryNode::UP QueryNode::Build(const QueryNode * parent, const QueryNodeResultBa (type == search::ParseItem::ITEM_DOT_PRODUCT) || (type == search::ParseItem::ITEM_WAND)) { - const char * index; - size_t indexLen(0); - queryRep.getIndexName(&index, &indexLen); - qn->setIndex(vespalib::string(index, indexLen)); + qn->setIndex(queryRep.getIndexName()); } for (size_t i=0; i < arity; i++) { queryRep.next(); @@ -91,22 +88,15 @@ QueryNode::UP QueryNode::Build(const QueryNode * parent, const QueryNodeResultBa case search::ParseItem::ITEM_PURE_WEIGHTED_STRING: case search::ParseItem::ITEM_PURE_WEIGHTED_LONG: { - const char * index; - size_t indexLen(0); - queryRep.getIndexName(&index, &indexLen); - if (indexLen == 0) { + vespalib::stringref index = queryRep.getIndexName(); + if (index.empty()) { if ((type == search::ParseItem::ITEM_PURE_WEIGHTED_STRING) || (type == search::ParseItem::ITEM_PURE_WEIGHTED_LONG)) { - const vespalib::string & ref = parent->getIndex(); - index = ref.c_str(); - indexLen = ref.size(); + index = parent->getIndex(); } else { - index = "default"; - indexLen = strlen(index); + index = DEFAULT; } } - const char * term; - size_t termLen(0); - queryRep.getTerm(&term, &termLen); + vespalib::stringref term = queryRep.getTerm(); QueryTerm::SearchTerm sTerm(QueryTerm::WORD); switch (type) { case 
search::ParseItem::ITEM_REGEXP: @@ -127,8 +117,8 @@ QueryNode::UP QueryNode::Build(const QueryNode * parent, const QueryNodeResultBa default: break; } - QueryTerm::string ssTerm(term, termLen); - QueryTerm::string ssIndex(index, indexLen); + QueryTerm::string ssTerm(term); + QueryTerm::string ssIndex(index); if (ssIndex == "sddocname") { // This is suboptimal as the term should be checked too. // But it will do for now as only correct sddocname queries are sent down. diff --git a/searchlib/src/vespa/searchlib/query/queryterm.cpp b/searchlib/src/vespa/searchlib/query/queryterm.cpp index b3d3768f42a..96593b9a0e2 100644 --- a/searchlib/src/vespa/searchlib/query/queryterm.cpp +++ b/searchlib/src/vespa/searchlib/query/queryterm.cpp @@ -5,7 +5,7 @@ #include <vespa/searchlib/query/queryterm.h> #include <vespa/vespalib/objects/visit.h> #include <vespa/vespalib/text/utf8.h> -#include <cxxabi.h> +#include <vespa/vespalib/util/classname.h> namespace { @@ -439,13 +439,7 @@ QueryTermSimple::getAsNumericTerm(T & lower, T & upper, D d) const vespalib::string QueryTermSimple::getClassName() const { - vespalib::string name(typeid(*this).name()); - int status = 0; - size_t size = 0; - char *unmangled = abi::__cxa_demangle(name.c_str(), 0, &size, &status); - vespalib::string result(unmangled); - free(unmangled); - return result; + return vespalib::getClassName(*this); } } diff --git a/searchlib/src/vespa/searchlib/query/tree/querybuilder.cpp b/searchlib/src/vespa/searchlib/query/tree/querybuilder.cpp index ae8c2012049..2b5dfe078c9 100644 --- a/searchlib/src/vespa/searchlib/query/tree/querybuilder.cpp +++ b/searchlib/src/vespa/searchlib/query/tree/querybuilder.cpp @@ -2,21 +2,31 @@ #include <vespa/fastos/fastos.h> #include <vespa/log/log.h> -LOG_SETUP(".querybuilder"); - #include "querybuilder.h" - #include "intermediate.h" +#include <vespa/vespalib/util/classname.h> +#include <vespa/vespalib/util/stringfmt.h> + +LOG_SETUP(".querybuilder"); using vespalib::string; +using 
vespalib::make_string; +using vespalib::getClassName; + using namespace search::query; -void QueryBuilderBase::reportError(const vespalib::string &msg) { +void QueryBuilderBase::reportError(const string &msg) { if (!hasError()) { _error_msg = msg; } } + +void QueryBuilderBase::reportError(const string &msg, const Node & incomming, const Node & root) { + reportError(make_string("%s: QueryBuilder got invalid node structure. Incomming node is '%s', while root is non-null('%s')", + msg.c_str(), getClassName(incomming).c_str(), getClassName(root).c_str())); +} + QueryBuilderBase::QueryBuilderBase() : _root(), _nodes(), @@ -35,11 +45,11 @@ void QueryBuilderBase::addCompleteNode(Node *n) return; } if (_nodes.empty()) { - if (!_root.get()) { + if (!_root) { _root = std::move(node); - return; + } else { + reportError("QueryBuilderBase::addCompleteNode", *node, *_root); } - reportError("QueryBuilder got invalid node structure."); return; } @@ -56,8 +66,8 @@ void QueryBuilderBase::addIntermediateNode(Intermediate *n, int child_count) { Intermediate::UP node(n); if (!hasError()) { - if (_root.get()) { - reportError("QueryBuilder got invalid node structure."); + if (_root) { + reportError("QueryBuilderBase::addIntermediateNode", *node, *_root); } else { node->reserve(child_count); WeightOverride weight_override; @@ -76,16 +86,16 @@ void QueryBuilderBase::addIntermediateNode(Intermediate *n, int child_count) } void QueryBuilderBase::setWeightOverride(const Weight &weight) { - assert(!_nodes.empty()); - _nodes.top().weight_override = WeightOverride(weight); + if ( !hasError() ) { + _nodes.top().weight_override = WeightOverride(weight); + } } Node::UP QueryBuilderBase::build() { - if (!_root.get()) { - reportError("Trying to build incomplete query tree."); - } if (!_nodes.empty()) { - reportError("QueryBuilder got invalid node structure."); + reportError("QueryBuilderBase::build: QueryBuilder got invalid node structure. 
_nodes are not empty."); + } else if (!_root) { + reportError("QueryBuilderBase::build: Trying to build incomplete query tree."); } if (hasError()) { return Node::UP(); diff --git a/searchlib/src/vespa/searchlib/query/tree/querybuilder.h b/searchlib/src/vespa/searchlib/query/tree/querybuilder.h index b5cbdb07a13..934d1c9dff3 100644 --- a/searchlib/src/vespa/searchlib/query/tree/querybuilder.h +++ b/searchlib/src/vespa/searchlib/query/tree/querybuilder.h @@ -53,6 +53,7 @@ class QueryBuilderBase vespalib::string _error_msg; void reportError(const vespalib::string &msg); + void reportError(const vespalib::string &msg, const Node & incomming, const Node & root); protected: QueryBuilderBase(); @@ -119,29 +120,26 @@ typename NodeTypes::Equiv *createEquiv(int32_t id, Weight weight) { return new typename NodeTypes::Equiv(id, weight); } template <class NodeTypes> -typename NodeTypes::Phrase *createPhrase( - const vespalib::stringref &view, int32_t id, Weight weight) { +typename NodeTypes::Phrase *createPhrase(const vespalib::stringref &view, int32_t id, Weight weight) { return new typename NodeTypes::Phrase(view, id, weight); } template <class NodeTypes> -typename NodeTypes::WeightedSetTerm *createWeightedSetTerm( - const vespalib::stringref &view, int32_t id, Weight weight) { +typename NodeTypes::WeightedSetTerm *createWeightedSetTerm(const vespalib::stringref &view, int32_t id, Weight weight) { return new typename NodeTypes::WeightedSetTerm(view, id, weight); } template <class NodeTypes> -typename NodeTypes::DotProduct *createDotProduct( - const vespalib::stringref &view, int32_t id, Weight weight) { +typename NodeTypes::DotProduct *createDotProduct(const vespalib::stringref &view, int32_t id, Weight weight) { return new typename NodeTypes::DotProduct(view, id, weight); } template <class NodeTypes> -typename NodeTypes::WandTerm *createWandTerm( - const vespalib::stringref &view, int32_t id, Weight weight, - uint32_t targetNumHits, int64_t scoreThreshold, double 
thresholdBoostFactor) { - return new typename NodeTypes::WandTerm(view, id, weight, - targetNumHits, scoreThreshold, thresholdBoostFactor); +typename NodeTypes::WandTerm * +createWandTerm(const vespalib::stringref &view, int32_t id, Weight weight, uint32_t targetNumHits, int64_t scoreThreshold, double thresholdBoostFactor) { + return new typename NodeTypes::WandTerm(view, id, weight, targetNumHits, scoreThreshold, thresholdBoostFactor); } template <class NodeTypes> -typename NodeTypes::Rank *createRank() { return new typename NodeTypes::Rank; } +typename NodeTypes::Rank *createRank() { + return new typename NodeTypes::Rank; +} template <class NodeTypes> typename NodeTypes::Near *createNear(size_t distance) { @@ -154,69 +152,51 @@ typename NodeTypes::ONear *createONear(size_t distance) { // Term nodes template <class NodeTypes> -typename NodeTypes::NumberTerm *createNumberTerm( - const vespalib::stringref &term, const vespalib::stringref &view, int32_t id, Weight weight) -{ +typename NodeTypes::NumberTerm * +createNumberTerm(const vespalib::stringref &term, const vespalib::stringref &view, int32_t id, Weight weight) { return new typename NodeTypes::NumberTerm(term, view, id, weight); } template <class NodeTypes> -typename NodeTypes::PrefixTerm *createPrefixTerm( - const vespalib::stringref &term, const vespalib::stringref &view, - int32_t id, Weight weight) -{ +typename NodeTypes::PrefixTerm * +createPrefixTerm(const vespalib::stringref &term, const vespalib::stringref &view, int32_t id, Weight weight) { return new typename NodeTypes::PrefixTerm(term, view, id, weight); } template <class NodeTypes> -typename NodeTypes::RangeTerm *createRangeTerm( - const Range &term, const vespalib::stringref &view, - int32_t id, Weight weight) -{ +typename NodeTypes::RangeTerm * +createRangeTerm(const Range &term, const vespalib::stringref &view, int32_t id, Weight weight) { return new typename NodeTypes::RangeTerm(term, view, id, weight); } template <class NodeTypes> -typename 
NodeTypes::StringTerm *createStringTerm( - const vespalib::stringref &term, const vespalib::stringref &view, - int32_t id, Weight weight) -{ +typename NodeTypes::StringTerm * +createStringTerm(const vespalib::stringref &term, const vespalib::stringref &view, int32_t id, Weight weight) { return new typename NodeTypes::StringTerm(term, view, id, weight); } template <class NodeTypes> -typename NodeTypes::SubstringTerm *createSubstringTerm( - const vespalib::stringref &term, const vespalib::stringref &view, - int32_t id, Weight weight) -{ +typename NodeTypes::SubstringTerm * +createSubstringTerm(const vespalib::stringref &term, const vespalib::stringref &view, int32_t id, Weight weight) { return new typename NodeTypes::SubstringTerm(term, view, id, weight); } template <class NodeTypes> -typename NodeTypes::SuffixTerm *createSuffixTerm( - const vespalib::stringref &term, const vespalib::stringref &view, - int32_t id, Weight weight) -{ +typename NodeTypes::SuffixTerm * +createSuffixTerm(const vespalib::stringref &term, const vespalib::stringref &view, int32_t id, Weight weight) { return new typename NodeTypes::SuffixTerm(term, view, id, weight); } template <class NodeTypes> -typename NodeTypes::LocationTerm *createLocationTerm( - const Location &loc, const vespalib::stringref &view, - int32_t id, Weight weight) -{ +typename NodeTypes::LocationTerm * +createLocationTerm(const Location &loc, const vespalib::stringref &view, int32_t id, Weight weight) { return new typename NodeTypes::LocationTerm(loc, view, id, weight); } template <class NodeTypes> -typename NodeTypes::PredicateQuery *createPredicateQuery( - PredicateQueryTerm::UP term, const vespalib::stringref &view, - int32_t id, Weight weight) -{ - return new typename NodeTypes::PredicateQuery( - std::move(term), view, id, weight); +typename NodeTypes::PredicateQuery * +createPredicateQuery(PredicateQueryTerm::UP term, const vespalib::stringref &view, int32_t id, Weight weight) { + return new typename 
NodeTypes::PredicateQuery(std::move(term), view, id, weight); } template <class NodeTypes> -typename NodeTypes::RegExpTerm *createRegExpTerm( - const vespalib::stringref &term, const vespalib::stringref &view, - int32_t id, Weight weight) -{ +typename NodeTypes::RegExpTerm * +createRegExpTerm(const vespalib::stringref &term, const vespalib::stringref &view, int32_t id, Weight weight) { return new typename NodeTypes::RegExpTerm(term, view, id, weight); } @@ -235,6 +215,7 @@ class QueryBuilder : public QueryBuilderBase { } public: + using stringref = vespalib::stringref; typename NodeTypes::And &addAnd(int child_count) { return addIntermediate(createAnd<NodeTypes>(), child_count); } @@ -250,45 +231,36 @@ public: typename NodeTypes::Or &addOr(int child_count) { return addIntermediate(createOr<NodeTypes>(), child_count); } - typename NodeTypes::WeakAnd &addWeakAnd(int child_count, uint32_t minHits, const vespalib::stringref & view) { + typename NodeTypes::WeakAnd &addWeakAnd(int child_count, uint32_t minHits, const stringref & view) { return addIntermediate(createWeakAnd<NodeTypes>(minHits, view), child_count); } typename NodeTypes::Equiv &addEquiv(int child_count, int32_t id, Weight weight) { return addIntermediate(createEquiv<NodeTypes>(id, weight), child_count); } - typename NodeTypes::Phrase &addPhrase( - int child_count, const vespalib::stringref &view, - int32_t id, Weight weight) { + typename NodeTypes::Phrase &addPhrase(int child_count, const stringref &view, int32_t id, Weight weight) { adjustWeight(weight); - typename NodeTypes::Phrase &node = addIntermediate( - createPhrase<NodeTypes>(view, id, weight), child_count); + typename NodeTypes::Phrase &node = addIntermediate(createPhrase<NodeTypes>(view, id, weight), child_count); setWeightOverride(weight); return node; } - typename NodeTypes::WeightedSetTerm &addWeightedSetTerm( - int child_count, const vespalib::stringref &view, - int32_t id, Weight weight) { + typename NodeTypes::WeightedSetTerm 
&addWeightedSetTerm( int child_count, const stringref &view, int32_t id, Weight weight) { adjustWeight(weight); - typename NodeTypes::WeightedSetTerm &node = addIntermediate( - createWeightedSetTerm<NodeTypes>(view, id, weight), child_count); + typename NodeTypes::WeightedSetTerm &node = addIntermediate(createWeightedSetTerm<NodeTypes>(view, id, weight), child_count); return node; } - typename NodeTypes::DotProduct &addDotProduct( - int child_count, const vespalib::stringref &view, - int32_t id, Weight weight) { + typename NodeTypes::DotProduct &addDotProduct( int child_count, const stringref &view, int32_t id, Weight weight) { adjustWeight(weight); - typename NodeTypes::DotProduct &node = addIntermediate( - createDotProduct<NodeTypes>(view, id, weight), child_count); + typename NodeTypes::DotProduct &node = addIntermediate( createDotProduct<NodeTypes>(view, id, weight), child_count); return node; } typename NodeTypes::WandTerm &addWandTerm( - int child_count, const vespalib::stringref &view, + int child_count, const stringref &view, int32_t id, Weight weight, uint32_t targetNumHits, - int64_t scoreThreshold, double thresholdBoostFactor) { + int64_t scoreThreshold, double thresholdBoostFactor) + { adjustWeight(weight); typename NodeTypes::WandTerm &node = addIntermediate( - createWandTerm<NodeTypes>(view, id, weight, - targetNumHits, scoreThreshold, thresholdBoostFactor), + createWandTerm<NodeTypes>(view, id, weight, targetNumHits, scoreThreshold, thresholdBoostFactor), child_count); return node; } @@ -296,58 +268,39 @@ public: return addIntermediate(createRank<NodeTypes>(), child_count); } - typename NodeTypes::NumberTerm &addNumberTerm( - const vespalib::stringref &term, const vespalib::stringref &view, - int32_t id, Weight weight) { + typename NodeTypes::NumberTerm &addNumberTerm(const stringref &term, const stringref &view, int32_t id, Weight weight) { adjustWeight(weight); return addTerm(createNumberTerm<NodeTypes>(term, view, id, weight)); } - typename 
NodeTypes::PrefixTerm &addPrefixTerm( - const vespalib::stringref &term, const vespalib::stringref &view, - int32_t id, Weight weight) { + typename NodeTypes::PrefixTerm &addPrefixTerm(const stringref &term, const stringref &view, int32_t id, Weight weight) { adjustWeight(weight); return addTerm(createPrefixTerm<NodeTypes>(term, view, id, weight)); } - typename NodeTypes::RangeTerm &addRangeTerm( - const Range &range, const vespalib::stringref &view, - int32_t id, Weight weight) { + typename NodeTypes::RangeTerm &addRangeTerm(const Range &range, const stringref &view, int32_t id, Weight weight) { adjustWeight(weight); return addTerm(createRangeTerm<NodeTypes>(range, view, id, weight)); } - typename NodeTypes::StringTerm &addStringTerm( - const vespalib::stringref &term, const vespalib::stringref &view, - int32_t id, Weight weight) { + typename NodeTypes::StringTerm &addStringTerm(const stringref &term, const stringref &view, int32_t id, Weight weight) { adjustWeight(weight); return addTerm(createStringTerm<NodeTypes>(term, view, id, weight)); } - typename NodeTypes::SubstringTerm &addSubstringTerm( - const vespalib::stringref &t, const vespalib::stringref &view, - int32_t id, Weight weight) { + typename NodeTypes::SubstringTerm &addSubstringTerm(const stringref &t, const stringref &view, int32_t id, Weight weight) { adjustWeight(weight); return addTerm(createSubstringTerm<NodeTypes>(t, view, id, weight)); } - typename NodeTypes::SuffixTerm &addSuffixTerm( - const vespalib::stringref &term, const vespalib::stringref &view, - int32_t id, Weight weight) { + typename NodeTypes::SuffixTerm &addSuffixTerm(const stringref &term, const stringref &view, int32_t id, Weight weight) { adjustWeight(weight); return addTerm(createSuffixTerm<NodeTypes>(term, view, id, weight)); } - typename NodeTypes::LocationTerm &addLocationTerm( - const Location &loc, const vespalib::stringref &view, - int32_t id, Weight weight) { + typename NodeTypes::LocationTerm &addLocationTerm(const 
Location &loc, const stringref &view, int32_t id, Weight weight) { adjustWeight(weight); return addTerm(createLocationTerm<NodeTypes>(loc, view, id, weight)); } - typename NodeTypes::PredicateQuery &addPredicateQuery( - PredicateQueryTerm::UP term, const vespalib::stringref &view, - int32_t id, Weight weight) { + typename NodeTypes::PredicateQuery &addPredicateQuery(PredicateQueryTerm::UP term, const stringref &view, int32_t id, Weight weight) { adjustWeight(weight); - return addTerm(createPredicateQuery<NodeTypes>( - std::move(term), view, id, weight)); + return addTerm(createPredicateQuery<NodeTypes>(std::move(term), view, id, weight)); } - typename NodeTypes::RegExpTerm &addRegExpTerm( - const vespalib::stringref &term, const vespalib::stringref &view, - int32_t id, Weight weight) { + typename NodeTypes::RegExpTerm &addRegExpTerm(const stringref &term, const stringref &view, int32_t id, Weight weight) { adjustWeight(weight); return addTerm(createRegExpTerm<NodeTypes>(term, view, id, weight)); } diff --git a/searchlib/src/vespa/searchlib/query/tree/stackdumpquerycreator.h b/searchlib/src/vespa/searchlib/query/tree/stackdumpquerycreator.h index c3b10aae05d..246fd288b0d 100644 --- a/searchlib/src/vespa/searchlib/query/tree/stackdumpquerycreator.h +++ b/searchlib/src/vespa/searchlib/query/tree/stackdumpquerycreator.h @@ -6,8 +6,9 @@ #include "querybuilder.h" #include "term.h" #include <vespa/vespalib/stllike/asciistream.h> -#include <string> #include <vespa/searchlib/parsequery/stackdumpiterator.h> +#include <vespa/searchlib/parsequery/simplequerystack.h> +#include <vespa/vespalib/objects/nbostream.h> #include <algorithm> namespace search { @@ -18,23 +19,6 @@ namespace query { */ template <class NodeTypes> class StackDumpQueryCreator { -private: - /** - * If changing this class note: - * Note that this method must return a reference into the existing querystack. - * This is necessary to use the non-copying stringref noted in the create method. 
- */ - static vespalib::stringref readString( - SimpleQueryStackDumpIterator &queryStack, - void (SimpleQueryStackDumpIterator::*f)(const char **, - size_t *) const) - { - const char *p; - size_t len; - (queryStack.*f)(&p, &len); - return vespalib::stringref(p, len); - } - public: static Node::UP create(search::SimpleQueryStackDumpIterator &queryStack) { @@ -44,115 +28,9 @@ public: // Especially make sure that do not create any stack local objects like vespalib::string // with smaller scope, that you refer with pureTermView. vespalib::stringref pureTermView; - while (queryStack.next()) { - uint32_t arity = queryStack.getArity(); - uint32_t arg1 = queryStack.getArg1(); - double arg2 = queryStack.getArg2(); - double arg3 = queryStack.getArg3(); - ParseItem::ItemType type = queryStack.getType(); - Node::UP node; - Term *t = 0; - if (type == ParseItem::ITEM_AND) { - builder.addAnd(arity); - } else if (type == ParseItem::ITEM_RANK) { - builder.addRank(arity); - } else if (type == ParseItem::ITEM_OR) { - builder.addOr(arity); - } else if (type == ParseItem::ITEM_WORD_ALTERNATIVES) { - vespalib::stringref view = readString(queryStack, - &SimpleQueryStackDumpIterator::getIndexName); - int32_t id = queryStack.getUniqueId(); - Weight weight = queryStack.GetWeight(); - builder.addEquiv(arity, id, weight); - pureTermView = view; - } else if (type == ParseItem::ITEM_WEAK_AND) { - vespalib::stringref view = readString(queryStack, - &SimpleQueryStackDumpIterator::getIndexName); - builder.addWeakAnd(arity, arg1, view); - pureTermView = view; - } else if (type == ParseItem::ITEM_EQUIV) { - int32_t id = queryStack.getUniqueId(); - Weight weight = queryStack.GetWeight(); - builder.addEquiv(arity, id, weight); - } else if (type == ParseItem::ITEM_NEAR) { - builder.addNear(arity, arg1); - } else if (type == ParseItem::ITEM_ONEAR) { - builder.addONear(arity, arg1); - } else if (type == ParseItem::ITEM_PHRASE) { - vespalib::stringref view = readString(queryStack, - 
&SimpleQueryStackDumpIterator::getIndexName); - int32_t id = queryStack.getUniqueId(); - Weight weight = queryStack.GetWeight(); - t = &builder.addPhrase(arity, view, id, weight); - pureTermView = view; - } else if (type == ParseItem::ITEM_WEIGHTED_SET) { - vespalib::stringref view = readString(queryStack, - &SimpleQueryStackDumpIterator::getIndexName); - int32_t id = queryStack.getUniqueId(); - Weight weight = queryStack.GetWeight(); - t = &builder.addWeightedSetTerm(arity, view, id, weight); - pureTermView = vespalib::stringref(); - } else if (type == ParseItem::ITEM_DOT_PRODUCT) { - vespalib::stringref view = readString(queryStack, - &SimpleQueryStackDumpIterator::getIndexName); - int32_t id = queryStack.getUniqueId(); - Weight weight = queryStack.GetWeight(); - t = &builder.addDotProduct(arity, view, id, weight); - pureTermView = vespalib::stringref(); - } else if (type == ParseItem::ITEM_WAND) { - vespalib::stringref view = readString(queryStack, - &SimpleQueryStackDumpIterator::getIndexName); - int32_t id = queryStack.getUniqueId(); - Weight weight = queryStack.GetWeight(); - t = &builder.addWandTerm( - arity, view, id, weight, arg1, arg2, arg3); - pureTermView = vespalib::stringref(); - } else if (type == ParseItem::ITEM_NOT) { - builder.addAndNot(arity); - } else { - vespalib::stringref term = readString(queryStack, - &SimpleQueryStackDumpIterator::getTerm); - vespalib::stringref view = readString(queryStack, - &SimpleQueryStackDumpIterator::getIndexName); - int32_t id = queryStack.getUniqueId(); - Weight weight = queryStack.GetWeight(); - - if (type == ParseItem::ITEM_TERM) { - t = &builder.addStringTerm(term, view, id, weight); - } else if (type == ParseItem::ITEM_PURE_WEIGHTED_STRING) { - t = &builder.addStringTerm(term, pureTermView, id, weight); - } else if (type == ParseItem::ITEM_PURE_WEIGHTED_LONG) { - t = &builder.addNumberTerm(term, pureTermView, id, weight); - } else if (type == ParseItem::ITEM_PREFIXTERM) { - t = &builder.addPrefixTerm(term, 
view, id, weight); - } else if (type == ParseItem::ITEM_SUBSTRINGTERM) { - t = &builder.addSubstringTerm(term, view, id, weight); - } else if (type == ParseItem::ITEM_EXACTSTRINGTERM) { - t = &builder.addStringTerm(term, view, id, weight); - } else if (type == ParseItem::ITEM_SUFFIXTERM) { - t = &builder.addSuffixTerm(term, view, id, weight); - } else if (type == ParseItem::ITEM_NUMTERM) { - if (term[0] == '[' || term[0] == '<' || term[0] == '>') { - Range range(term); - t = &builder.addRangeTerm(range, view, id, weight); - } else if (term[0] == '(') { - Location loc(term); - t = &builder.addLocationTerm(loc, view, id, weight); - } else { - t = &builder.addNumberTerm(term, view, id, weight); - } - } else if (type == ParseItem::ITEM_PREDICATE_QUERY) { - t = &builder.addPredicateQuery( - queryStack.getPredicateQueryTerm(), - view, id, weight); - } else if (type == ParseItem::ITEM_REGEXP) { - t = &builder.addRegExpTerm(term, view, id, weight); - } else { - LOG(error, "Unable to create query tree from stack dump. " - "node type = %d.", type); - } - } - if (t) { + while (!builder.hasError() && queryStack.next()) { + Term *t = createQueryTerm(queryStack, builder, pureTermView); + if (!builder.hasError() && t) { t->setTermIndex(queryStack.getTermIndex()); if (queryStack.getFlags() & ParseItem::IFLAG_NORANK) { t->setRanked(false); @@ -163,11 +41,117 @@ public: } } if (builder.hasError()) { - LOG(error, "Unable to create query tree from stack dump. %s", - builder.error().c_str()); + vespalib::stringref stack = queryStack.getStack(); + LOG(error, "Unable to create query tree from stack dump. 
Failed at position %ld out of %ld bytes %s", + queryStack.getPosition(), stack.size(), builder.error().c_str()); + LOG(error, "Raw QueryStack = %s", vespalib::HexDump(stack.c_str(), stack.size()).toString().c_str()); + if (LOG_WOULD_LOG(debug)) { + vespalib::string query = SimpleQueryStack::StackbufToString(stack); + LOG(error, "QueryStack = %s", builder.error().c_str(), query.c_str()); + } } return builder.build(); } + +private: + static Term * createQueryTerm(search::SimpleQueryStackDumpIterator &queryStack, QueryBuilder<NodeTypes> & builder, vespalib::stringref & pureTermView) { + uint32_t arity = queryStack.getArity(); + uint32_t arg1 = queryStack.getArg1(); + double arg2 = queryStack.getArg2(); + double arg3 = queryStack.getArg3(); + ParseItem::ItemType type = queryStack.getType(); + Node::UP node; + Term *t = 0; + if (type == ParseItem::ITEM_AND) { + builder.addAnd(arity); + } else if (type == ParseItem::ITEM_RANK) { + builder.addRank(arity); + } else if (type == ParseItem::ITEM_OR) { + builder.addOr(arity); + } else if (type == ParseItem::ITEM_WORD_ALTERNATIVES) { + vespalib::stringref view = queryStack.getIndexName(); + int32_t id = queryStack.getUniqueId(); + Weight weight = queryStack.GetWeight(); + builder.addEquiv(arity, id, weight); + pureTermView = view; + } else if (type == ParseItem::ITEM_WEAK_AND) { + vespalib::stringref view = queryStack.getIndexName(); + builder.addWeakAnd(arity, arg1, view); + pureTermView = view; + } else if (type == ParseItem::ITEM_EQUIV) { + int32_t id = queryStack.getUniqueId(); + Weight weight = queryStack.GetWeight(); + builder.addEquiv(arity, id, weight); + } else if (type == ParseItem::ITEM_NEAR) { + builder.addNear(arity, arg1); + } else if (type == ParseItem::ITEM_ONEAR) { + builder.addONear(arity, arg1); + } else if (type == ParseItem::ITEM_PHRASE) { + vespalib::stringref view = queryStack.getIndexName(); + int32_t id = queryStack.getUniqueId(); + Weight weight = queryStack.GetWeight(); + t = &builder.addPhrase(arity, 
view, id, weight); + pureTermView = view; + } else if (type == ParseItem::ITEM_WEIGHTED_SET) { + vespalib::stringref view = queryStack.getIndexName(); + int32_t id = queryStack.getUniqueId(); + Weight weight = queryStack.GetWeight(); + t = &builder.addWeightedSetTerm(arity, view, id, weight); + pureTermView = vespalib::stringref(); + } else if (type == ParseItem::ITEM_DOT_PRODUCT) { + vespalib::stringref view = queryStack.getIndexName(); + int32_t id = queryStack.getUniqueId(); + Weight weight = queryStack.GetWeight(); + t = &builder.addDotProduct(arity, view, id, weight); + pureTermView = vespalib::stringref(); + } else if (type == ParseItem::ITEM_WAND) { + vespalib::stringref view = queryStack.getIndexName(); + int32_t id = queryStack.getUniqueId(); + Weight weight = queryStack.GetWeight(); + t = &builder.addWandTerm(arity, view, id, weight, arg1, arg2, arg3); + pureTermView = vespalib::stringref(); + } else if (type == ParseItem::ITEM_NOT) { + builder.addAndNot(arity); + } else { + vespalib::stringref term = queryStack.getTerm(); + vespalib::stringref view = queryStack.getIndexName(); + int32_t id = queryStack.getUniqueId(); + Weight weight = queryStack.GetWeight(); + + if (type == ParseItem::ITEM_TERM) { + t = &builder.addStringTerm(term, view, id, weight); + } else if (type == ParseItem::ITEM_PURE_WEIGHTED_STRING) { + t = &builder.addStringTerm(term, pureTermView, id, weight); + } else if (type == ParseItem::ITEM_PURE_WEIGHTED_LONG) { + t = &builder.addNumberTerm(term, pureTermView, id, weight); + } else if (type == ParseItem::ITEM_PREFIXTERM) { + t = &builder.addPrefixTerm(term, view, id, weight); + } else if (type == ParseItem::ITEM_SUBSTRINGTERM) { + t = &builder.addSubstringTerm(term, view, id, weight); + } else if (type == ParseItem::ITEM_EXACTSTRINGTERM) { + t = &builder.addStringTerm(term, view, id, weight); + } else if (type == ParseItem::ITEM_SUFFIXTERM) { + t = &builder.addSuffixTerm(term, view, id, weight); + } else if (type == 
ParseItem::ITEM_NUMTERM) { + if (term[0] == '[' || term[0] == '<' || term[0] == '>') { + Range range(term); + t = &builder.addRangeTerm(range, view, id, weight); + } else if (term[0] == '(') { + Location loc(term); + t = &builder.addLocationTerm(loc, view, id, weight); + } else { + t = &builder.addNumberTerm(term, view, id, weight); + } + } else if (type == ParseItem::ITEM_PREDICATE_QUERY) { + t = &builder.addPredicateQuery(queryStack.getPredicateQueryTerm(), view, id, weight); + } else if (type == ParseItem::ITEM_REGEXP) { + t = &builder.addRegExpTerm(term, view, id, weight); + } else { + LOG(error, "Unable to create query tree from stack dump. node type = %d.", type); + } + } + return t; + } }; } // namespace query diff --git a/searchlib/src/vespa/searchlib/queryeval/blueprint.cpp b/searchlib/src/vespa/searchlib/queryeval/blueprint.cpp index df970f4cc7e..73a7c3d7b84 100644 --- a/searchlib/src/vespa/searchlib/queryeval/blueprint.cpp +++ b/searchlib/src/vespa/searchlib/queryeval/blueprint.cpp @@ -2,7 +2,6 @@ #include <vespa/fastos/fastos.h> #include <vespa/log/log.h> -LOG_SETUP(".queryeval.blueprint"); #include "blueprint.h" #include <vespa/vespalib/objects/visit.h> #include <vespa/vespalib/objects/objectdumper.h> @@ -10,13 +9,13 @@ LOG_SETUP(".queryeval.blueprint"); #include "leaf_blueprints.h" #include "intermediate_blueprints.h" #include "equiv_blueprint.h" +#include <vespa/vespalib/util/classname.h> #include <vector> #include <set> #include <map> -// NB: might need to hide this from non-gcc compilers... -#include <cxxabi.h> +LOG_SETUP(".queryeval.blueprint"); namespace search { namespace queryeval { @@ -119,14 +118,7 @@ Blueprint::asString() const vespalib::string Blueprint::getClassName() const { - vespalib::string name(typeid(*this).name()); - int status = 0; - size_t size = 0; - // NB: might need to hide this from non-gcc compilers... 
- char *unmangled = abi::__cxa_demangle(name.c_str(), 0, &size, &status); - vespalib::string result(unmangled); - free(unmangled); - return result; + return vespalib::getClassName(*this); } void diff --git a/searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.cpp b/searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.cpp index 70903788992..1610267c255 100644 --- a/searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.cpp +++ b/searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.cpp @@ -250,7 +250,7 @@ void PredicateBlueprint::fetchPostings(bool) { } PredicateAttribute::MinFeatureHandle mfh = predicate_attribute().getMinFeatureVector(); - vespalib::DefaultAlloc kv(mfh.second); + vespalib::alloc::Alloc kv(vespalib::DefaultAlloc::create(mfh.second)); _kVBacking.swap(kv); _kV = BitVectorCache::CountVector(static_cast<uint8_t *>(_kVBacking.get()), mfh.second); _index.computeCountVector(_cachedFeatures, _kV); diff --git a/searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.h b/searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.h index aeab9d4175f..616460f9e55 100644 --- a/searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.h +++ b/searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.h @@ -72,7 +72,7 @@ private: const PredicateAttribute & _attribute; const predicate::PredicateIndex &_index; - vespalib::DefaultAlloc _kVBacking; + vespalib::alloc::Alloc _kVBacking; BitVectorCache::CountVector _kV; BitVectorCache::KeySet _cachedFeatures; diff --git a/searchlib/src/vespa/searchlib/queryeval/searchiterator.cpp b/searchlib/src/vespa/searchlib/queryeval/searchiterator.cpp index 2e439acbf14..7dee3f8a3cb 100644 --- a/searchlib/src/vespa/searchlib/queryeval/searchiterator.cpp +++ b/searchlib/src/vespa/searchlib/queryeval/searchiterator.cpp @@ -2,16 +2,14 @@ #include <vespa/fastos/fastos.h> #include <vespa/log/log.h> -LOG_SETUP(".searchbase"); #include "searchiterator.h" #include <vespa/searchlib/index/docidandfeatures.h> 
#include <vespa/vespalib/objects/objectdumper.h> #include <vespa/vespalib/objects/objectvisitor.h> #include <vespa/vespalib/objects/visit.h> -#include <typeinfo> +#include <vespa/vespalib/util/classname.h> -// NB: might need to hide this from non-gcc compilers... -#include <cxxabi.h> +LOG_SETUP(".searchbase"); namespace search { namespace queryeval { @@ -88,14 +86,7 @@ SearchIterator::asString() const vespalib::string SearchIterator::getClassName() const { - vespalib::string name(typeid(*this).name()); - int status = 0; - size_t size = 0; - // NB: might need to hide this from non-gcc compilers... - char *unmangled = abi::__cxa_demangle(name.c_str(), 0, &size, &status); - vespalib::string result(unmangled); - free(unmangled); - return result; + return vespalib::getClassName(*this); } void diff --git a/searchlib/src/vespa/searchlib/transactionlog/domain.cpp b/searchlib/src/vespa/searchlib/transactionlog/domain.cpp index 9faa80a76b9..a102533b139 100644 --- a/searchlib/src/vespa/searchlib/transactionlog/domain.cpp +++ b/searchlib/src/vespa/searchlib/transactionlog/domain.cpp @@ -117,7 +117,7 @@ DomainInfo Domain::getDomainInfo() const { LockGuard guard(_lock); - DomainInfo info(SerialNumRange(begin(), end()), size(guard), byteSize()); + DomainInfo info(SerialNumRange(begin(guard), end(guard)), size(guard), byteSize(guard)); for (const auto &entry: _parts) { const DomainPart &part = *entry.second; info.parts.emplace_back(PartInfo(part.range(), part.size(), @@ -126,8 +126,16 @@ Domain::getDomainInfo() const return info; } -SerialNum Domain::begin() const +SerialNum +Domain::begin() const { + return begin(LockGuard(_lock)); +} + +SerialNum +Domain::begin(const LockGuard & guard) const +{ + assert(guard.locks(_lock)); SerialNum s(0); if ( ! 
_parts.empty() ) { s = _parts.begin()->second->range().from(); @@ -135,8 +143,16 @@ SerialNum Domain::begin() const return s; } -SerialNum Domain::end() const +SerialNum +Domain::end() const +{ + return end(LockGuard(_lock)); +} + +SerialNum +Domain::end(const LockGuard & guard) const { + assert(guard.locks(_lock)); SerialNum s(0); if ( ! _parts.empty() ) { s = _parts.rbegin()->second->range().to(); @@ -144,8 +160,16 @@ SerialNum Domain::end() const return s; } -size_t Domain::byteSize() const +size_t +Domain::byteSize() const { + return byteSize(LockGuard(_lock)); +} + +size_t +Domain::byteSize(const LockGuard & guard) const +{ + assert(guard.locks(_lock)); size_t size = 0; for (const auto &entry : _parts) { const DomainPart &part = *entry.second; @@ -155,7 +179,7 @@ size_t Domain::byteSize() const } SerialNum -Domain::getSynced(void) const +Domain::getSynced() const { SerialNum s(0); LockGuard guard(_lock); @@ -174,7 +198,7 @@ Domain::getSynced(void) const void -Domain::triggerSyncNow(void) +Domain::triggerSyncNow() { MonitorGuard guard(_syncMonitor); if (!_pendingSync) { @@ -203,13 +227,12 @@ DomainPart::SP Domain::findPart(SerialNum s) uint64_t Domain::size() const { - LockGuard guard(_lock); - return size(guard); + return size(LockGuard(_lock)); } uint64_t Domain::size(const LockGuard & guard) const { - (void) guard; + assert(guard.locks(_lock)); uint64_t sz(0); for (const auto & part : _parts) { sz += part.second->size(); diff --git a/searchlib/src/vespa/searchlib/transactionlog/domain.h b/searchlib/src/vespa/searchlib/transactionlog/domain.h index d6dbde6b06e..c87b1887624 100644 --- a/searchlib/src/vespa/searchlib/transactionlog/domain.h +++ b/searchlib/src/vespa/searchlib/transactionlog/domain.h @@ -64,10 +64,10 @@ public: SerialNum begin() const; SerialNum end() const; - SerialNum getSynced(void) const; - void triggerSyncNow(void); - bool getMarkedDeleted(void) const { return _markedDeleted; } - void markDeleted(void) { _markedDeleted = true; } + SerialNum 
getSynced() const; + void triggerSyncNow(); + bool getMarkedDeleted() const { return _markedDeleted; } + void markDeleted() { _markedDeleted = true; } size_t byteSize() const; size_t getNumSessions() const { return _sessions.size(); } @@ -87,6 +87,9 @@ public: } uint64_t size() const; private: + SerialNum begin(const vespalib::LockGuard & guard) const; + SerialNum end(const vespalib::LockGuard & guard) const; + size_t byteSize(const vespalib::LockGuard & guard) const; uint64_t size(const vespalib::LockGuard & guard) const; void cleanSessions(); vespalib::string dir() const { return getDir(_baseDir, _name); } @@ -94,7 +97,7 @@ private: typedef std::vector<SerialNum> SerialNumList; - SerialNumList scanDir(void); + SerialNumList scanDir(); typedef std::map<int, Session::SP > SessionList; typedef std::map<int64_t, DomainPart::SP > DomainPartList; @@ -102,7 +105,7 @@ private: DomainPart::Crc _defaultCrcType; Executor & _executor; - int _sessionId; + std::atomic<int> _sessionId; const bool _useFsync; vespalib::Monitor _syncMonitor; bool _pendingSync; diff --git a/searchlib/src/vespa/searchlib/transactionlog/domainpart.cpp b/searchlib/src/vespa/searchlib/transactionlog/domainpart.cpp index 274a3495e73..776924a8c85 100644 --- a/searchlib/src/vespa/searchlib/transactionlog/domainpart.cpp +++ b/searchlib/src/vespa/searchlib/transactionlog/domainpart.cpp @@ -221,7 +221,7 @@ DomainPart::buildPacketMapping(bool allowTruncate) SerialNum lastSerial(0); int64_t firstPos(currPos); bool full(false); - vespalib::DefaultAlloc buf; + vespalib::alloc::Alloc buf; for(size_t i(0); !full && (currPos < fSize); i++) { Packet::Entry e; if (read(transLog, e, buf, allowTruncate)) { @@ -552,7 +552,7 @@ DomainPart::visit(FastOS_FileInterface &file, SerialNumRange &r, Packet &packet) } if (retval) { Packet newPacket; - vespalib::DefaultAlloc buf; + vespalib::alloc::Alloc buf; for (bool full(false);!full && retval && (r.from() < r.to());) { Packet::Entry e; int64_t fPos = file.GetPosition(); @@ 
-612,7 +612,7 @@ DomainPart::write(FastOS_FileInterface &file, const Packet::Entry &entry) bool DomainPart::read(FastOS_FileInterface &file, Packet::Entry &entry, - vespalib::DefaultAlloc & buf, + vespalib::alloc::Alloc & buf, bool allowTruncate) { bool retval(true); @@ -636,7 +636,7 @@ DomainPart::read(FastOS_FileInterface &file, } } if (len > buf.size()) { - vespalib::DefaultAlloc(len).swap(buf); + vespalib::DefaultAlloc::create(len).swap(buf); } rlen = file.Read(buf.get(), len); retval = rlen == len; diff --git a/searchlib/src/vespa/searchlib/transactionlog/domainpart.h b/searchlib/src/vespa/searchlib/transactionlog/domainpart.h index 04041a2cba0..59eb2f6be9a 100644 --- a/searchlib/src/vespa/searchlib/transactionlog/domainpart.h +++ b/searchlib/src/vespa/searchlib/transactionlog/domainpart.h @@ -70,7 +70,7 @@ private: static bool read(FastOS_FileInterface &file, Packet::Entry &entry, - vespalib::DefaultAlloc &buf, + vespalib::alloc::Alloc &buf, bool allowTruncate); void write(FastOS_FileInterface &file, const Packet::Entry &entry); diff --git a/searchlib/src/vespa/searchlib/transactionlog/session.h b/searchlib/src/vespa/searchlib/transactionlog/session.h index 69d22e69fc1..432539e0e50 100644 --- a/searchlib/src/vespa/searchlib/transactionlog/session.h +++ b/searchlib/src/vespa/searchlib/transactionlog/session.h @@ -13,14 +13,15 @@ namespace transactionlog { class Domain; typedef std::shared_ptr<Domain> DomainSP; -class Session : public FRT_IRequestWait, - public vespalib::noncopyable +class Session : public FRT_IRequestWait { private: typedef vespalib::Executor::Task Task; public: typedef std::shared_ptr<Session> SP; + Session(const Session &) = delete; + Session & operator = (const Session &) = delete; Session(int sId, const SerialNumRange & r, const DomainSP & d, FRT_Supervisor & supervisor, FNET_Connection *conn, bool subscriber=false); virtual ~Session(); const SerialNumRange & range() const { return _range; } diff --git 
a/searchlib/src/vespa/searchlib/util/fileutil.h b/searchlib/src/vespa/searchlib/util/fileutil.h index 69d2cc557a6..b61c4e9d0ac 100644 --- a/searchlib/src/vespa/searchlib/util/fileutil.h +++ b/searchlib/src/vespa/searchlib/util/fileutil.h @@ -295,11 +295,11 @@ public: virtual void rewind() = 0; }; -template <typename T, typename A=vespalib::HeapAlloc> -class SequentialReadModifyWriteVector : public SequentialReadModifyWriteInterface<T>, public vespalib::Array<T, A> +template <typename T> +class SequentialReadModifyWriteVector : public SequentialReadModifyWriteInterface<T>, public vespalib::Array<T> { private: - typedef vespalib::Array<T, A> Vector; + typedef vespalib::Array<T> Vector; public: SequentialReadModifyWriteVector() : Vector(), _rp(0), _wp(0) { } SequentialReadModifyWriteVector(size_t sz) : Vector(sz), _rp(0), _wp(0) { } diff --git a/searchsummary/src/vespa/searchsummary/docsummary/dynamicteaserdfw.cpp b/searchsummary/src/vespa/searchsummary/docsummary/dynamicteaserdfw.cpp index 9a9041ea295..07eaad9807f 100644 --- a/searchsummary/src/vespa/searchsummary/docsummary/dynamicteaserdfw.cpp +++ b/searchsummary/src/vespa/searchsummary/docsummary/dynamicteaserdfw.cpp @@ -169,29 +169,27 @@ public: virtual const char *Index(const juniper::QueryItem* item, size_t *len) const { if (item->_si != NULL) { - const char *ret; - item->_si->getIndexName(&ret, len); - return ret; + *len = item->_si->getIndexName().size(); + return item->_si->getIndexName().c_str(); } else { + *len = item->_data->_indexlen; return item->_data->_index; } } virtual bool UsefulIndex(const juniper::QueryItem* item) const { - const char *buf; - size_t buflen; + vespalib::stringref index; if (_kwExtractor == NULL) return true; if (item->_si != NULL) { - item->_si->getIndexName(&buf, &buflen); + index = item->_si->getIndexName(); } else { - buf = item->_data->_index; - buflen = item->_data->_indexlen; + index = vespalib::stringref(item->_data->_index, item->_data->_indexlen); } - return 
_kwExtractor->IsLegalIndex(buf, buflen); + return _kwExtractor->IsLegalIndex(index); } }; @@ -203,8 +201,6 @@ JuniperQueryAdapter::Traverse(juniper::IQueryVisitor *v) const bool rc = true; search::SimpleQueryStackDumpIterator iterator(_buf); juniper::QueryItem item(&iterator); - const char *buf; - size_t buflen; if (_highlightTerms->numKeys() > 0) { v->VisitAND(&item, 2); @@ -234,14 +230,15 @@ JuniperQueryAdapter::Traverse(juniper::IQueryVisitor *v) const case search::ParseItem::ITEM_TERM: case search::ParseItem::ITEM_EXACTSTRINGTERM: case search::ParseItem::ITEM_PURE_WEIGHTED_STRING: - iterator.getTerm(&buf, &buflen); - v->VisitKeyword(&item, buf, buflen, false, isSpecialToken); + { + vespalib::stringref term = iterator.getTerm(); + v->VisitKeyword(&item, term.c_str(), term.size(), false, isSpecialToken); + } break; case search::ParseItem::ITEM_NUMTERM: - iterator.getTerm(&buf, &buflen); { - vespalib::string termStr(buf, buflen); - queryeval::SplitFloat splitter(termStr); + vespalib::string term = iterator.getTerm(); + queryeval::SplitFloat splitter(term); if (splitter.parts() > 1) { if (v->VisitPHRASE(&item, splitter.parts())) { for (size_t i = 0; i < splitter.parts(); ++i) { @@ -255,7 +252,7 @@ JuniperQueryAdapter::Traverse(juniper::IQueryVisitor *v) const splitter.getPart(0).c_str(), splitter.getPart(0).size(), false); } else { - v->VisitKeyword(&item, buf, buflen, false, true); + v->VisitKeyword(&item, term.c_str(), term.size(), false, true); } } break; @@ -269,16 +266,14 @@ JuniperQueryAdapter::Traverse(juniper::IQueryVisitor *v) const break; case search::ParseItem::ITEM_PREFIXTERM: case search::ParseItem::ITEM_SUBSTRINGTERM: - iterator.getTerm(&buf, &buflen); - v->VisitKeyword(&item, buf, buflen, true, isSpecialToken); + { + vespalib::stringref term = iterator.getTerm(); + v->VisitKeyword(&item, term.c_str(), term.size(), true, isSpecialToken); + } break; case search::ParseItem::ITEM_ANY: -#if (JUNIPER_RP_API_MINOR_VERSION >= 1) if (!v->VisitANY(&item, 
iterator.getArity())) -#else - if (!v->VisitOR(&item, iterator.getArity())) -#endif - rc = SkipItem(&iterator); + rc = SkipItem(&iterator); break; case search::ParseItem::ITEM_NEAR: if (!v->VisitNEAR(&item, iterator.getArity(),iterator.getArg1())) diff --git a/searchsummary/src/vespa/searchsummary/docsummary/keywordextractor.cpp b/searchsummary/src/vespa/searchsummary/docsummary/keywordextractor.cpp index 6d567f9a6da..51025ab1034 100644 --- a/searchsummary/src/vespa/searchsummary/docsummary/keywordextractor.cpp +++ b/searchsummary/src/vespa/searchsummary/docsummary/keywordextractor.cpp @@ -111,10 +111,9 @@ KeywordExtractor::GetLegalIndexSpec() bool -KeywordExtractor::IsLegalIndex(const char *idxName, size_t idxNameLen) const +KeywordExtractor::IsLegalIndex(vespalib::stringref idxS) const { vespalib::string resolvedIdxName; - vespalib::string idxS(idxName, idxNameLen); if (_env != NULL) { resolvedIdxName = _env->lookupIndex(idxS); @@ -136,10 +135,8 @@ KeywordExtractor::IsLegalIndex(const char *idxName, size_t idxNameLen) const char * -KeywordExtractor::ExtractKeywords(const vespalib::stringref &buf) const +KeywordExtractor::ExtractKeywords(vespalib::stringref buf) const { - const char *str_ptr; - size_t str_len; search::SimpleQueryStackDumpIterator si(buf); char keywordstore[4096]; // Initial storage for keywords buffer search::RawBuf keywords(keywordstore, sizeof(keywordstore)); @@ -171,27 +168,28 @@ KeywordExtractor::ExtractKeywords(const vespalib::stringref &buf) const keywords.reset(); goto iteratorloopend; } else { - si.getIndexName(&str_ptr, &str_len); - if (!IsLegalIndex(str_ptr, str_len)) + if (!IsLegalIndex(si.getIndexName())) continue; // Found a term - si.getTerm(&str_ptr, &str_len); + vespalib::stringref term = si.getTerm(); search::ParseItem::ItemCreator term_creator = si.getCreator(); - if (str_len > 0 && useful(term_creator)) { + if ( !term.empty() && useful(term_creator)) { // Actual term to add - if (phraseterms_was_added) + if 
(phraseterms_was_added) { // Not the first term in the phrase keywords += " "; - else + } else { phraseterms_was_added = true; + } - keywords.append(str_ptr, str_len); + keywords.append(term.c_str(), term.size()); } } } - if (phraseterms_was_added) + if (phraseterms_was_added) { // Terms was added, so 0-terminate the string keywords.append("\0", 1); + } break; } @@ -200,15 +198,16 @@ KeywordExtractor::ExtractKeywords(const vespalib::stringref &buf) const case search::ParseItem::ITEM_EXACTSTRINGTERM: case search::ParseItem::ITEM_NUMTERM: case search::ParseItem::ITEM_TERM: - si.getIndexName(&str_ptr, &str_len); - if (!IsLegalIndex(str_ptr, str_len)) + if (!IsLegalIndex(si.getIndexName())) continue; - // add a new keyword - si.getTerm(&str_ptr, &str_len); - if (str_len > 0 && useful(creator)) { - // An actual string to add - keywords.append(str_ptr, str_len); - keywords.append("\0", 1); + { + // add a new keyword + vespalib::stringref term = si.getTerm(); + if ( !term.empty() && useful(creator)) { + // An actual string to add + keywords.append(term.c_str(), term.size()); + keywords.append("\0", 1); + } } break; diff --git a/searchsummary/src/vespa/searchsummary/docsummary/keywordextractor.h b/searchsummary/src/vespa/searchsummary/docsummary/keywordextractor.h index 750cfb2cdee..35c9d387317 100644 --- a/searchsummary/src/vespa/searchsummary/docsummary/keywordextractor.h +++ b/searchsummary/src/vespa/searchsummary/docsummary/keywordextractor.h @@ -135,7 +135,7 @@ public: * * @return true if the given index name is legal. **/ - bool IsLegalIndex(const char *idxName, size_t idxNameLen) const; + bool IsLegalIndex(vespalib::stringref idx) const; /** @@ -156,7 +156,7 @@ public: * @return Pointer to a buffer containing zero-terminated keywords, * with an empty word at the end. 
*/ - char *ExtractKeywords(const vespalib::stringref &buf) const; + char *ExtractKeywords(vespalib::stringref buf) const; }; } diff --git a/simplemetrics/src/main/java/com/yahoo/metrics/simple/Gauge.java b/simplemetrics/src/main/java/com/yahoo/metrics/simple/Gauge.java index 87ea7ffdaf0..edfa198416e 100644 --- a/simplemetrics/src/main/java/com/yahoo/metrics/simple/Gauge.java +++ b/simplemetrics/src/main/java/com/yahoo/metrics/simple/Gauge.java @@ -14,7 +14,7 @@ import edu.umd.cs.findbugs.annotations.Nullable; * @author steinar */ @Beta -public final class Gauge { +public class Gauge { @Nullable private final Point defaultPosition; private final String name; diff --git a/simplemetrics/src/main/java/com/yahoo/metrics/simple/MetricReceiver.java b/simplemetrics/src/main/java/com/yahoo/metrics/simple/MetricReceiver.java index a0b94f1e571..c45d50db065 100644 --- a/simplemetrics/src/main/java/com/yahoo/metrics/simple/MetricReceiver.java +++ b/simplemetrics/src/main/java/com/yahoo/metrics/simple/MetricReceiver.java @@ -55,6 +55,25 @@ public class MetricReceiver { } } + private static final class NullGauge extends Gauge { + NullGauge() { + super(null, null, null); + } + + @Override + public void sample(double x) { + } + + @Override + public void sample(double x, Point p) { + } + + @Override + public PointBuilder builder() { + return super.builder(); + } + } + private static final class NullReceiver extends MetricReceiver { NullReceiver() { super(null, null); @@ -76,12 +95,12 @@ public class MetricReceiver { @Override public Gauge declareGauge(String name) { - return null; + return new NullGauge(); } @Override public Gauge declareGauge(String name, Point boundDimensions) { - return null; + return new NullGauge(); } @Override diff --git a/simplemetrics/src/main/resources/configdefinitions/manager.def b/simplemetrics/src/main/resources/configdefinitions/manager.def index 6f6bef75fd7..11077b87177 100644 --- a/simplemetrics/src/main/resources/configdefinitions/manager.def +++ 
b/simplemetrics/src/main/resources/configdefinitions/manager.def @@ -2,5 +2,5 @@ version=1 namespace=metrics -reportPeriodSeconds int default=300 +reportPeriodSeconds int default=60 pointsToKeepPerMetric int default=100 diff --git a/slobrok/CMakeLists.txt b/slobrok/CMakeLists.txt index 1e074c1f814..4c78f331492 100644 --- a/slobrok/CMakeLists.txt +++ b/slobrok/CMakeLists.txt @@ -19,7 +19,6 @@ vespa_define_module( src/tests/backoff src/tests/configure src/tests/mirrorapi - src/tests/multi src/tests/oldapi src/tests/registerapi src/tests/standalone diff --git a/slobrok/src/tests/multi/.gitignore b/slobrok/src/tests/multi/.gitignore deleted file mode 100644 index a8bc1f97275..00000000000 --- a/slobrok/src/tests/multi/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -7.cfg -Makefile -multi_test -slobrok*.out -slobrok_multi_test_app diff --git a/slobrok/src/tests/multi/CMakeLists.txt b/slobrok/src/tests/multi/CMakeLists.txt deleted file mode 100644 index 276dd2f0845..00000000000 --- a/slobrok/src/tests/multi/CMakeLists.txt +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -vespa_add_executable(slobrok_multi_test_app TEST - SOURCES - multi.cpp - DEPENDS -) diff --git a/slobrok/src/tests/multi/DESC b/slobrok/src/tests/multi/DESC deleted file mode 100644 index d38625a647b..00000000000 --- a/slobrok/src/tests/multi/DESC +++ /dev/null @@ -1 +0,0 @@ -multi-slobrok test. Take a look at multi.cpp for details. diff --git a/slobrok/src/tests/multi/FILES b/slobrok/src/tests/multi/FILES deleted file mode 100644 index bbb1480c9ec..00000000000 --- a/slobrok/src/tests/multi/FILES +++ /dev/null @@ -1 +0,0 @@ -multi.cpp diff --git a/slobrok/src/tests/multi/multi.cpp b/slobrok/src/tests/multi/multi.cpp deleted file mode 100644 index 9a8a7eb41e0..00000000000 --- a/slobrok/src/tests/multi/multi.cpp +++ /dev/null @@ -1,369 +0,0 @@ -// Copyright 2016 Yahoo Inc. 
Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -#include <vespa/vespalib/testkit/test_kit.h> -#include <vespa/fnet/frt/frt.h> -#include <string> - -//----------------------------------------------------------------------------- - -class Server : public FRT_Invokable -{ -private: - FRT_Supervisor _orb; - std::string _name; - -public: - Server(std::string name, int port); - ~Server(); - void rpc_listNamesServed(FRT_RPCRequest *req); -}; - - -Server::Server(std::string name, int port) - : _orb(), - _name(name) -{ - { - FRT_ReflectionBuilder rb(&_orb); - //--------------------------------------------------------------------- - rb.DefineMethod("slobrok.callback.listNamesServed", "", "S", true, - FRT_METHOD(Server::rpc_listNamesServed), this); - rb.MethodDesc("Look up a rpcserver"); - rb.ReturnDesc("names", "The rpcserver names on this server"); - //--------------------------------------------------------------------- - } - _orb.Listen(port); - _orb.Start(); -} - - -void -Server::rpc_listNamesServed(FRT_RPCRequest *req) -{ - FRT_Values &dst = *req->GetReturn(); - FRT_StringValue *names = dst.AddStringArray(1); - dst.SetString(&names[0], _name.c_str()); -} - - -Server::~Server() -{ - _orb.ShutDown(true); -} - -//----------------------------------------------------------------------------- - -TEST("multi") { - FRT_Supervisor orb; - orb.Start(); - - FRT_Target *sb = orb.GetTarget(18511); - FRT_RPCRequest *req = NULL; - - // test ping against slobrok - req = orb.AllocRPCRequest(req); - req->SetMethodName("frt.rpc.ping"); - sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); - - // lookup '*' on empty slobrok - req = orb.AllocRPCRequest(req); - req->SetMethodName("slobrok.lookupRpcServer"); - req->GetParams()->AddString("*"); - sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); - ASSERT_TRUE(strcmp(req->GetReturnSpec(), "SS") == 0); - ASSERT_TRUE(req->GetReturn()->GetValue(0)._string_array._len == 0); - 
ASSERT_TRUE(req->GetReturn()->GetValue(1)._string_array._len == 0); - - // check managed servers on empty slobrok - req = orb.AllocRPCRequest(req); - req->SetMethodName("slobrok.internal.listManagedRpcServers"); - sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); - ASSERT_TRUE(strcmp(req->GetReturnSpec(), "SS") == 0); - ASSERT_TRUE(req->GetReturn()->GetValue(0)._string_array._len == 0); - ASSERT_TRUE(req->GetReturn()->GetValue(1)._string_array._len == 0); - - Server a("A", 18518); - - // register server A - req = orb.AllocRPCRequest(req); - req->SetMethodName("slobrok.registerRpcServer"); - req->GetParams()->AddString("A"); - req->GetParams()->AddString("tcp/localhost:18518"); - sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); - - // lookup '*' should give 'A' - req = orb.AllocRPCRequest(req); - req->SetMethodName("slobrok.lookupRpcServer"); - req->GetParams()->AddString("*"); - sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); - ASSERT_TRUE(strcmp(req->GetReturnSpec(), "SS") == 0); - ASSERT_TRUE(req->GetReturn()->GetValue(0)._string_array._len == 1); - ASSERT_TRUE(req->GetReturn()->GetValue(1)._string_array._len == 1); - ASSERT_TRUE(strcmp(req->GetReturn()->GetValue(0)._string_array._pt[0]._str, "A") == 0); - ASSERT_TRUE(strcmp(req->GetReturn()->GetValue(1)._string_array._pt[0]._str, "tcp/localhost:18518") == 0); - - // lookup 'A' should give 'A' - req = orb.AllocRPCRequest(req); - req->SetMethodName("slobrok.lookupRpcServer"); - req->GetParams()->AddString("A"); - sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); - ASSERT_TRUE(strcmp(req->GetReturnSpec(), "SS") == 0); - ASSERT_TRUE(req->GetReturn()->GetValue(0)._string_array._len == 1); - ASSERT_TRUE(req->GetReturn()->GetValue(1)._string_array._len == 1); - ASSERT_TRUE(strcmp(req->GetReturn()->GetValue(0)._string_array._pt[0]._str, "A") == 0); - ASSERT_TRUE(strcmp(req->GetReturn()->GetValue(1)._string_array._pt[0]._str, "tcp/localhost:18518") == 0); - - // lookup 'B' should give 
'' - req = orb.AllocRPCRequest(req); - req->SetMethodName("slobrok.lookupRpcServer"); - req->GetParams()->AddString("B"); - sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); - ASSERT_TRUE(strcmp(req->GetReturnSpec(), "SS") == 0); - ASSERT_TRUE(req->GetReturn()->GetValue(0)._string_array._len == 0); - ASSERT_TRUE(req->GetReturn()->GetValue(1)._string_array._len == 0); - - // lookup '*/*' should give '' - req = orb.AllocRPCRequest(req); - req->SetMethodName("slobrok.lookupRpcServer"); - req->GetParams()->AddString("*/*"); - sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); - ASSERT_TRUE(strcmp(req->GetReturnSpec(), "SS") == 0); - ASSERT_TRUE(req->GetReturn()->GetValue(0)._string_array._len == 0); - ASSERT_TRUE(req->GetReturn()->GetValue(1)._string_array._len == 0); - - { - Server b("B", 18519); - - // register server B as 'C' - req = orb.AllocRPCRequest(req); - req->SetMethodName("slobrok.registerRpcServer"); - req->GetParams()->AddString("C"); - req->GetParams()->AddString("tcp/localhost:18519"); - sb->InvokeSync(req, 5.0); - ASSERT_TRUE(req->IsError()); - - // register server B - req = orb.AllocRPCRequest(req); - req->SetMethodName("slobrok.registerRpcServer"); - req->GetParams()->AddString("B"); - req->GetParams()->AddString("tcp/localhost:18519"); - sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); - - { - Server a2("A", 18520); - - // register server A(2) - req = orb.AllocRPCRequest(req); - req->SetMethodName("slobrok.registerRpcServer"); - req->GetParams()->AddString("A"); - req->GetParams()->AddString("tcp/localhost:18520"); - sb->InvokeSync(req, 5.0); - ASSERT_TRUE(req->IsError()); - } - - // lookup '*' should give 'AB | BA' - req = orb.AllocRPCRequest(req); - req->SetMethodName("slobrok.lookupRpcServer"); - req->GetParams()->AddString("*"); - sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); - ASSERT_TRUE(strcmp(req->GetReturnSpec(), "SS") == 0); - ASSERT_TRUE(req->GetReturn()->GetValue(0)._string_array._len == 2); - 
ASSERT_TRUE(req->GetReturn()->GetValue(1)._string_array._len == 2); - { - FRT_StringValue *name = req->GetReturn()->GetValue(0)._string_array._pt; - FRT_StringValue *spec = req->GetReturn()->GetValue(1)._string_array._pt; - if (strcmp(name[0]._str, "A") == 0) { - ASSERT_TRUE(strcmp(name[0]._str, "A") == 0); - ASSERT_TRUE(strcmp(name[1]._str, "B") == 0); - ASSERT_TRUE(strcmp(spec[0]._str, "tcp/localhost:18518") == 0); - ASSERT_TRUE(strcmp(spec[1]._str, "tcp/localhost:18519") == 0); - } else { - ASSERT_TRUE(strcmp(name[1]._str, "A") == 0); - ASSERT_TRUE(strcmp(name[0]._str, "B") == 0); - ASSERT_TRUE(strcmp(spec[1]._str, "tcp/localhost:18518") == 0); - ASSERT_TRUE(strcmp(spec[0]._str, "tcp/localhost:18519") == 0); - } - } - } - - FastOS_Thread::Sleep(2000); - - // lookup 'B' should give '' - req = orb.AllocRPCRequest(req); - req->SetMethodName("slobrok.lookupRpcServer"); - req->GetParams()->AddString("B"); - sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); - ASSERT_TRUE(strcmp(req->GetReturnSpec(), "SS") == 0); - ASSERT_TRUE(req->GetReturn()->GetValue(0)._string_array._len == 0); - ASSERT_TRUE(req->GetReturn()->GetValue(1)._string_array._len == 0); - - // unregister server A (wrong spec) - req = orb.AllocRPCRequest(req); - req->SetMethodName("slobrok.unregisterRpcServer"); - req->GetParams()->AddString("A"); - req->GetParams()->AddString("tcp/localhost:18519"); - sb->InvokeSync(req, 5.0); - ASSERT_TRUE(req->IsError()); - - // lookup 'A' should give 'A' - req = orb.AllocRPCRequest(req); - req->SetMethodName("slobrok.lookupRpcServer"); - req->GetParams()->AddString("A"); - sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); - ASSERT_TRUE(strcmp(req->GetReturnSpec(), "SS") == 0); - ASSERT_TRUE(req->GetReturn()->GetValue(0)._string_array._len == 1); - ASSERT_TRUE(req->GetReturn()->GetValue(1)._string_array._len == 1); - ASSERT_TRUE(strcmp(req->GetReturn()->GetValue(0)._string_array._pt[0]._str, "A") == 0); - 
ASSERT_TRUE(strcmp(req->GetReturn()->GetValue(1)._string_array._pt[0]._str, "tcp/localhost:18518") == 0); - - // unregister server A - req = orb.AllocRPCRequest(req); - req->SetMethodName("slobrok.unregisterRpcServer"); - req->GetParams()->AddString("A"); - req->GetParams()->AddString("tcp/localhost:18518"); - sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); - - // lookup 'A' should give '' - req = orb.AllocRPCRequest(req); - req->SetMethodName("slobrok.lookupRpcServer"); - req->GetParams()->AddString("A"); - sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); - ASSERT_TRUE(strcmp(req->GetReturnSpec(), "SS") == 0); - ASSERT_TRUE(req->GetReturn()->GetValue(0)._string_array._len == 0); - ASSERT_TRUE(req->GetReturn()->GetValue(1)._string_array._len == 0); - - // lookup '*' on empty slobrok - req = orb.AllocRPCRequest(req); - req->SetMethodName("slobrok.lookupRpcServer"); - req->GetParams()->AddString("*"); - sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); - ASSERT_TRUE(strcmp(req->GetReturnSpec(), "SS") == 0); - ASSERT_TRUE(req->GetReturn()->GetValue(0)._string_array._len == 0); - ASSERT_TRUE(req->GetReturn()->GetValue(1)._string_array._len == 0); - - // unregister server A on empty slobrok - req = orb.AllocRPCRequest(req); - req->SetMethodName("slobrok.unregisterRpcServer"); - req->GetParams()->AddString("A"); - req->GetParams()->AddString("tcp/localhost:18518"); - sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); - - - FRT_Target *sb1 = orb.GetTarget(18512); - FRT_Target *sb2 = orb.GetTarget(18513); - FRT_Target *sb3 = orb.GetTarget(18514); - FRT_Target *sb4 = orb.GetTarget(18515); - FRT_Target *sb5 = orb.GetTarget(18516); - FRT_Target *sb6 = orb.GetTarget(18517); - - // register server A - req = orb.AllocRPCRequest(req); - req->SetMethodName("slobrok.registerRpcServer"); - req->GetParams()->AddString("A"); - req->GetParams()->AddString("tcp/localhost:18518"); - sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); - - - Server 
c("C", 18521); - Server d("D", 18522); - - for (int i=0; i < 150; i++) { - // register server C - req = orb.AllocRPCRequest(req); - req->SetMethodName("slobrok.registerRpcServer"); - req->GetParams()->AddString("C"); - req->GetParams()->AddString("tcp/localhost:18521"); - sb1->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); - - // register server D - req = orb.AllocRPCRequest(req); - req->SetMethodName("slobrok.registerRpcServer"); - req->GetParams()->AddString("D"); - req->GetParams()->AddString("tcp/localhost:18522"); - sb2->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); - - // lookup 'C' should give 'C' - req = orb.AllocRPCRequest(req); - req->SetMethodName("slobrok.lookupRpcServer"); - req->GetParams()->AddString("C"); - sb3->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); - ASSERT_TRUE(strcmp(req->GetReturnSpec(), "SS") == 0); - ASSERT_TRUE(req->GetReturn()->GetValue(0)._string_array._len == 1); - ASSERT_TRUE(req->GetReturn()->GetValue(1)._string_array._len == 1); - ASSERT_TRUE(strcmp(req->GetReturn()->GetValue(0)._string_array._pt[0]._str, "C") == 0); - ASSERT_TRUE(strcmp(req->GetReturn()->GetValue(1)._string_array._pt[0]._str, "tcp/localhost:18521") == 0); - - // lookup 'C' should give 'C' - req = orb.AllocRPCRequest(req); - req->SetMethodName("slobrok.lookupRpcServer"); - req->GetParams()->AddString("C"); - sb4->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); - ASSERT_TRUE(strcmp(req->GetReturnSpec(), "SS") == 0); - ASSERT_TRUE(req->GetReturn()->GetValue(0)._string_array._len == 1); - ASSERT_TRUE(req->GetReturn()->GetValue(1)._string_array._len == 1); - ASSERT_TRUE(strcmp(req->GetReturn()->GetValue(0)._string_array._pt[0]._str, "C") == 0); - ASSERT_TRUE(strcmp(req->GetReturn()->GetValue(1)._string_array._pt[0]._str, "tcp/localhost:18521") == 0); - - // lookup 'C' should give 'C' - req = orb.AllocRPCRequest(req); - req->SetMethodName("slobrok.lookupRpcServer"); - req->GetParams()->AddString("C"); - sb5->InvokeSync(req, 5.0); - 
ASSERT_TRUE(!req->IsError()); - ASSERT_TRUE(strcmp(req->GetReturnSpec(), "SS") == 0); - ASSERT_TRUE(req->GetReturn()->GetValue(0)._string_array._len == 1); - ASSERT_TRUE(req->GetReturn()->GetValue(1)._string_array._len == 1); - ASSERT_TRUE(strcmp(req->GetReturn()->GetValue(0)._string_array._pt[0]._str, "C") == 0); - ASSERT_TRUE(strcmp(req->GetReturn()->GetValue(1)._string_array._pt[0]._str, "tcp/localhost:18521") == 0); - - // lookup 'C' should give 'C' - req = orb.AllocRPCRequest(req); - req->SetMethodName("slobrok.lookupRpcServer"); - req->GetParams()->AddString("C"); - sb6->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); - ASSERT_TRUE(strcmp(req->GetReturnSpec(), "SS") == 0); - ASSERT_TRUE(req->GetReturn()->GetValue(0)._string_array._len == 1); - ASSERT_TRUE(req->GetReturn()->GetValue(1)._string_array._len == 1); - ASSERT_TRUE(strcmp(req->GetReturn()->GetValue(0)._string_array._pt[0]._str, "C") == 0); - ASSERT_TRUE(strcmp(req->GetReturn()->GetValue(1)._string_array._pt[0]._str, "tcp/localhost:18521") == 0); - - FastOS_Thread::Sleep(200); - - // lookup 'D' should give 'D' - req = orb.AllocRPCRequest(req); - req->SetMethodName("slobrok.lookupRpcServer"); - req->GetParams()->AddString("D"); - sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); - ASSERT_TRUE(strcmp(req->GetReturnSpec(), "SS") == 0); - ASSERT_TRUE(req->GetReturn()->GetValue(0)._string_array._len == 1); - ASSERT_TRUE(req->GetReturn()->GetValue(1)._string_array._len == 1); - ASSERT_TRUE(strcmp(req->GetReturn()->GetValue(0)._string_array._pt[0]._str, "D") == 0); - ASSERT_TRUE(strcmp(req->GetReturn()->GetValue(1)._string_array._pt[0]._str, "tcp/localhost:18522") == 0); - } - - orb.ShutDown(true); -} - -TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/slobrok/src/tests/multi/multi_test.sh b/slobrok/src/tests/multi/multi_test.sh deleted file mode 100755 index 222d7dce3b6..00000000000 --- a/slobrok/src/tests/multi/multi_test.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -set -e -ok=true -./start.sh 
-./slobrok_multi_test_app || ok=false -./stop.sh -$ok diff --git a/slobrok/src/tests/multi/start.sh b/slobrok/src/tests/multi/start.sh deleted file mode 100755 index 48d5e5d4874..00000000000 --- a/slobrok/src/tests/multi/start.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash -# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -set -e - -export VESPA_LOG_LEVEL='all -spam' - -sed s=localhost=`hostname`= < template-7.cfg > 7.cfg - -../../apps/slobrok/slobrok -c file:7.cfg -p 18511 > slobrok0.out 2>&1 & -echo $! >> pids.txt -../../apps/slobrok/slobrok -c file:7.cfg -p 18512 > slobrok1.out 2>&1 & -echo $! >> pids.txt -../../apps/slobrok/slobrok -c file:7.cfg -p 18513 > slobrok2.out 2>&1 & -echo $! >> pids.txt -../../apps/slobrok/slobrok -c file:7.cfg -p 18514 > slobrok3.out 2>&1 & -echo $! >> pids.txt -../../apps/slobrok/slobrok -c file:7.cfg -p 18515 > slobrok4.out 2>&1 & -echo $! >> pids.txt -../../apps/slobrok/slobrok -c file:7.cfg -p 18516 > slobrok5.out 2>&1 & -echo $! >> pids.txt -../../apps/slobrok/slobrok -c file:7.cfg -p 18517 > slobrok6.out 2>&1 & -echo $! >> pids.txt - -echo "Started: " `cat pids.txt` - -export VESPA_LOG_LEVEL='all -debug -spam' - -for x in 1 2 3 4 5 6 7 8 9; do - sleep $x - echo "waiting for service location brokers to start, slept $x seconds" - alive=true - for port in 18511 18512 18513 18514 18515 18516 18517; do - ../../apps/sbcmd/sbcmd $port slobrok.callback.listNamesServed || alive=false - done - if $alive; then - echo "all started ok after $x seconds" - exit 0 - fi -done -echo "giving up, this probably won't work" -exit 1 diff --git a/slobrok/src/tests/multi/stop.sh b/slobrok/src/tests/multi/stop.sh deleted file mode 100755 index e2ab6b6f71a..00000000000 --- a/slobrok/src/tests/multi/stop.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash -# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-set -e - -ok=true - -../../apps/sbcmd/sbcmd 18511 slobrok.system.stop || ok=false -../../apps/sbcmd/sbcmd 18512 slobrok.system.stop || ok=false -../../apps/sbcmd/sbcmd 18513 slobrok.system.stop || ok=false -../../apps/sbcmd/sbcmd 18514 slobrok.system.stop || ok=false -../../apps/sbcmd/sbcmd 18515 slobrok.system.stop || ok=false -../../apps/sbcmd/sbcmd 18516 slobrok.system.stop || ok=false -../../apps/sbcmd/sbcmd 18517 slobrok.system.stop || ok=false - -if $ok; then - echo "Signaled all brokers to stop OK" -fi - -for cnt in 1 2 3 4 5 6 7 8 9; do - sleep $cnt - alive=false - for x in `cat pids.txt`; do - kill $x 2>/dev/null && ps -p $x && alive=true - done - if $alive; then - echo "Some processes still alive after $cnt seconds" - else - rm -f pids.txt - $ok - exit - fi -done - -for x in `cat pids.txt`; do - kill -9 $x 2>/dev/null && echo "Force killed pid $x" -done -exit 1 diff --git a/slobrok/src/tests/multi/template-7.cfg b/slobrok/src/tests/multi/template-7.cfg deleted file mode 100644 index 6769494a51c..00000000000 --- a/slobrok/src/tests/multi/template-7.cfg +++ /dev/null @@ -1,8 +0,0 @@ -slobrok[7] -slobrok[0].connectionspec tcp/localhost:18511 -slobrok[1].connectionspec tcp/localhost:18512 -slobrok[2].connectionspec tcp/localhost:18513 -slobrok[3].connectionspec tcp/localhost:18514 -slobrok[4].connectionspec tcp/localhost:18515 -slobrok[5].connectionspec tcp/localhost:18516 -slobrok[6].connectionspec tcp/localhost:18517 diff --git a/slobrok/src/tests/standalone/standalone.cpp b/slobrok/src/tests/standalone/standalone.cpp index feeec4f508f..c17875ee571 100644 --- a/slobrok/src/tests/standalone/standalone.cpp +++ b/slobrok/src/tests/standalone/standalone.cpp @@ -52,30 +52,96 @@ Server::~Server() _orb.ShutDown(true); } +namespace { + +bool checkOk(FRT_RPCRequest *req) +{ + if (req == NULL) { + fprintf(stderr, "req is null pointer, this is bad\n"); + return false; + } + if (req->IsError()) { + fprintf(stderr, "req FAILED [code %d]: %s\n", + req->GetErrorCode(), + 
req->GetErrorMessage()); + fprintf(stderr, "req method is: '%s' with params:\n", req->GetMethodName()); + req->GetParams()->Print(); + fflush(stdout); // flushes output from Print() on previous line + return false; + } else { + return true; + } +} + +template<typename T> +class SubReferer +{ +private: + T* &_t; +public: + SubReferer(T* &t) : _t(t) {} + ~SubReferer() { + if (_t != NULL) _t->SubRef(); + } +}; + + +template<typename T> +class ShutDowner +{ +private: + T &_t; +public: + ShutDowner(T &t) : _t(t) {} + ~ShutDowner() { + _t.ShutDown(true); + } +}; + + +template<typename T> +class Stopper +{ +private: + T &_t; +public: + Stopper(T &t) : _t(t) {} + ~Stopper() { + _t.stop(); + } +}; + +} // namespace <unnamed> + //----------------------------------------------------------------------------- TEST("standalone") { slobrok::SlobrokServer slobrokServer(18541); + Stopper<slobrok::SlobrokServer> ssCleaner(slobrokServer); FastOS_Thread::Sleep(300); FRT_Supervisor orb; orb.Start(); + ShutDowner<FRT_Supervisor> orbCleaner(orb); FRT_Target *sb = orb.GetTarget(18541); + SubReferer<FRT_Target> sbCleaner(sb); + FRT_RPCRequest *req = NULL; + SubReferer<FRT_RPCRequest> reqCleaner(req); // test ping against slobrok req = orb.AllocRPCRequest(req); req->SetMethodName("frt.rpc.ping"); sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); + ASSERT_TRUE(checkOk(req)); // lookup '*' on empty slobrok req = orb.AllocRPCRequest(req); req->SetMethodName("slobrok.lookupRpcServer"); req->GetParams()->AddString("*"); sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); + ASSERT_TRUE(checkOk(req)); ASSERT_TRUE(strcmp(req->GetReturnSpec(), "SS") == 0); ASSERT_TRUE(req->GetReturn()->GetValue(0)._string_array._len == 0); ASSERT_TRUE(req->GetReturn()->GetValue(1)._string_array._len == 0); @@ -84,7 +150,7 @@ TEST("standalone") { req = orb.AllocRPCRequest(req); req->SetMethodName("slobrok.internal.listManagedRpcServers"); sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); + 
ASSERT_TRUE(checkOk(req)); ASSERT_TRUE(strcmp(req->GetReturnSpec(), "SS") == 0); ASSERT_TRUE(req->GetReturn()->GetValue(0)._string_array._len == 0); ASSERT_TRUE(req->GetReturn()->GetValue(1)._string_array._len == 0); @@ -97,14 +163,14 @@ TEST("standalone") { req->GetParams()->AddString("A"); req->GetParams()->AddString("tcp/localhost:18542"); sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); + ASSERT_TRUE(checkOk(req)); // lookup '*' should give 'A' req = orb.AllocRPCRequest(req); req->SetMethodName("slobrok.lookupRpcServer"); req->GetParams()->AddString("*"); sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); + ASSERT_TRUE(checkOk(req)); ASSERT_TRUE(strcmp(req->GetReturnSpec(), "SS") == 0); ASSERT_TRUE(req->GetReturn()->GetValue(0)._string_array._len == 1); ASSERT_TRUE(req->GetReturn()->GetValue(1)._string_array._len == 1); @@ -116,7 +182,7 @@ TEST("standalone") { req->SetMethodName("slobrok.lookupRpcServer"); req->GetParams()->AddString("A"); sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); + ASSERT_TRUE(checkOk(req)); ASSERT_TRUE(strcmp(req->GetReturnSpec(), "SS") == 0); ASSERT_TRUE(req->GetReturn()->GetValue(0)._string_array._len == 1); ASSERT_TRUE(req->GetReturn()->GetValue(1)._string_array._len == 1); @@ -128,7 +194,7 @@ TEST("standalone") { req->SetMethodName("slobrok.lookupRpcServer"); req->GetParams()->AddString("B"); sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); + ASSERT_TRUE(checkOk(req)); ASSERT_TRUE(strcmp(req->GetReturnSpec(), "SS") == 0); ASSERT_TRUE(req->GetReturn()->GetValue(0)._string_array._len == 0); ASSERT_TRUE(req->GetReturn()->GetValue(1)._string_array._len == 0); @@ -138,7 +204,7 @@ TEST("standalone") { req->SetMethodName("slobrok.lookupRpcServer"); req->GetParams()->AddString("*/*"); sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); + ASSERT_TRUE(checkOk(req)); ASSERT_TRUE(strcmp(req->GetReturnSpec(), "SS") == 0); ASSERT_TRUE(req->GetReturn()->GetValue(0)._string_array._len == 0); 
ASSERT_TRUE(req->GetReturn()->GetValue(1)._string_array._len == 0); @@ -160,7 +226,7 @@ TEST("standalone") { req->GetParams()->AddString("B"); req->GetParams()->AddString("tcp/localhost:18543"); sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); + ASSERT_TRUE(checkOk(req)); { Server a2("A", 18544); @@ -179,7 +245,7 @@ TEST("standalone") { req->SetMethodName("slobrok.lookupRpcServer"); req->GetParams()->AddString("*"); sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); + ASSERT_TRUE(checkOk(req)); ASSERT_TRUE(strcmp(req->GetReturnSpec(), "SS") == 0); ASSERT_TRUE(req->GetReturn()->GetValue(0)._string_array._len == 2); ASSERT_TRUE(req->GetReturn()->GetValue(1)._string_array._len == 2); @@ -207,7 +273,7 @@ TEST("standalone") { req->SetMethodName("slobrok.lookupRpcServer"); req->GetParams()->AddString("B"); sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); + ASSERT_TRUE(checkOk(req)); ASSERT_TRUE(strcmp(req->GetReturnSpec(), "SS") == 0); ASSERT_TRUE(req->GetReturn()->GetValue(0)._string_array._len == 0); ASSERT_TRUE(req->GetReturn()->GetValue(1)._string_array._len == 0); @@ -225,7 +291,7 @@ TEST("standalone") { req->SetMethodName("slobrok.lookupRpcServer"); req->GetParams()->AddString("A"); sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); + ASSERT_TRUE(checkOk(req)); ASSERT_TRUE(strcmp(req->GetReturnSpec(), "SS") == 0); ASSERT_TRUE(req->GetReturn()->GetValue(0)._string_array._len == 1); ASSERT_TRUE(req->GetReturn()->GetValue(1)._string_array._len == 1); @@ -238,14 +304,14 @@ TEST("standalone") { req->GetParams()->AddString("A"); req->GetParams()->AddString("tcp/localhost:18542"); sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); + ASSERT_TRUE(checkOk(req)); // lookup 'A' should give '' req = orb.AllocRPCRequest(req); req->SetMethodName("slobrok.lookupRpcServer"); req->GetParams()->AddString("A"); sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); + ASSERT_TRUE(checkOk(req)); ASSERT_TRUE(strcmp(req->GetReturnSpec(), "SS") == 
0); ASSERT_TRUE(req->GetReturn()->GetValue(0)._string_array._len == 0); ASSERT_TRUE(req->GetReturn()->GetValue(1)._string_array._len == 0); @@ -255,7 +321,7 @@ TEST("standalone") { req->SetMethodName("slobrok.lookupRpcServer"); req->GetParams()->AddString("*"); sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); + ASSERT_TRUE(checkOk(req)); ASSERT_TRUE(strcmp(req->GetReturnSpec(), "SS") == 0); ASSERT_TRUE(req->GetReturn()->GetValue(0)._string_array._len == 0); ASSERT_TRUE(req->GetReturn()->GetValue(1)._string_array._len == 0); @@ -266,13 +332,7 @@ TEST("standalone") { req->GetParams()->AddString("A"); req->GetParams()->AddString("tcp/localhost:18542"); sb->InvokeSync(req, 5.0); - ASSERT_TRUE(!req->IsError()); - - sb->SubRef(); - req->SubRef(); - - slobrokServer.stop(); - orb.ShutDown(true); + ASSERT_TRUE(checkOk(req)); } TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/staging_vespalib/src/tests/array/allocinarray_benchmark.cpp b/staging_vespalib/src/tests/array/allocinarray_benchmark.cpp index 9f365ff937c..426cb7ee3b8 100644 --- a/staging_vespalib/src/tests/array/allocinarray_benchmark.cpp +++ b/staging_vespalib/src/tests/array/allocinarray_benchmark.cpp @@ -60,7 +60,7 @@ private: typedef TreeNode<long> N; typedef RefTreeNode<long> R; -typedef AllocInArray<R, vespalib::Array<R, MMapAlloc> > Store; +typedef AllocInArray<R, vespalib::Array<R> > Store; void populate(Store & store, uint32_t parent, size_t depth) { diff --git a/staging_vespalib/src/tests/array/allocinarray_test.cpp b/staging_vespalib/src/tests/array/allocinarray_test.cpp index 3a40a71f288..de6e5792b35 100644 --- a/staging_vespalib/src/tests/array/allocinarray_test.cpp +++ b/staging_vespalib/src/tests/array/allocinarray_test.cpp @@ -26,7 +26,7 @@ Test::Main() TEST_INIT("allocinarray_test"); testAllocInArray<int64_t, vespalib::Array<int64_t> >(); - testAllocInArray<int64_t, vespalib::Array<int64_t, vespalib::DefaultAlloc> >(); + testAllocInArray<int64_t, vespalib::Array<int64_t> >(); 
testAllocInArray<int64_t, std::vector<int64_t> >(); testAllocInArray<int64_t, std::deque<int64_t> >(); diff --git a/staging_vespalib/src/tests/memorydatastore/memorydatastore.cpp b/staging_vespalib/src/tests/memorydatastore/memorydatastore.cpp index 2bb5a272783..57a18fc5573 100644 --- a/staging_vespalib/src/tests/memorydatastore/memorydatastore.cpp +++ b/staging_vespalib/src/tests/memorydatastore/memorydatastore.cpp @@ -21,7 +21,7 @@ public: void MemoryDataStoreTest::testMemoryDataStore() { - MemoryDataStore s(256); + MemoryDataStore s(DefaultAlloc::create(256)); std::vector<MemoryDataStore::Reference> v; v.push_back(s.push_back("mumbo", 5)); for (size_t i(0); i < 50; i++) { diff --git a/staging_vespalib/src/tests/objectdump/objectdump.cpp b/staging_vespalib/src/tests/objectdump/objectdump.cpp index 3427bbbff3b..8e0999ee80a 100644 --- a/staging_vespalib/src/tests/objectdump/objectdump.cpp +++ b/staging_vespalib/src/tests/objectdump/objectdump.cpp @@ -6,13 +6,14 @@ LOG_SETUP("objectdump_test"); #include <vespa/vespalib/objects/identifiable.h> #include <vespa/vespalib/objects/visit.h> -using namespace vespalib; - #define CID_Base 10000000 #define CID_Foo 10000001 #define CID_Bar 10000002 #define CID_Baz 10000003 +using vespalib::ObjectVisitor; +using vespalib::IdentifiablePtr; + struct Base : public vespalib::Identifiable { DECLARE_IDENTIFIABLE(Base); diff --git a/staging_vespalib/src/vespa/vespalib/data/databuffer.cpp b/staging_vespalib/src/vespa/vespalib/data/databuffer.cpp index 43b82aaf62b..e046415e8e8 100644 --- a/staging_vespalib/src/vespa/vespalib/data/databuffer.cpp +++ b/staging_vespalib/src/vespa/vespalib/data/databuffer.cpp @@ -12,24 +12,20 @@ size_t padbefore(size_t alignment, const char *buf) { } } -template <typename T> -DataBufferT<T>::DataBufferT(size_t len, size_t alignment) +DataBuffer::DataBuffer(size_t len, size_t alignment, const Alloc & initial) : _alignment(alignment), _externalBuf(NULL), _bufstart(NULL), _bufend(NULL), _datapt(NULL), 
_freept(NULL), - _buffer() + _buffer(initial.create(0)) { assert(_alignment > 0); if (len > 0) { // avoid very small buffers for performance reasons: - size_t bufsize = 256; - while (bufsize < len + (_alignment - 1)) { - bufsize *= 2; - } - T newBuf(bufsize); + size_t bufsize = std::max(256ul, roundUp2inN(len + (_alignment - 1))); + Alloc newBuf(initial.create(bufsize)); _bufstart = static_cast<char *>(newBuf.get()); _buffer.swap(newBuf); @@ -41,18 +37,16 @@ DataBufferT<T>::DataBufferT(size_t len, size_t alignment) } -template <typename T> void -DataBufferT<T>::moveFreeToData(size_t len) +DataBuffer::moveFreeToData(size_t len) { assert(getFreeLen() >= len); _freept += len; } -template <typename T> void -DataBufferT<T>::moveDeadToData(size_t len) +DataBuffer::moveDeadToData(size_t len) { assert(getDeadLen() >= len); _datapt -= len; @@ -62,18 +56,16 @@ DataBufferT<T>::moveDeadToData(size_t len) } -template <typename T> void -DataBufferT<T>::moveDataToFree(size_t len) +DataBuffer::moveDataToFree(size_t len) { assert(getDataLen() >= len); _freept -= len; } -template <typename T> bool -DataBufferT<T>::shrink(size_t newsize) +DataBuffer::shrink(size_t newsize) { if (getBufSize() <= newsize || getDataLen() > newsize) { return false; @@ -81,10 +73,9 @@ DataBufferT<T>::shrink(size_t newsize) char *newbuf = NULL; char *newdata = NULL; newsize += (_alignment - 1); - T newBuf(newsize); + Alloc newBuf(_buffer.create(newsize)); if (newsize != 0) { newbuf = static_cast<char *>(newBuf.get()); - assert(newbuf != NULL); newdata = newbuf + padbefore(_alignment, newbuf); memcpy(newdata, _datapt, getDataLen()); } @@ -97,9 +88,8 @@ DataBufferT<T>::shrink(size_t newsize) } -template <typename T> void -DataBufferT<T>::pack(size_t needbytes) +DataBuffer::pack(size_t needbytes) { needbytes += (_alignment - 1); size_t dataLen = getDataLen(); @@ -107,15 +97,9 @@ DataBufferT<T>::pack(size_t needbytes) if ((getDeadLen() + getFreeLen()) < needbytes || (getDeadLen() + getFreeLen()) * 4 < dataLen) 
{ - size_t bufsize = getBufSize() * 2; - if (bufsize < 256) { - bufsize = 256; - } - while (bufsize - dataLen < needbytes) - bufsize *= 2; - T newBuf(bufsize); + size_t bufsize = std::max(256ul, roundUp2inN(needbytes+dataLen)); + Alloc newBuf(_buffer.create(bufsize)); char *newbuf = static_cast<char *>(newBuf.get()); - assert(newbuf != NULL); char *newdata = newbuf + padbefore(_alignment, newbuf); memcpy(newdata, _datapt, dataLen); _bufstart = newbuf; @@ -132,9 +116,8 @@ DataBufferT<T>::pack(size_t needbytes) } -template <typename T> bool -DataBufferT<T>::equals(DataBufferT *other) +DataBuffer::equals(DataBuffer *other) { if (getDataLen() != other->getDataLen()) return false; @@ -142,9 +125,8 @@ DataBufferT<T>::equals(DataBufferT *other) } -template <typename T> void -DataBufferT<T>::hexDump() +DataBuffer::hexDump() { char *pt = _datapt; printf("*** DataBuffer HexDump BEGIN ***\n"); @@ -160,9 +142,8 @@ DataBufferT<T>::hexDump() } -template <typename T> void -DataBufferT<T>::swap(DataBufferT &other) +DataBuffer::swap(DataBuffer &other) { _buffer.swap(other._buffer); std::swap(_alignment, other._alignment); @@ -173,9 +154,8 @@ DataBufferT<T>::swap(DataBufferT &other) std::swap(_freept, other._freept); } -template <typename T> -T -DataBufferT<T>::stealBuffer() +vespalib::alloc::Alloc +DataBuffer::stealBuffer() { assert( ! 
referencesExternalData() ); _externalBuf = nullptr; @@ -186,15 +166,9 @@ DataBufferT<T>::stealBuffer() return std::move(_buffer); } -template <typename T> bool -DataBufferT<T>::referencesExternalData() const { +DataBuffer::referencesExternalData() const { return (_externalBuf == _bufstart) && (getBufSize() > 0); } -template class DataBufferT<HeapAlloc>; -template class DataBufferT<MMapAlloc>; -template class DataBufferT<DefaultAlloc>; - - } // namespace vespalib diff --git a/staging_vespalib/src/vespa/vespalib/data/databuffer.h b/staging_vespalib/src/vespa/vespalib/data/databuffer.h index 7db17808eae..53e9e54d0f5 100644 --- a/staging_vespalib/src/vespa/vespalib/data/databuffer.h +++ b/staging_vespalib/src/vespa/vespalib/data/databuffer.h @@ -30,23 +30,22 @@ namespace vespalib { * the data will be relocated within the buffer and/or a bigger buffer * will be allocated. **/ -template <typename T> -class DataBufferT +class DataBuffer { private: + using Alloc = vespalib::alloc::Alloc; size_t _alignment; char *_externalBuf; char *_bufstart; char *_bufend; char *_datapt; char *_freept; - T _buffer; - - DataBufferT(const DataBufferT &); - DataBufferT &operator=(const DataBufferT &); + Alloc _buffer; public: - typedef std::unique_ptr<DataBufferT<T>> UP; + typedef std::unique_ptr<DataBuffer> UP; + DataBuffer(const DataBuffer &) = delete; + DataBuffer &operator=(const DataBuffer &) = delete; /** * Construct a databuffer. @@ -54,7 +53,7 @@ public: * @param len the initial size of the buffer. * @param alignment required memory alignment for data start **/ - DataBufferT(size_t len = 1024, size_t alignment = 1); + DataBuffer(size_t len = 1024, size_t alignment = 1, const Alloc & initial = vespalib::DefaultAlloc::create(0)); /** * Construct a databuffer using externally allocated memory. 
Note @@ -64,22 +63,24 @@ public: * @param buf pointer to preallocated memory * @param len length of preallocated memory **/ - DataBufferT(char *buf, size_t len) : + DataBuffer(char *buf, size_t len) : _alignment(1), _externalBuf(buf), _bufstart(buf), _bufend(buf + len), _datapt(_bufstart), - _freept(_bufstart) + _freept(_bufstart), + _buffer(vespalib::DefaultAlloc::create(0)) { } - DataBufferT(const char *buf, size_t len) : + DataBuffer(const char *buf, size_t len) : _alignment(1), _externalBuf(const_cast<char *>(buf)), _bufstart(_externalBuf), _bufend(_bufstart + len), _datapt(_bufstart), - _freept(_bufend) + _freept(_bufend), + _buffer(vespalib::DefaultAlloc::create(0)) { } /** @@ -570,7 +571,7 @@ public: * @return true(equal)/false(not equal) * @param other the other buffer. **/ - bool equals(DataBufferT *other); + bool equals(DataBuffer *other); /** * Print a human-readable representation of this buffer to @@ -601,13 +602,10 @@ public: * * @param other the other buffer. **/ - void swap(DataBufferT &other); + void swap(DataBuffer &other); - T stealBuffer(); + Alloc stealBuffer(); }; -typedef DataBufferT<DefaultAlloc> DataBuffer; -typedef DataBufferT<MMapAlloc> MMapDataBuffer; - } // namespace vespalib diff --git a/staging_vespalib/src/vespa/vespalib/objects/identifiable.cpp b/staging_vespalib/src/vespa/vespalib/objects/identifiable.cpp index bac071c8048..c6459219fc4 100644 --- a/staging_vespalib/src/vespa/vespalib/objects/identifiable.cpp +++ b/staging_vespalib/src/vespa/vespalib/objects/identifiable.cpp @@ -9,7 +9,7 @@ #include "visit.h" #include "objectpredicate.h" #include "objectoperation.h" -#include <cxxabi.h> +#include <vespa/vespalib/util/classname.h> namespace vespalib { @@ -187,13 +187,7 @@ Identifiable::UP Identifiable::create(Deserializer & is) string Identifiable::getNativeClassName() const { - string name(typeid(*this).name()); - int status = 0; - size_t size = 0; - char *unmangled = abi::__cxa_demangle(name.c_str(), 0, &size, &status); - string 
result(unmangled); - free(unmangled); - return result; + return vespalib::getClassName(*this); } string diff --git a/staging_vespalib/src/vespa/vespalib/stllike/CMakeLists.txt b/staging_vespalib/src/vespa/vespalib/stllike/CMakeLists.txt index cf36a7f210f..e6b34d52615 100644 --- a/staging_vespalib/src/vespa/vespalib/stllike/CMakeLists.txt +++ b/staging_vespalib/src/vespa/vespalib/stllike/CMakeLists.txt @@ -1,6 +1,5 @@ # Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. vespa_add_library(staging_vespalib_vespalib_stllike OBJECT SOURCES - smallvector.cpp DEPENDS ) diff --git a/staging_vespalib/src/vespa/vespalib/stllike/lrucache_map.h b/staging_vespalib/src/vespa/vespalib/stllike/lrucache_map.h index d0fb4259b30..b48096f184c 100644 --- a/staging_vespalib/src/vespa/vespalib/stllike/lrucache_map.h +++ b/staging_vespalib/src/vespa/vespalib/stllike/lrucache_map.h @@ -189,8 +189,10 @@ private: void ref(const internal_iterator & it); insert_result insert(value_type && value); void removeOld(); - class RecordMoves : public noncopyable { + class RecordMoves { public: + RecordMoves(const RecordMoves &) = delete; + RecordMoves & operator = (const RecordMoves &) = delete; RecordMoves(lrucache_map & lru) : _lru(lru) { diff --git a/staging_vespalib/src/vespa/vespalib/stllike/smallvector.h b/staging_vespalib/src/vespa/vespalib/stllike/smallvector.h deleted file mode 100644 index 931e8ef433d..00000000000 --- a/staging_vespalib/src/vespa/vespalib/stllike/smallvector.h +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -/** - * A vector type implementation that is optimized for keeping a small amount of - * elements. If a small amount is kept, no malloc will be done within the - * vector implementation. 
- */ - -#pragma once - -#include <vespa/fastos/fastos.h> -#include <iterator> -#include <memory> -#include <vector> - -namespace vespalib { - -/** - * A generic iterator implementation using size() and operator[] to access - * elements. - */ -template<typename Container, typename T> -class IndexedContainerIterator - : public std::iterator<std::random_access_iterator_tag, T> -{ - Container* _container; - uint64_t _index; - -public: - typedef IndexedContainerIterator<Container, T> Iterator; - typedef typename std::iterator<std::random_access_iterator_tag, T>::difference_type difference_type; - // Required to be possible to default construct iterators - IndexedContainerIterator() : _container(0), _index(-1) {} - IndexedContainerIterator(Container& c, uint64_t index) - : _container(&c), _index(index) {} - - T& operator*() { return (*_container)[_index]; } - T* operator->() { return &(*_container)[_index]; } - - bool operator==(const Iterator& o) const { - return (_index == o._index); - } - bool operator!=(const Iterator& o) const { - return (_index != o._index); - } - bool operator<(const Iterator& o) const { - return (_index < o._index); - } - - Iterator& operator++() { - ++_index; - return *this; - } - Iterator operator++(int) { - return Iterator(*_container, _index++); - } - Iterator& operator--() { - --_index; - return *this; - } - Iterator operator--(int) { - return Iterator(*_container, _index--); - } - - Iterator operator+(const difference_type& v) { - return Iterator(*_container, _index + v); - } - Iterator operator-(const difference_type& v) { - return Iterator(*_container, _index - v); - } - difference_type operator-(const Iterator& o) { - return _index - o._index; - } -}; - -template <typename T, size_t S = 8> -class SmallVector { - size_t _size; - T _smallVector[S]; - std::vector<T> _bigVector; - -public: - typedef IndexedContainerIterator<SmallVector<T, S>, T> iterator; - typedef IndexedContainerIterator<const SmallVector<T, S>, const T> const_iterator; - 
typedef T value_type; - typedef T& reference; - typedef const T& const_reference; - typedef size_t difference_type; - typedef size_t size_type; - - iterator begin() { return iterator(*this, 0); } - iterator end() { return iterator(*this, _size); } - const_iterator begin() const { return const_iterator(*this, 0); } - const_iterator end() const { return const_iterator(*this, _size); } - - SmallVector() : _size(0) {} - - SmallVector(std::initializer_list<T> elems) - : _size(0) - { - for (auto it=elems.begin(); it != elems.end(); ++it) { - push_back(*it); - } - } - - SmallVector(const SmallVector<T, S>& other) = delete; - SmallVector<T, S>& operator=(const SmallVector<T, S>& other) = delete; - - size_t getEfficientSizeLimit() const { return S; } - - void push_back(const T& t) { - if (_size < S) { - _smallVector[_size] = t; - ++_size; - } else { - if (_size == S) { - populateVector(); - } - _bigVector.push_back(t); - ++_size; - } - } - void pop_back() { - if (_size <= S) { - --_size; - } else { - if (--_size == S) { - _bigVector.clear(); - } else { - _bigVector.pop_back(); - } - } - } - const T& back() const { return operator[](_size - 1); } - T& back() { return operator[](_size - 1); } - const T& front() const { return operator[](0); } - T& front() { return operator[](0); } - void clear() { - _size = 0; - _bigVector.clear(); - } - const T& operator[](size_t i) const { - if (i < S) { - return _smallVector[i]; - } else { - return _bigVector[i]; - } - } - T& operator[](size_t i) { - if (i < S) { - return _smallVector[i]; - } else { - return _bigVector[i]; - } - } - bool empty() const { return (_size == 0); } - size_t size() const { return _size; } - - template<typename O> - bool operator==(const O& o) const { - if (size() != o.size()) return false; - for (size_t i=0; i<_size; ++i) { - if ((*this)[i] != o[i]) return false; - } - return true; - } - template<typename O> - bool operator!=(const O& o) const { - return !(operator==(o)); - } - - void erase(iterator eraseIt) { - 
SmallVector<T, S> copy; - for (auto it = begin(); it != end(); ++it) { - if (it != eraseIt) { - copy.push_back(*it); - } - } - copy.swap(*this); - } - -private: - void populateVector() { - _bigVector.reserve(S+1); - for (size_t i=0; i<S; ++i) { - _bigVector.push_back(_smallVector[i]); - } - } -}; - - -} // namespace vespalib diff --git a/staging_vespalib/src/vespa/vespalib/util/allocinarray.h b/staging_vespalib/src/vespa/vespalib/util/allocinarray.h index a6a89c4d41a..755d1885eee 100644 --- a/staging_vespalib/src/vespa/vespalib/util/allocinarray.h +++ b/staging_vespalib/src/vespa/vespalib/util/allocinarray.h @@ -16,7 +16,7 @@ namespace vespalib { * - when the AllocInArray goes out of scope. * - on an explicit clear. **/ -template <typename T, typename V=vespalib::Array<T, HeapAlloc> > +template <typename T, typename V=vespalib::Array<T> > class AllocInArray { public: typedef uint32_t Index; diff --git a/staging_vespalib/src/vespa/vespalib/util/growablebytebuffer.cpp b/staging_vespalib/src/vespa/vespalib/util/growablebytebuffer.cpp index 96e74ecf007..b17accddc5d 100644 --- a/staging_vespalib/src/vespa/vespalib/util/growablebytebuffer.cpp +++ b/staging_vespalib/src/vespa/vespalib/util/growablebytebuffer.cpp @@ -5,7 +5,7 @@ using namespace vespalib; GrowableByteBuffer::GrowableByteBuffer(uint32_t initialLen) : - _buffer(initialLen), + _buffer(DefaultAlloc::create(initialLen)), _position(0) { } @@ -16,7 +16,7 @@ GrowableByteBuffer::allocate(uint32_t len) size_t need(_position + len); if (need > _buffer.size()) { uint32_t newSize = vespalib::roundUp2inN(need); - DefaultAlloc newBuf(newSize); + Alloc newBuf(DefaultAlloc::create(newSize)); memcpy(newBuf.get(), _buffer.get(), _position); _buffer.swap(newBuf); } diff --git a/staging_vespalib/src/vespa/vespalib/util/growablebytebuffer.h b/staging_vespalib/src/vespa/vespalib/util/growablebytebuffer.h index 61baa250e40..fd65f0134bc 100644 --- a/staging_vespalib/src/vespa/vespalib/util/growablebytebuffer.h +++ 
b/staging_vespalib/src/vespa/vespalib/util/growablebytebuffer.h @@ -85,8 +85,8 @@ public: private: void putReverse(const char* buffer, uint32_t length); - - DefaultAlloc _buffer; + using Alloc = vespalib::alloc::Alloc; + Alloc _buffer; uint32_t _position; double _growFactor; diff --git a/staging_vespalib/src/vespa/vespalib/util/programoptions.cpp b/staging_vespalib/src/vespa/vespalib/util/programoptions.cpp index a4b120ab492..a108c9ad151 100644 --- a/staging_vespalib/src/vespa/vespalib/util/programoptions.cpp +++ b/staging_vespalib/src/vespa/vespalib/util/programoptions.cpp @@ -5,6 +5,7 @@ #include <iostream> #include <vespa/log/log.h> #include <vespa/vespalib/util/exceptions.h> +#include <boost/lexical_cast.hpp> LOG_SETUP(".programoptions"); diff --git a/staging_vespalib/src/vespa/vespalib/util/programoptions.h b/staging_vespalib/src/vespa/vespalib/util/programoptions.h index cbba95ae150..fa41f85fec8 100644 --- a/staging_vespalib/src/vespa/vespalib/util/programoptions.h +++ b/staging_vespalib/src/vespa/vespalib/util/programoptions.h @@ -23,7 +23,6 @@ #pragma once -#include <boost/lexical_cast.hpp> #include <map> #include <set> #include <string> diff --git a/standalone-container/src/main/scala/com/yahoo/application/container/impl/ClassLoaderOsgiFramework.scala b/standalone-container/src/main/scala/com/yahoo/application/container/impl/ClassLoaderOsgiFramework.scala index 6d45e6fa8a1..5a41462cb48 100644 --- a/standalone-container/src/main/scala/com/yahoo/application/container/impl/ClassLoaderOsgiFramework.scala +++ b/standalone-container/src/main/scala/com/yahoo/application/container/impl/ClassLoaderOsgiFramework.scala @@ -17,6 +17,8 @@ import org.osgi.framework.wiring._ import org.osgi.resource.{Wire, Capability, Requirement} /** + * A (mock) OSGI implementation which loads classes from the system classpath + * * @author tonytv */ final class ClassLoaderOsgiFramework extends OsgiFramework { @@ -197,4 +199,5 @@ final class ClassLoaderOsgiFramework extends 
OsgiFramework { override def createFilter(filter: String) = throw new UnsupportedOperationException } + } diff --git a/standalone-container/src/main/scala/com/yahoo/container/standalone/StandaloneContainerApplication.scala b/standalone-container/src/main/scala/com/yahoo/container/standalone/StandaloneContainerApplication.scala index 324de2771f4..fc57353c194 100644 --- a/standalone-container/src/main/scala/com/yahoo/container/standalone/StandaloneContainerApplication.scala +++ b/standalone-container/src/main/scala/com/yahoo/container/standalone/StandaloneContainerApplication.scala @@ -2,7 +2,7 @@ package com.yahoo.container.standalone import com.google.inject.{Key, AbstractModule, Injector, Inject} -import com.yahoo.config.application.api.{DeployLogger, RuleConfigDeriver, FileRegistry, ApplicationPackage} +import com.yahoo.config.application.api.{RuleConfigDeriver, FileRegistry, ApplicationPackage} import com.yahoo.config.provision.Zone import com.yahoo.jdisc.application.Application import com.yahoo.container.jdisc.ConfiguredApplication @@ -10,6 +10,7 @@ import java.io.{IOException, File} import com.yahoo.config.model.test.MockRoot import com.yahoo.config.model.application.provider._ import com.yahoo.vespa.defaults.Defaults +import com.yahoo.vespa.model.VespaModel import com.yahoo.vespa.model.container.xml.{ConfigServerContainerModelBuilder, ManhattanContainerModelBuilder, ContainerModelBuilder} import org.w3c.dom.Element import com.yahoo.config.model.builder.xml.XmlHelper @@ -134,9 +135,9 @@ object StandaloneContainerApplication { tmpDir.toFile } - private def validateApplication(applicationPackage: ApplicationPackage, logger: DeployLogger) = { + private def validateApplication(applicationPackage: ApplicationPackage) = { try { - applicationPackage.validateXML(logger) + applicationPackage.validateXML() } catch { case e: IOException => throw new IllegalArgumentException(e) } @@ -164,14 +165,14 @@ object StandaloneContainerApplication { fileRegistry: FileRegistry, 
preprocessedApplicationDir: File, networkingOption: Networking, - configModelRepo: ConfigModelRepo = new ConfigModelRepo): (MockRoot, Container) = { + configModelRepo: ConfigModelRepo = new ConfigModelRepo): (VespaModel, Container) = { val logger = new BaseDeployLogger - val rawApplicationPackage = new FilesApplicationPackage.Builder(applicationPath.toFile).preprocessedDir(preprocessedApplicationDir).build() + val rawApplicationPackage = new FilesApplicationPackage.Builder(applicationPath.toFile).includeSourceFiles(true).preprocessedDir(preprocessedApplicationDir).build() // TODO: Needed until we get rid of semantic rules val applicationPackage = rawApplicationPackage.preprocess(Zone.defaultZone(), new RuleConfigDeriver { override def derive(ruleBaseDir: String, outputDir: String): Unit = {} }, logger) - validateApplication(applicationPackage, logger) + validateApplication(applicationPackage) val deployState = new DeployState.Builder(). applicationPackage(applicationPackage). fileRegistry(fileRegistry). @@ -179,12 +180,13 @@ object StandaloneContainerApplication { configDefinitionRepo(configDefinitionRepo). 
build() - val root = new MockRoot("", deployState) + val root = VespaModel.createIncomplete(deployState) val vespaRoot = new ApplicationConfigProducerRoot(root, "vespa", deployState.getDocumentModel, deployState.getProperties.vespaVersion(), deployState.getProperties.applicationId()) + val spec = containerRootElement(applicationPackage) val containerModel = newContainerModelBuilder(networkingOption).build(deployState, configModelRepo, vespaRoot, spec) @@ -192,11 +194,16 @@ object StandaloneContainerApplication { containerModel.initialize(configModelRepo) val container = first(containerModel.getCluster().getContainers) + // TODO: If we can do the mutations below on the builder, we can separate out model finalization from the + // VespaModel constructor, such that the above and below code to finalize the container can be + // replaced by root.finalize(); + // Always disable rpc server for standalone container. This server will soon be removed anyway. container.setRpcServerEnabled(false) container.setHttpServerEnabled(networkingOption == Networking.enable) initializeContainer(container, spec) + root.freezeModelTopology() (root, container) } diff --git a/standalone-container/src/main/scala/com/yahoo/container/standalone/StandaloneSubscriberFactory.scala b/standalone-container/src/main/scala/com/yahoo/container/standalone/StandaloneSubscriberFactory.scala index 432e5b82946..4ac88eaafae 100644 --- a/standalone-container/src/main/scala/com/yahoo/container/standalone/StandaloneSubscriberFactory.scala +++ b/standalone-container/src/main/scala/com/yahoo/container/standalone/StandaloneSubscriberFactory.scala @@ -4,6 +4,7 @@ package com.yahoo.container.standalone import com.yahoo.config.model.test.MockRoot import com.yahoo.config.{ConfigBuilder, ConfigInstance} import com.yahoo.container.di.ConfigKeyT +import com.yahoo.vespa.model.VespaModel import scala.collection.JavaConversions._ import scala.collection.JavaConverters._ import com.yahoo.vespa.config.ConfigKey @@ -14,7 +15,7 
@@ import StandaloneSubscriberFactory._ * @author tonytv * @author gjoranv */ -class StandaloneSubscriberFactory(root: MockRoot) extends SubscriberFactory { +class StandaloneSubscriberFactory(root: VespaModel) extends SubscriberFactory { class StandaloneSubscriber(configKeys: Set[ConfigKeyT]) extends Subscriber { override def configChanged = generation == 0 diff --git a/standalone-container/src/test/java/com/yahoo/container/standalone/StandaloneContainerActivatorTest.java b/standalone-container/src/test/java/com/yahoo/container/standalone/StandaloneContainerActivatorTest.java index 484d4c4d50e..f847f05108e 100644 --- a/standalone-container/src/test/java/com/yahoo/container/standalone/StandaloneContainerActivatorTest.java +++ b/standalone-container/src/test/java/com/yahoo/container/standalone/StandaloneContainerActivatorTest.java @@ -24,7 +24,7 @@ import static org.hamcrest.collection.IsEmptyCollection.empty; import static org.junit.Assert.assertThat; /** - * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a> + * @author Einar M R Rosenvinge * @since 5.22.0 */ public class StandaloneContainerActivatorTest { diff --git a/standalone-container/src/test/scala/com/yahoo/container/standalone/StandaloneContainer.scala b/standalone-container/src/test/scala/com/yahoo/container/standalone/StandaloneContainer.scala index 7152c0c0af1..f0c2ce6fa0d 100644 --- a/standalone-container/src/test/scala/com/yahoo/container/standalone/StandaloneContainer.scala +++ b/standalone-container/src/test/scala/com/yahoo/container/standalone/StandaloneContainer.scala @@ -32,7 +32,7 @@ object StandaloneContainer { } } - def withContainerModel[T](containerNode: Node)(f: MockRoot => T) { + def withContainerModel[T](containerNode: Node)(f: VespaModel => T) { withTempDirectory { applicationPath => createServicesXml(applicationPath, containerNode) diff --git a/standalone-container/src/test/scala/com/yahoo/container/standalone/StandaloneContainerTest.scala 
b/standalone-container/src/test/scala/com/yahoo/container/standalone/StandaloneContainerTest.scala index 41026e1c263..2705322ab32 100644 --- a/standalone-container/src/test/scala/com/yahoo/container/standalone/StandaloneContainerTest.scala +++ b/standalone-container/src/test/scala/com/yahoo/container/standalone/StandaloneContainerTest.scala @@ -58,7 +58,7 @@ class StandaloneContainerTest { </services> StandaloneContainer.withContainerModel(servicesXml) { root => - assertNotNull(root.getProducer("container-1/standalone")) + assertTrue(root.getConfigProducer("container-1/standalone").isPresent) } } @@ -72,10 +72,10 @@ class StandaloneContainerTest { </jdisc> StandaloneContainer.withContainerModel(xml) { root => - val container = root.getProducer("jdisc/standalone").asInstanceOf[AbstractService] + val container = root.getConfigProducer("jdisc/standalone").get().asInstanceOf[AbstractService] println("portCnt: " + container.getPortCount) println("numPorts: " + container.getNumPortsAllocated) - assertThat(container.getNumPortsAllocated, is(1)) + assertEquals(1, container.getNumPortsAllocated) } } diff --git a/storage/src/tests/bucketdb/judyarraytest.cpp b/storage/src/tests/bucketdb/judyarraytest.cpp index 235c0c9eb5c..4a6157fc93e 100644 --- a/storage/src/tests/bucketdb/judyarraytest.cpp +++ b/storage/src/tests/bucketdb/judyarraytest.cpp @@ -61,18 +61,6 @@ JudyArrayTest::testIterating() foundVals = getJudyArrayContents(array); CPPUNIT_ASSERT_EQUAL(values, foundVals); - { // Test that both postfix operator work - JudyArray::iterator it = array.begin(); - JudyArray::iterator it2 = it++; - CPPUNIT_ASSERT_EQUAL(JudyArray::value_type(values[0]), *it2); - CPPUNIT_ASSERT_EQUAL(JudyArray::value_type(values[1]), *it); - - // And that iterator comparisons work - CPPUNIT_ASSERT_EQUAL(it2, array.begin()); - CPPUNIT_ASSERT_EQUAL(it, ++array.begin()); - CPPUNIT_ASSERT(!(it == it2)); - CPPUNIT_ASSERT(it != it2); - } { // Test that we can alter through non-const iterator 
JudyArray::iterator it = array.begin(); ++it; @@ -127,7 +115,7 @@ JudyArrayTest::testDualArrayFunctions() CPPUNIT_ASSERT_EQUAL(values1, getJudyArrayContents(array1)); CPPUNIT_ASSERT_EQUAL(values2, getJudyArrayContents(array2)); - CPPUNIT_ASSERT(array1 > array2); + CPPUNIT_ASSERT(array2 < array1); CPPUNIT_ASSERT(array1 != array2); array1.swap(array2); CPPUNIT_ASSERT_EQUAL(values1, getJudyArrayContents(array2)); @@ -142,10 +130,7 @@ JudyArrayTest::testDualArrayFunctions() } CPPUNIT_ASSERT(array1 != array3); CPPUNIT_ASSERT_EQUAL(array2, array3); - CPPUNIT_ASSERT(array2 >= array3); - CPPUNIT_ASSERT(array2 <= array3); CPPUNIT_ASSERT(!(array2 < array3)); - CPPUNIT_ASSERT(!(array2 > array3)); } void diff --git a/storage/src/tests/bucketdb/judymultimaptest.cpp b/storage/src/tests/bucketdb/judymultimaptest.cpp index f63fad9aa06..b737155d6a9 100644 --- a/storage/src/tests/bucketdb/judymultimaptest.cpp +++ b/storage/src/tests/bucketdb/judymultimaptest.cpp @@ -135,37 +135,35 @@ JudyMultiMapTest::testIterator() MultiMap::Iterator iter = multiMap.begin(); CPPUNIT_ASSERT_EQUAL((uint64_t)11, (uint64_t)iter.key()); CPPUNIT_ASSERT_EQUAL(A(4, 6, 0), iter.value()); - iter++; + ++iter; CPPUNIT_ASSERT_EQUAL((uint64_t)14, (uint64_t)iter.key()); CPPUNIT_ASSERT_EQUAL(A(42, 0, 0), iter.value()); - iter++; + ++iter; CPPUNIT_ASSERT_EQUAL((uint64_t)16, (uint64_t)iter.key()); CPPUNIT_ASSERT_EQUAL(A(1, 2, 3), iter.value()); - iter--; + --iter; CPPUNIT_ASSERT_EQUAL((uint64_t)14, (uint64_t)iter.key()); CPPUNIT_ASSERT_EQUAL(A(42, 0, 0), iter.value()); - iter++; + ++iter; CPPUNIT_ASSERT_EQUAL((uint64_t)16, (uint64_t)iter.key()); CPPUNIT_ASSERT_EQUAL(A(1, 2, 3), iter.value()); - iter--; - iter--; + --iter; + --iter; CPPUNIT_ASSERT_EQUAL((uint64_t)11,(uint64_t) iter.key()); CPPUNIT_ASSERT_EQUAL(A(4, 6, 0), iter.value()); - iter++; - iter++; - iter++; + ++iter; + ++iter; + ++iter; CPPUNIT_ASSERT_EQUAL(multiMap.end(), iter); - iter--; + --iter; CPPUNIT_ASSERT_EQUAL((uint64_t)16, (uint64_t)iter.key()); 
CPPUNIT_ASSERT_EQUAL(A(1, 2, 3), iter.value()); - iter--; + --iter; CPPUNIT_ASSERT_EQUAL((uint64_t)14, (uint64_t)iter.key()); CPPUNIT_ASSERT_EQUAL(A(42, 0, 0), iter.value()); - iter--; + --iter; CPPUNIT_ASSERT_EQUAL((uint64_t)11,(uint64_t) iter.key()); CPPUNIT_ASSERT_EQUAL(A(4, 6, 0), iter.value()); - - } } // storage diff --git a/storage/src/tests/bucketdb/lockablemaptest.cpp b/storage/src/tests/bucketdb/lockablemaptest.cpp index 0f35f51afbd..ad1cc1dcef4 100644 --- a/storage/src/tests/bucketdb/lockablemaptest.cpp +++ b/storage/src/tests/bucketdb/lockablemaptest.cpp @@ -158,48 +158,36 @@ LockableMapTest::testComparison() { // Check empty state is correct CPPUNIT_ASSERT_EQUAL(map1, map2); - CPPUNIT_ASSERT(map1 <= map2); - CPPUNIT_ASSERT(map1 >= map2); CPPUNIT_ASSERT(!(map1 < map2)); - CPPUNIT_ASSERT(!(map1 > map2)); CPPUNIT_ASSERT(!(map1 != map2)); // Check that different lengths are oki map1.insert(4, A(1, 2, 3), "foo", preExisted); CPPUNIT_ASSERT(!(map1 == map2)); - CPPUNIT_ASSERT(!(map1 <= map2)); CPPUNIT_ASSERT(!(map1 < map2)); - CPPUNIT_ASSERT(map1 >= map2); - CPPUNIT_ASSERT(map1 > map2); + CPPUNIT_ASSERT(map2 < map1); CPPUNIT_ASSERT(map1 != map2); // Check that equal elements are oki map2.insert(4, A(1, 2, 3), "foo", preExisted); CPPUNIT_ASSERT_EQUAL(map1, map2); - CPPUNIT_ASSERT(map1 <= map2); - CPPUNIT_ASSERT(map1 >= map2); CPPUNIT_ASSERT(!(map1 < map2)); - CPPUNIT_ASSERT(!(map1 > map2)); CPPUNIT_ASSERT(!(map1 != map2)); // Check that non-equal values are oki map1.insert(6, A(1, 2, 6), "foo", preExisted); map2.insert(6, A(1, 2, 3), "foo", preExisted); CPPUNIT_ASSERT(!(map1 == map2)); - CPPUNIT_ASSERT(!(map1 <= map2)); CPPUNIT_ASSERT(!(map1 < map2)); - CPPUNIT_ASSERT(map1 >= map2); - CPPUNIT_ASSERT(map1 > map2); + CPPUNIT_ASSERT(map2 < map1); CPPUNIT_ASSERT(map1 != map2); // Check that non-equal keys are oki map1.erase(6, "foo"); map1.insert(7, A(1, 2, 3), "foo", preExisted); CPPUNIT_ASSERT(!(map1 == map2)); - CPPUNIT_ASSERT(!(map1 <= map2)); 
CPPUNIT_ASSERT(!(map1 < map2)); - CPPUNIT_ASSERT(map1 >= map2); - CPPUNIT_ASSERT(map1 > map2); + CPPUNIT_ASSERT(map2 < map1); CPPUNIT_ASSERT(map1 != map2); } diff --git a/storage/src/vespa/storage/bucketdb/judyarray.h b/storage/src/vespa/storage/bucketdb/judyarray.h index 963fdb86f98..c539e1e8469 100644 --- a/storage/src/vespa/storage/bucketdb/judyarray.h +++ b/storage/src/vespa/storage/bucketdb/judyarray.h @@ -16,7 +16,6 @@ #pragma once -#include <boost/operators.hpp> #include <vespa/vespalib/util/exceptions.h> #include <vespa/vespalib/util/printable.h> #include <vespa/fastos/fastos.h> @@ -25,7 +24,7 @@ namespace storage { -class JudyArray : public vespalib::Printable, public boost::operators<JudyArray> +class JudyArray : public vespalib::Printable { JudyArray(const JudyArray&); // Deny copying JudyArray& operator=(const JudyArray&); @@ -49,6 +48,9 @@ public: virtual ~JudyArray(); bool operator==(const JudyArray& array) const; + bool operator!=(const JudyArray& array) const { + return ! (*this == array); + } bool operator<(const JudyArray& array) const; /** Warning: Size may be a O(n) function (Unknown implementation in judy) */ @@ -86,14 +88,16 @@ public: virtual void print(std::ostream& out, bool verbose, const std::string& indent) const; - class ConstIterator : public vespalib::Printable, - public boost::operators<ConstIterator> + class ConstIterator : public vespalib::Printable { public: ConstIterator& operator--(); - ConstIterator& operator++(); // Prefix, postfix provided by boost + ConstIterator& operator++(); - bool operator==(const ConstIterator &cp) const; // != provided by boost + bool operator==(const ConstIterator &cp) const; + bool operator!=(const ConstIterator &cp) const { + return ! 
(*this == cp); + } value_type operator*() const { return value_type(_key, *_data); } bool end() const { return (_data == 0); } @@ -115,8 +119,7 @@ public: friend class JudyArray; }; - class Iterator : public ConstIterator, - public boost::operators<Iterator> + class Iterator : public ConstIterator { public: Iterator& operator--() diff --git a/storage/src/vespa/storage/bucketdb/judymultimap.h b/storage/src/vespa/storage/bucketdb/judymultimap.h index ea7c005dc24..c6f0194c7bd 100644 --- a/storage/src/vespa/storage/bucketdb/judymultimap.h +++ b/storage/src/vespa/storage/bucketdb/judymultimap.h @@ -95,14 +95,16 @@ public: virtual void print(std::ostream& out, bool verbose, const std::string& indent) const; - class ConstIterator : public vespalib::Printable, - public boost::operators<ConstIterator> + class ConstIterator : public vespalib::Printable { public: ConstIterator& operator--() { --_iterator; return *this; } ConstIterator& operator++() { ++_iterator; return *this; } - bool operator==(const ConstIterator &cp) const; // != provided by boost + bool operator==(const ConstIterator &cp) const; + bool operator!=(const ConstIterator &cp) const { + return ! 
(*this == cp); + } value_type operator*() const; inline bool end() const { return _iterator.end(); } @@ -129,8 +131,7 @@ public: mutable std::pair<key_type, mapped_type> _pair; }; - class Iterator : public ConstIterator, - public boost::operators<Iterator> + class Iterator : public ConstIterator { public: Iterator& operator--() @@ -150,10 +151,10 @@ public: private: JudyArray _judyArray; - typedef vespalib::Array<Type0, vespalib::DefaultAlloc> Type0Vector; - typedef vespalib::Array<Type1, vespalib::DefaultAlloc> Type1Vector; - typedef vespalib::Array<Type2, vespalib::DefaultAlloc> Type2Vector; - typedef vespalib::Array<Type3, vespalib::DefaultAlloc> Type3Vector; + typedef vespalib::Array<Type0> Type0Vector; + typedef vespalib::Array<Type1> Type1Vector; + typedef vespalib::Array<Type2> Type2Vector; + typedef vespalib::Array<Type3> Type3Vector; Type0Vector _values0; Type1Vector _values1; Type2Vector _values2; diff --git a/storage/src/vespa/storage/bucketdb/lockablemap.h b/storage/src/vespa/storage/bucketdb/lockablemap.h index 95d9326eff0..25a1cf09d0b 100644 --- a/storage/src/vespa/storage/bucketdb/lockablemap.h +++ b/storage/src/vespa/storage/bucketdb/lockablemap.h @@ -28,14 +28,14 @@ namespace storage { template<typename Map> -class LockableMap : public vespalib::Printable, - public boost::operators<LockableMap<Map> > +class LockableMap : public vespalib::Printable { public: typedef typename Map::key_type key_type; typedef typename Map::mapped_type mapped_type; typedef typename Map::value_type value_type; typedef typename Map::size_type size_type; + using BucketId = document::BucketId; /** Responsible for releasing lock in map when out of scope. 
*/ class LockKeeper { @@ -66,8 +66,8 @@ public: bool locked() const { return _lockKeeper.get(); } const key_type& getKey() const { return _lockKeeper->_key; }; - document::BucketId getBucketId() const { - return document::BucketId(document::BucketId::keyToBucketId(getKey())); + BucketId getBucketId() const { + return BucketId(BucketId::keyToBucketId(getKey())); } protected: @@ -114,6 +114,9 @@ public: LockableMap(); bool operator==(const LockableMap& other) const; + bool operator!=(const LockableMap& other) const { + return ! (*this == other); + } bool operator<(const LockableMap& other) const; typename Map::size_type size() const; size_type getMemoryUsage() const; @@ -173,15 +176,15 @@ public: * bucket. Usually, there should be only one such bucket, but in the case * of inconsistent splitting, there may be more than one. */ - std::map<document::BucketId, WrappedEntry> - getContained(const document::BucketId& bucketId, const char* clientId); + std::map<BucketId, WrappedEntry> + getContained(const BucketId& bucketId, const char* clientId); WrappedEntry createAppropriateBucket(uint16_t newBucketBits, const char* clientId, - const document::BucketId& bucket); + const BucketId& bucket); - typedef std::map<document::BucketId, WrappedEntry> EntryMap; + typedef std::map<BucketId, WrappedEntry> EntryMap; /** * Returns all buckets in the bucket database that can contain the given @@ -189,10 +192,7 @@ public: * * If sibling is != 0, also fetch that bucket if possible. */ - EntryMap getAll( - const document::BucketId& bucketId, - const char* clientId, - const document::BucketId& sibling = document::BucketId(0)); + EntryMap getAll(const BucketId& bucketId, const char* clientId, const BucketId& sibling = BucketId(0)); /** * Returns true iff bucket has no superbuckets or sub-buckets in the @@ -270,9 +270,9 @@ private: /** * Returns the given bucket, its super buckets and its sub buckets. 
*/ - void getAllWithoutLocking(const document::BucketId& bucket, - const document::BucketId& sibling, - std::vector<document::BucketId::Type>& keys); + void getAllWithoutLocking(const BucketId& bucket, + const BucketId& sibling, + std::vector<BucketId::Type>& keys); /** * Retrieves the most specific bucket id (highest used bits) that matches @@ -284,25 +284,25 @@ private: * If not found, nextKey is set to the key after one that could have * matched and we return false. */ - bool getMostSpecificMatch(const document::BucketId& bucket, - document::BucketId& result, - document::BucketId::Type& keyResult, - document::BucketId::Type& nextKey); + bool getMostSpecificMatch(const BucketId& bucket, + BucketId& result, + BucketId::Type& keyResult, + BucketId::Type& nextKey); /** * Finds all buckets that can contain the given bucket, except for the * bucket itself (that is, its super buckets) */ - void getAllContaining(const document::BucketId& bucket, - std::vector<document::BucketId::Type>& keys); + void getAllContaining(const BucketId& bucket, + std::vector<BucketId::Type>& keys); /** * Find the given list of keys in the map and add them to the map of * results, locking them in the process. 
*/ - void addAndLockResults(const std::vector<document::BucketId::Type> keys, + void addAndLockResults(const std::vector<BucketId::Type> keys, const char* clientId, - std::map<document::BucketId, WrappedEntry>& results, + std::map<BucketId, WrappedEntry>& results, vespalib::MonitorGuard& guard); }; @@ -692,12 +692,9 @@ LockableMap<Map>::print(std::ostream& out, bool verbose, out << "LockableMap {\n" << indent << " "; if (verbose) { - for (typename Map::const_iterator iter = _map.begin(); - iter != _map.end(); - iter++) { - out << "Key: " << - document::BucketId(document::BucketId::keyToBucketId(iter->first)) - << " Value: " << iter->second << "\n" << indent << " "; + for (const auto & entry : _map) { + out << "Key: " << BucketId(BucketId::keyToBucketId(entry.first)) + << " Value: " << entry.second << "\n" << indent << " "; } out << "\n" << indent << " Locked keys: "; @@ -747,8 +744,7 @@ bool checkContains(document::BucketId::Type key, const document::BucketId& bucket, document::BucketId& result, document::BucketId::Type& keyResult) { - document::BucketId id = document::BucketId( - document::BucketId::keyToBucketId(key)); + document::BucketId id = document::BucketId(document::BucketId::keyToBucketId(key)); if (id.contains(bucket)) { result = id; keyResult = key; @@ -772,10 +768,10 @@ checkContains(document::BucketId::Type key, const document::BucketId& bucket, */ template<typename Map> bool -LockableMap<Map>::getMostSpecificMatch(const document::BucketId& bucket, - document::BucketId& result, - document::BucketId::Type& keyResult, - document::BucketId::Type& nextKey) +LockableMap<Map>::getMostSpecificMatch(const BucketId& bucket, + BucketId& result, + BucketId::Type& keyResult, + BucketId::Type& nextKey) { typename Map::const_iterator iter = _map.lower_bound(bucket.toKey()); @@ -809,17 +805,17 @@ LockableMap<Map>::getMostSpecificMatch(const document::BucketId& bucket, */ template<typename Map> void -LockableMap<Map>::getAllContaining(const document::BucketId& 
bucket, - std::vector<document::BucketId::Type>& keys) +LockableMap<Map>::getAllContaining(const BucketId& bucket, + std::vector<BucketId::Type>& keys) { - document::BucketId id = bucket; + BucketId id = bucket; // Find other buckets that contain this bucket. // TODO: Optimize? while (id.getUsedBits() > 1) { id.setUsedBits(id.getUsedBits() - 1); id = id.stripUnused(); - document::BucketId::Type key = id.toKey(); + BucketId::Type key = id.toKey(); typename Map::const_iterator iter = _map.find(key); if (iter != _map.end()) { @@ -831,9 +827,9 @@ LockableMap<Map>::getAllContaining(const document::BucketId& bucket, template<typename Map> void LockableMap<Map>::addAndLockResults( - const std::vector<document::BucketId::Type> keys, + const std::vector<BucketId::Type> keys, const char* clientId, - std::map<document::BucketId, WrappedEntry>& results, + std::map<BucketId, WrappedEntry>& results, vespalib::MonitorGuard& guard) { // Wait until all buckets are free to be added, then add them all. @@ -858,8 +854,7 @@ LockableMap<Map>::addAndLockResults( typename Map::iterator it = _map.find(keys[i]); if (it != _map.end()) { _lockedKeys.insert(LockId(keys[i], clientId)); - results[document::BucketId( - document::BucketId::keyToBucketId(keys[i]))] + results[BucketId(BucketId::keyToBucketId(keys[i]))] = WrappedEntry(*this, keys[i], it->second, clientId, true); } @@ -873,8 +868,8 @@ namespace { uint8_t getMinDiffBits(uint16_t minBits, const document::BucketId& a, const document::BucketId& b) { for (uint32_t i = minBits; i <= std::min(a.getUsedBits(), b.getUsedBits()); i++) { - document::BucketId a1 = document::BucketId(i, a.getRawId()); - document::BucketId b1 = document::BucketId(i, b.getRawId()); + document::BucketId a1(i, a.getRawId()); + document::BucketId b1(i, b.getRawId()); if (b1.getId() != a1.getId()) { return i; } @@ -889,7 +884,7 @@ typename LockableMap<Map>::WrappedEntry LockableMap<Map>::createAppropriateBucket( uint16_t newBucketBits, const char* clientId, - const 
document::BucketId& bucket) + const BucketId& bucket) { vespalib::MonitorGuard guard(_lock); typename Map::const_iterator iter = _map.lower_bound(bucket.toKey()); @@ -898,19 +893,17 @@ LockableMap<Map>::createAppropriateBucket( // bucket's used bits should be the highest used bits it can be while // still being different from both of these. if (iter != _map.end()) { - newBucketBits = getMinDiffBits(newBucketBits, - document::BucketId(document::BucketId::keyToBucketId(iter->first)), bucket); + newBucketBits = getMinDiffBits(newBucketBits, BucketId(BucketId::keyToBucketId(iter->first)), bucket); } if (iter != _map.begin()) { --iter; - newBucketBits = getMinDiffBits(newBucketBits, - document::BucketId(document::BucketId::keyToBucketId(iter->first)), bucket); + newBucketBits = getMinDiffBits(newBucketBits, BucketId(BucketId::keyToBucketId(iter->first)), bucket); } - document::BucketId newBucket(newBucketBits, bucket.getRawId()); + BucketId newBucket(newBucketBits, bucket.getRawId()); newBucket.setUsedBits(newBucketBits); - document::BucketId::Type key = newBucket.stripUnused().toKey(); + BucketId::Type key = newBucket.stripUnused().toKey(); LockId lid(key, clientId); ackquireKey(lid, guard); @@ -922,17 +915,17 @@ LockableMap<Map>::createAppropriateBucket( template<typename Map> std::map<document::BucketId, typename LockableMap<Map>::WrappedEntry> -LockableMap<Map>::getContained(const document::BucketId& bucket, +LockableMap<Map>::getContained(const BucketId& bucket, const char* clientId) { vespalib::MonitorGuard guard(_lock); - std::map<document::BucketId, WrappedEntry> results; + std::map<BucketId, WrappedEntry> results; - document::BucketId result; - document::BucketId::Type keyResult; - document::BucketId::Type nextKey; + BucketId result; + BucketId::Type keyResult; + BucketId::Type nextKey; - std::vector<document::BucketId::Type> keys; + std::vector<BucketId::Type> keys; if (getMostSpecificMatch(bucket, result, keyResult, nextKey)) { keys.push_back(keyResult); @@ 
-955,13 +948,13 @@ LockableMap<Map>::getContained(const document::BucketId& bucket, template<typename Map> void -LockableMap<Map>::getAllWithoutLocking(const document::BucketId& bucket, - const document::BucketId& sibling, - std::vector<document::BucketId::Type>& keys) +LockableMap<Map>::getAllWithoutLocking(const BucketId& bucket, + const BucketId& sibling, + std::vector<BucketId::Type>& keys) { - document::BucketId result; - document::BucketId::Type keyResult; - document::BucketId::Type nextKey; + BucketId result; + BucketId::Type keyResult; + BucketId::Type nextKey; typename Map::iterator it = _map.end(); @@ -974,7 +967,7 @@ LockableMap<Map>::getAllWithoutLocking(const document::BucketId& bucket, it = _map.find(keyResult); if (it != _map.end()) { // Skipping nextKey, since it was equal to keyResult - it++; + ++it; } } else { // Find the super buckets for the input bucket @@ -986,19 +979,17 @@ LockableMap<Map>::getAllWithoutLocking(const document::BucketId& bucket, if (it != _map.end()) { // Nextkey might be contained in the imput bucket, // e.g. if it is the first bucket in bucketdb - document::BucketId id = document::BucketId( - document::BucketId::keyToBucketId(it->first)); + BucketId id = BucketId(BucketId::keyToBucketId(it->first)); if (!bucket.contains(id)) { - it++; + ++it; } } } // Buckets contained in the found bucket will come immediately after it. // Traverse the map to find them. 
- for (; it != _map.end(); it++) { - document::BucketId id( - document::BucketId(document::BucketId::keyToBucketId(it->first))); + for (; it != _map.end(); ++it) { + BucketId id(BucketId(BucketId::keyToBucketId(it->first))); if (bucket.contains(id)) { keys.push_back(it->first); @@ -1017,13 +1008,13 @@ LockableMap<Map>::getAllWithoutLocking(const document::BucketId& bucket, */ template<typename Map> std::map<document::BucketId, typename LockableMap<Map>::WrappedEntry> -LockableMap<Map>::getAll(const document::BucketId& bucket, const char* clientId, - const document::BucketId& sibling) +LockableMap<Map>::getAll(const BucketId& bucket, const char* clientId, + const BucketId& sibling) { vespalib::MonitorGuard guard(_lock); - std::map<document::BucketId, WrappedEntry> results; - std::vector<document::BucketId::Type> keys; + std::map<BucketId, WrappedEntry> results; + std::vector<BucketId::Type> keys; getAllWithoutLocking(bucket, sibling, keys); @@ -1038,8 +1029,8 @@ LockableMap<Map>::isConsistent(const typename LockableMap<Map>::WrappedEntry& en { vespalib::MonitorGuard guard(_lock); - document::BucketId sibling(0); - std::vector<document::BucketId::Type> keys; + BucketId sibling(0); + std::vector<BucketId::Type> keys; getAllWithoutLocking(entry.getBucketId(), sibling, keys); assert(keys.size() >= 1); @@ -1058,7 +1049,7 @@ LockableMap<Map>::showLockClients(vespalib::asciistream & out) const it != _lockedKeys.end(); ++it) { out << "\n " - << document::BucketId(document::BucketId::keyToBucketId(it->_key)) + << BucketId(BucketId::keyToBucketId(it->_key)) << " - " << it->_owner; } out << "\nClients waiting for keys:"; @@ -1066,7 +1057,7 @@ LockableMap<Map>::showLockClients(vespalib::asciistream & out) const it != _lockWaiters.end(); ++it) { out << "\n " - << document::BucketId(document::BucketId::keyToBucketId(it->second._key)) + << BucketId(BucketId::keyToBucketId(it->second._key)) << " - " << it->second._owner; } } diff --git 
a/storage/src/vespa/storage/bucketmover/bucketmover.cpp b/storage/src/vespa/storage/bucketmover/bucketmover.cpp index dab6f2ebdc0..f6319b7ef55 100644 --- a/storage/src/vespa/storage/bucketmover/bucketmover.cpp +++ b/storage/src/vespa/storage/bucketmover/bucketmover.cpp @@ -1,7 +1,6 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. #include <vespa/fastos/fastos.h> -#include <boost/lexical_cast.hpp> #include <iomanip> #include <vespa/storage/bucketmover/bucketmover.h> #include <vespa/storage/bucketmover/htmltable.h> @@ -31,8 +30,7 @@ BucketMover::BucketMover(const config::ConfigUri & configUri, { if (!configUri.empty()) { using vespa::config::content::core::StorBucketmoverConfig; - _configFetcher.subscribe<StorBucketmoverConfig>( - configUri.getConfigId(), this); + _configFetcher.subscribe<StorBucketmoverConfig>(configUri.getConfigId(), this); _configFetcher.start(); } _component.registerStatusPage(*this); @@ -193,8 +191,7 @@ BucketMover::tick() { vespalib::MonitorGuard monitor(_wait); - framework::SecondTime currentTime( - _component.getClock().getTimeInSeconds()); + framework::SecondTime currentTime(_component.getClock().getTimeInSeconds()); if (_currentRun.get() == 0) { if (currentTime >= _nextRun) { diff --git a/storage/src/vespa/storage/common/hostreporter/cpureporter.cpp b/storage/src/vespa/storage/common/hostreporter/cpureporter.cpp index 5ed6d560975..d7f59e07d4e 100644 --- a/storage/src/vespa/storage/common/hostreporter/cpureporter.cpp +++ b/storage/src/vespa/storage/common/hostreporter/cpureporter.cpp @@ -8,8 +8,7 @@ #include <vespa/vespalib/stllike/string.h> #include <vespa/vespalib/text/stringtokenizer.h> #include "kernelmetrictool.h" - -#include <boost/array.hpp> +#include <array> LOG_SETUP(".cpureporter"); @@ -30,7 +29,7 @@ const vespalib::string priorityText[proprityLevels] = struct CpuInfo { int _cpuIndex; - boost::array<uint64_t, proprityLevels> _usage; + std::array<uint64_t, 
proprityLevels> _usage; CpuInfo(int index) : _cpuIndex(index) {} uint64_t getTotalUsage() const { diff --git a/storage/src/vespa/storage/common/statusmetricconsumer.cpp b/storage/src/vespa/storage/common/statusmetricconsumer.cpp index 4f94fbb0be8..d81de03919d 100644 --- a/storage/src/vespa/storage/common/statusmetricconsumer.cpp +++ b/storage/src/vespa/storage/common/statusmetricconsumer.cpp @@ -436,19 +436,6 @@ StatusMetricConsumer::printHtmlMetricsReport( std::ostream& out, const metrics::MetricSnapshot& data, bool includeNotUsed) const { - using namespace boost::assign; - - /* - std::cerr << "All metrics available:\n"; - for (MetricSnapshot::const_iterator it = data.begin(); - it != data.end(); ++it) - { - std::cerr << " '" << it->first << "' => '"; - it->second->printXml(std::cerr); - std::cerr << "'.\n"; - } - */ - std::map<String, Metric::SP> usedMetrics; out << "<h2>Metrics report for the last " diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/mergelimiter.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/mergelimiter.cpp index 18a2f8c8118..618ba9ba884 100644 --- a/storage/src/vespa/storage/distributor/operations/idealstate/mergelimiter.cpp +++ b/storage/src/vespa/storage/distributor/operations/idealstate/mergelimiter.cpp @@ -1,7 +1,6 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include <vespa/storage/distributor/operations/idealstate/mergelimiter.h> -#include <vespa/vdslib/container/smallvector.h> #include <vespa/log/log.h> @@ -19,7 +18,7 @@ MergeLimiter::MergeLimiter(uint16_t maxNodes) namespace { class EqualCopies { uint32_t _checksum; - lib::SmallVector<MergeMetaData> _copies; + std::vector<MergeMetaData> _copies; uint32_t _trustedCopies; public: @@ -47,14 +46,8 @@ namespace { } }; - vespalib::asciistream& operator<<(vespalib::asciistream& out, - const EqualCopies& e) - { - return out << "EqualCopies(" << e.size() << ")"; - } - class Statistics { - lib::SmallVector<EqualCopies> _groups; + std::vector<EqualCopies> _groups; uint32_t _trustedCopies; public: @@ -62,6 +55,7 @@ namespace { Statistics(const MergeLimiter::NodeArray& a) : _trustedCopies(0) { + _groups.reserve(a.size()); for (uint32_t i=0, n=a.size(); i<n; ++i) { add(a[i]); if (a[i].trusted()) { @@ -87,18 +81,20 @@ namespace { uint32_t trustedCount() const { return _trustedCopies; } Statistics extractGroupsWithTrustedCopies() { - lib::SmallVector<EqualCopies> _remaining; + std::vector<EqualCopies> remaining; Statistics trusted; + remaining.reserve(_groups.size()); + trusted._groups.reserve(_groups.size()); for (uint32_t i=0, n=_groups.size(); i<n; ++i) { if (_groups[i].hasTrusted()) { trusted._groups.push_back(_groups[i]); trusted._trustedCopies += _groups[i].trustedCount(); } else { - _remaining.push_back(_groups[i]); + remaining.push_back(_groups[i]); _trustedCopies -= _groups[i].trustedCount(); } } - swap(_remaining, _groups); + swap(remaining, _groups); return trusted; } bool extractNext(MergeMetaData& data, uint32_t& last) { @@ -112,9 +108,12 @@ namespace { return true; } void removeGroup(uint32_t groupIndex) { - lib::SmallVector<EqualCopies> remaining; + std::vector<EqualCopies> remaining; + remaining.reserve(_groups.size()-1); for (uint32_t i=0, n=_groups.size(); i<n; ++i) { - if (i != groupIndex) remaining.push_back(_groups[i]); + if (i != groupIndex) { + 
remaining.push_back(_groups[i]); + } } remaining.swap(_groups); } diff --git a/storage/src/vespa/storage/distributor/operationtargetresolver.h b/storage/src/vespa/storage/distributor/operationtargetresolver.h index 87757ec3227..380ce3ae1e6 100644 --- a/storage/src/vespa/storage/distributor/operationtargetresolver.h +++ b/storage/src/vespa/storage/distributor/operationtargetresolver.h @@ -7,7 +7,6 @@ #pragma once #include <vespa/document/bucket/bucketid.h> -#include <vespa/vdslib/container/smallvector.h> #include <vespa/vdslib/state/node.h> #include <vespa/vespalib/util/printable.h> @@ -42,7 +41,7 @@ public: } }; -class OperationTargetList : public lib::SmallVector<OperationTarget> { +class OperationTargetList : public std::vector<OperationTarget> { public: bool hasAnyNewCopies() const { for (size_t i=0; i<size(); ++i) { diff --git a/storage/src/vespa/storage/distributor/operationtargetresolverimpl.h b/storage/src/vespa/storage/distributor/operationtargetresolverimpl.h index 08d79c501e4..36cb02dc213 100644 --- a/storage/src/vespa/storage/distributor/operationtargetresolverimpl.h +++ b/storage/src/vespa/storage/distributor/operationtargetresolverimpl.h @@ -27,7 +27,7 @@ struct BucketInstance : public vespalib::AsciiPrintable { }; class BucketInstanceList : public vespalib::AsciiPrintable { - lib::SmallVector<BucketInstance> _instances; + std::vector<BucketInstance> _instances; /** * Resolve and return the least specific bucket in the subtree of (and @@ -73,8 +73,8 @@ public: OperationTargetList createTargets(); - void print(vespalib::asciistream& out, const PrintProperties& p) const { - _instances.print(out, p); + void print(vespalib::asciistream& out, const PrintProperties& p) const override { + vespalib::print(_instances, out, p); } }; diff --git a/storage/src/vespa/storage/tools/generatedistributionbits.cpp b/storage/src/vespa/storage/tools/generatedistributionbits.cpp index 0bf27c5f05c..83bf602d0c3 100644 --- 
a/storage/src/vespa/storage/tools/generatedistributionbits.cpp +++ b/storage/src/vespa/storage/tools/generatedistributionbits.cpp @@ -1,6 +1,5 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. #include <vespa/fastos/fastos.h> -#include <boost/assign.hpp> #include <vespa/document/bucket/bucketidfactory.h> #include <vespa/vespalib/util/programoptions.h> #include <vespa/vdslib/distribution/distribution.h> @@ -60,13 +59,10 @@ namespace storage { } void finalize() { - using namespace boost::assign; if (highRange) { - nodeCounts += 16, 20, 32, 48, 64, 100, 128, 160, 200, 256, 350, - 500, 800, 1000, 5000; + nodeCounts.insert(nodeCounts.begin(), {16, 20, 32, 48, 64, 100, 128, 160, 200, 256, 350, 500, 800, 1000, 5000}); } else { - nodeCounts += 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, - 11, 12, 13, 14, 15; + nodeCounts.insert(nodeCounts.begin(), {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}); } for (uint32_t i=1; i<=maxBit; ++i) { bitCounts.push_back(i); diff --git a/storageapi/src/vespa/storageapi/mbusprot/storagereply.cpp b/storageapi/src/vespa/storageapi/mbusprot/storagereply.cpp index b9fde51eaef..0165348968e 100644 --- a/storageapi/src/vespa/storageapi/mbusprot/storagereply.cpp +++ b/storageapi/src/vespa/storageapi/mbusprot/storagereply.cpp @@ -4,13 +4,17 @@ #include <vespa/storageapi/mbusprot/storagecommand.h> +using vespalib::DefaultAlloc; +using vespalib::alloc::Alloc; +using vespalib::IllegalStateException; + namespace storage { namespace mbusprot { StorageReply::StorageReply(const mbus::BlobRef& data, const ProtocolSerialization& serializer) : _serializer(&serializer), - _buffer(data.size()), + _buffer(DefaultAlloc::create(data.size())), _mbusType(0), _reply() { @@ -38,20 +42,16 @@ StorageReply::deserialize() const StorageReply& reply(const_cast<StorageReply&>(*this)); mbus::Message::UP msg(reply.getMessage()); if (msg.get() == 0) { - throw vespalib::IllegalStateException( - "Cannot deserialize storage 
reply before message have been set", - VESPA_STRLOC); + throw IllegalStateException("Cannot deserialize storage reply before message have been set", VESPA_STRLOC); } const StorageCommand* cmd(dynamic_cast<const StorageCommand*>(msg.get())); reply.setMessage(std::move(msg)); if (cmd == 0) { - throw vespalib::IllegalStateException( - "Storage reply get message did not return a storage command", - VESPA_STRLOC); + throw IllegalStateException("Storage reply get message did not return a storage command", VESPA_STRLOC); } mbus::BlobRef blobRef(static_cast<char *>(_buffer.get()), _buffer.size()); _reply = _serializer->decodeReply(blobRef, *cmd->getCommand())->getReply(); - vespalib::DefaultAlloc().swap(_buffer); + Alloc().swap(_buffer); } } // mbusprot diff --git a/storageapi/src/vespa/storageapi/mbusprot/storagereply.h b/storageapi/src/vespa/storageapi/mbusprot/storagereply.h index 52f7efc50cc..429833bf08b 100644 --- a/storageapi/src/vespa/storageapi/mbusprot/storagereply.h +++ b/storageapi/src/vespa/storageapi/mbusprot/storagereply.h @@ -11,7 +11,7 @@ namespace mbusprot { class StorageReply : public mbus::Reply, public StorageMessage { const ProtocolSerialization* _serializer; - mutable vespalib::DefaultAlloc _buffer; + mutable vespalib::alloc::Alloc _buffer; uint32_t _mbusType; mutable api::StorageReply::SP _reply; diff --git a/storageapi/src/vespa/storageapi/message/bucket.h b/storageapi/src/vespa/storageapi/message/bucket.h index 28b3ba36f30..b47aa01c63e 100644 --- a/storageapi/src/vespa/storageapi/message/bucket.h +++ b/storageapi/src/vespa/storageapi/message/bucket.h @@ -409,7 +409,7 @@ public: : _bucketId(id), _info(info) {} friend std::ostream& operator<<(std::ostream& os, const Entry&); }; - typedef vespalib::Array<Entry, vespalib::DefaultAlloc> EntryVector; + typedef vespalib::Array<Entry> EntryVector; private: EntryVector _buckets; diff --git a/testutil/pom.xml b/testutil/pom.xml index 0d2aa8a4dbf..bd46d2e4c16 100644 --- a/testutil/pom.xml +++ 
b/testutil/pom.xml @@ -15,6 +15,11 @@ <description>Library of useful Hamcrest matchers.</description> <dependencies> <dependency> + <groupId>com.google.guava</groupId> + <artifactId>guava</artifactId> + <scope>provided</scope> + </dependency> + <dependency> <groupId>org.hamcrest</groupId> <artifactId>hamcrest-core</artifactId> <scope>compile</scope> @@ -25,6 +30,11 @@ <scope>compile</scope> </dependency> <dependency> + <groupId>uk.co.datumedge</groupId> + <artifactId>hamcrest-json</artifactId> + <scope>compile</scope> + </dependency> + <dependency> <groupId>junit</groupId> <artifactId>junit</artifactId> <scope>compile</scope> diff --git a/config/src/test/java/com/yahoo/config/subscription/util/JsonHelper.java b/testutil/src/main/java/com/yahoo/test/json/JsonTestHelper.java index 27ac1a1278c..a8e8e562b2d 100644 --- a/config/src/test/java/com/yahoo/config/subscription/util/JsonHelper.java +++ b/testutil/src/main/java/com/yahoo/test/json/JsonTestHelper.java @@ -1,15 +1,16 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.config.subscription.util; +package com.yahoo.test.json; import com.google.common.base.Joiner; +import org.hamcrest.MatcherAssert; -import static org.hamcrest.MatcherAssert.assertThat; import static uk.co.datumedge.hamcrest.json.SameJSONAs.sameJSONAs; /** * @author Vegard Sjonfjell */ -public class JsonHelper { +public class JsonTestHelper { + /** * Convenience method to input JSON without escaping double quotes and newlines * Each parameter represents a line of JSON encoded data @@ -23,6 +24,6 @@ public class JsonHelper { * Structurally compare two JSON encoded strings */ public static void assertJsonEquals(String inputJson, String expectedJson) { - assertThat(inputJson, sameJSONAs(expectedJson)); + MatcherAssert.assertThat(inputJson, sameJSONAs(expectedJson)); } } diff --git a/vdslib/src/main/java/com/yahoo/vdslib/state/ClusterState.java b/vdslib/src/main/java/com/yahoo/vdslib/state/ClusterState.java index b3d572e48ae..d70b55c66a2 100644 --- a/vdslib/src/main/java/com/yahoo/vdslib/state/ClusterState.java +++ b/vdslib/src/main/java/com/yahoo/vdslib/state/ClusterState.java @@ -11,6 +11,9 @@ import java.util.*; */ public class ClusterState implements Cloneable { + private static final NodeState DEFAULT_STORAGE_UP_NODE_STATE = new NodeState(NodeType.STORAGE, State.UP); + private static final NodeState DEFAULT_DISTRIBUTOR_UP_NODE_STATE = new NodeState(NodeType.DISTRIBUTOR, State.UP); + private int version = 0; private State state = State.DOWN; // nodeStates maps each of the non-up nodes that have an index <= the node count for its type. @@ -30,6 +33,22 @@ public class ClusterState implements Cloneable { deserialize(serialized); } + /** + * Parse a given cluster state string into a returned ClusterState instance, wrapping any + * parse exceptions in a RuntimeException. 
+ */ + public static ClusterState stateFromString(final String stateStr) { + try { + return new ClusterState(stateStr); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public static ClusterState emptyState() { + return stateFromString(""); + } + public ClusterState clone() { try{ ClusterState state = (ClusterState) super.clone(); @@ -61,22 +80,81 @@ public class ClusterState implements Cloneable { return true; } + @FunctionalInterface + private interface NodeStateCmp { + boolean similar(NodeType nodeType, NodeState lhs, NodeState rhs); + } + public boolean similarTo(Object o) { if (!(o instanceof ClusterState)) { return false; } - ClusterState other = (ClusterState) o; + final ClusterState other = (ClusterState) o; - if (state.equals(State.DOWN) && other.state.equals(State.DOWN)) return true; // both down, means equal (why??) - if (version != other.version || !state.equals(other.state)) return false; - if (distributionBits != other.distributionBits) return false; - if ( ! nodeCount.equals(other.nodeCount)) return false; + return similarToImpl(other, this::normalizedNodeStateSimilarTo); + } + + public boolean similarToIgnoringInitProgress(final ClusterState other) { + return similarToImpl(other, this::normalizedNodeStateSimilarToIgnoringInitProgress); + } - for (Map.Entry<Node, NodeState> nodeStateEntry : nodeStates.entrySet()) { - NodeState otherNodeState = other.nodeStates.get(nodeStateEntry.getKey()); - if (otherNodeState == null || ! otherNodeState.similarTo(nodeStateEntry.getValue())) return false; + private boolean similarToImpl(final ClusterState other, final NodeStateCmp nodeStateCmp) { + // Two cluster states are considered similar if they are both down. When clusters + // are down, their individual node states do not matter to ideal state computations + // and content nodes therefore do not need to observe them. 
+ if (state.equals(State.DOWN) && other.state.equals(State.DOWN)) { + return true; + } + if (!metaInformationSimilarTo(other)) { + return false; + } + // TODO verify behavior of C++ impl against this + for (Node node : unionNodeSetWith(other.nodeStates.keySet())) { + final NodeState lhs = nodeStates.get(node); + final NodeState rhs = other.nodeStates.get(node); + if (!nodeStateCmp.similar(node.getType(), lhs, rhs)) { + return false; + } } return true; } + private Set<Node> unionNodeSetWith(final Set<Node> otherNodes) { + final Set<Node> unionNodeSet = new TreeSet<Node>(nodeStates.keySet()); + unionNodeSet.addAll(otherNodes); + return unionNodeSet; + } + + private boolean metaInformationSimilarTo(final ClusterState other) { + if (version != other.version || !state.equals(other.state)) { + return false; + } + if (distributionBits != other.distributionBits) { + return false; + } + return nodeCount.equals(other.nodeCount); + } + + private boolean normalizedNodeStateSimilarTo(final NodeType nodeType, final NodeState lhs, final NodeState rhs) { + final NodeState lhsNormalized = (lhs != null ? lhs : defaultUpNodeState(nodeType)); + final NodeState rhsNormalized = (rhs != null ? rhs : defaultUpNodeState(nodeType)); + + return lhsNormalized.similarTo(rhsNormalized); + } + + private boolean normalizedNodeStateSimilarToIgnoringInitProgress( + final NodeType nodeType, final NodeState lhs, final NodeState rhs) + { + final NodeState lhsNormalized = (lhs != null ? lhs : defaultUpNodeState(nodeType)); + final NodeState rhsNormalized = (rhs != null ? rhs : defaultUpNodeState(nodeType)); + + return lhsNormalized.similarToIgnoringInitProgress(rhsNormalized); + } + + private static NodeState defaultUpNodeState(final NodeType nodeType) { + return nodeType == NodeType.STORAGE + ? DEFAULT_STORAGE_UP_NODE_STATE + : DEFAULT_DISTRIBUTOR_UP_NODE_STATE; + } + /** * Fleet controller marks states that are actually sent out to nodes as official states. 
Only fleetcontroller * should set this to official, and only just before sending it out. This state is currently not serialized with @@ -97,7 +175,7 @@ public class ClusterState implements Cloneable { public void addNodeState() throws ParseException { if (!empty) { NodeState ns = NodeState.deserialize(node.getType(), sb.toString()); - if (!ns.equals(new NodeState(node.getType(), State.UP))) { + if (!ns.equals(defaultUpNodeState(node.getType()))) { nodeStates.put(node, ns); } if (nodeCount.get(node.getType().ordinal()) <= node.getIndex()) { diff --git a/vdslib/src/main/java/com/yahoo/vdslib/state/NodeState.java b/vdslib/src/main/java/com/yahoo/vdslib/state/NodeState.java index 8c31938dfaf..15c929fe49d 100644 --- a/vdslib/src/main/java/com/yahoo/vdslib/state/NodeState.java +++ b/vdslib/src/main/java/com/yahoo/vdslib/state/NodeState.java @@ -112,17 +112,27 @@ public class NodeState implements Cloneable { * Cluster state will check for that. */ public boolean similarTo(Object o) { - if (!(o instanceof NodeState)) { return false; } - NodeState other = (NodeState) o; + if (!(o instanceof NodeState)) { + return false; + } + return similarToImpl((NodeState)o, true); + } + + public boolean similarToIgnoringInitProgress(final NodeState other) { + return similarToImpl(other, false); + } + private boolean similarToImpl(final NodeState other, boolean considerInitProgress) { if (state != other.state) return false; if (Math.abs(capacity - other.capacity) > 0.0000000001) return false; if (Math.abs(reliability - other.reliability) > 0.0000000001) return false; if (startTimestamp != other.startTimestamp) return false; // Init progress on different sides of the init progress limit boundary is not similar. 
- if (type.equals(NodeType.STORAGE) - && initProgress < getListingBucketsInitProgressLimit() ^ other.initProgress < getListingBucketsInitProgressLimit()) + if (considerInitProgress + && type.equals(NodeType.STORAGE) + && (initProgress < getListingBucketsInitProgressLimit() + ^ other.initProgress < getListingBucketsInitProgressLimit())) { return false; } diff --git a/vdslib/src/test/java/com/yahoo/vdslib/state/ClusterStateTestCase.java b/vdslib/src/test/java/com/yahoo/vdslib/state/ClusterStateTestCase.java index c058a7c9919..0d06fcc6faa 100644 --- a/vdslib/src/test/java/com/yahoo/vdslib/state/ClusterStateTestCase.java +++ b/vdslib/src/test/java/com/yahoo/vdslib/state/ClusterStateTestCase.java @@ -1,10 +1,18 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vdslib.state; +import org.junit.Test; + import java.text.ParseException; +import java.util.function.BiFunction; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; -public class ClusterStateTestCase extends junit.framework.TestCase { +public class ClusterStateTestCase{ + @Test public void testSetNodeState() throws ParseException { ClusterState state = new ClusterState(""); assertEquals("", state.toString()); @@ -22,6 +30,7 @@ public class ClusterStateTestCase extends junit.framework.TestCase { assertEquals("distributor:5 .0.s:d .2.s:d .3.s:d storage:1 .0.d:4 .0.d.1.s:d", state.toString()); } + @Test public void testClone() throws ParseException { ClusterState state = new ClusterState(""); state.setNodeState(new Node(NodeType.DISTRIBUTOR, 1), new NodeState(NodeType.DISTRIBUTOR, State.UP).setDescription("available")); @@ -31,8 +40,9 @@ public class ClusterStateTestCase extends junit.framework.TestCase { assertEquals(state.toString(true), other.toString(true)); assertEquals(state.toString(false), other.toString(false)); assertEquals(state, other); - 
} + } + @Test public void testEquals() throws ParseException { ClusterState state = new ClusterState(""); @@ -55,6 +65,7 @@ public class ClusterStateTestCase extends junit.framework.TestCase { ClusterState state2 = new ClusterState("distributor:3 .1.s:d .2.s:m storage:3 .1.s:i .2.s:m"); assertFalse(state1.equals(state2)); assertFalse(state1.similarTo(state2)); + assertFalse(state1.similarToIgnoringInitProgress(state2)); } { @@ -62,6 +73,7 @@ public class ClusterStateTestCase extends junit.framework.TestCase { ClusterState state2 = new ClusterState("cluster:d version:1 bits:20 distributor:1 storage:1 .0.s:d"); assertFalse(state1.equals(state2)); assertTrue(state1.similarTo(state2)); + assertTrue(state1.similarToIgnoringInitProgress(state2)); } { @@ -69,6 +81,7 @@ public class ClusterStateTestCase extends junit.framework.TestCase { ClusterState state2 = new ClusterState("distributor:3 storage:3"); assertFalse(state1.equals(state2)); assertFalse(state1.similarTo(state2)); + assertFalse(state1.similarToIgnoringInitProgress(state2)); } assertFalse(state.equals("class not instance of ClusterState")); @@ -78,6 +91,92 @@ public class ClusterStateTestCase extends junit.framework.TestCase { assertTrue(state.similarTo(state)); } + private static ClusterState stateFromString(final String stateStr) { + try { + return new ClusterState(stateStr); + } catch (ParseException e) { + throw new RuntimeException(e); + } + } + + private void do_test_differing_storage_node_sets(BiFunction<ClusterState, ClusterState, Boolean> cmp) { + final ClusterState a = stateFromString("distributor:3 storage:3 .0.s:d"); + final ClusterState b = stateFromString("distributor:3 storage:3"); + assertFalse(cmp.apply(a, b)); + assertFalse(cmp.apply(b, a)); + assertTrue(cmp.apply(a, a)); + assertTrue(cmp.apply(b, b)); + } + + private void do_test_differing_distributor_node_sets(BiFunction<ClusterState, ClusterState, Boolean> cmp) { + final ClusterState a = stateFromString("distributor:3 .0.s:d storage:3"); + 
final ClusterState b = stateFromString("distributor:3 storage:3"); + assertFalse(cmp.apply(a, b)); + assertFalse(cmp.apply(b, a)); + assertTrue(cmp.apply(a, a)); + assertTrue(cmp.apply(b, b)); + } + + @Test + public void similarity_check_considers_differing_distributor_node_state_sets() { + do_test_differing_distributor_node_sets((a, b) -> a.similarTo(b)); + } + + @Test + public void similarity_check_considers_differing_storage_node_state_sets() { + do_test_differing_storage_node_sets((a, b) -> a.similarTo(b)); + } + + @Test + public void structural_similarity_check_considers_differing_distributor_node_state_sets() { + do_test_differing_distributor_node_sets((a, b) -> a.similarToIgnoringInitProgress(b)); + } + + @Test + public void init_progress_ignoring_similarity_check_considers_differing_storage_node_state_sets() { + do_test_differing_storage_node_sets((a, b) -> a.similarToIgnoringInitProgress(b)); + } + + private void do_test_similarity_for_down_cluster_state(BiFunction<ClusterState, ClusterState, Boolean> cmp) { + final ClusterState a = stateFromString("cluster:d distributor:3 .0.s:d storage:3 .2:s:d"); + final ClusterState b = stateFromString("cluster:d distributor:3 storage:3 .1:s:d"); + assertTrue(cmp.apply(a, b)); + assertTrue(cmp.apply(b, a)); + } + + @Test + public void similarity_check_considers_differing_down_cluster_states_similar() { + do_test_similarity_for_down_cluster_state((a, b) -> a.similarTo(b)); + } + + @Test + public void init_progress_ignoring__similarity_check_considers_differing_down_cluster_states_similar() { + do_test_similarity_for_down_cluster_state((a, b) -> a.similarToIgnoringInitProgress(b)); + } + + // If we naively only look at the NodeState sets in the ClusterState instances to be + // compared, we might get false positives. If state A has a NodeState(Up, minBits 15) + // while state B has NodeState(Up, minBits 16), the latter will be pruned away from the + // NodeState set because it's got a "default" Up state. 
The two states are still semantically + // similar, and should be returned as such. But their state sets technically differ. + @Test + public void similarity_check_does_not_consider_per_storage_node_min_bits() { + final ClusterState a = stateFromString("distributor:4 storage:4"); + final ClusterState b = stateFromString("distributor:4 storage:4"); + b.setNodeState(new Node(NodeType.STORAGE, 1), new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(15)); + assertTrue(a.similarTo(b)); + assertTrue(b.similarTo(a)); + } + + @Test + public void init_progress_ignoring_similarity_check_does_in_fact_ignore_init_progress() { + final ClusterState a = stateFromString("distributor:3 storage:3 .0.i:0.01 .1.i:0.1 .2.i:0.9"); + final ClusterState b = stateFromString("distributor:3 storage:3 .0.i:0.2 .1.i:0.5 .2.i:0.99"); + assertTrue(a.similarToIgnoringInitProgress(b)); + assertTrue(b.similarToIgnoringInitProgress(a)); + } + + @Test public void testTextDiff() throws ParseException { ClusterState state1 = new ClusterState("distributor:9 storage:4"); ClusterState state2 = new ClusterState("distributor:7 storage:6"); @@ -94,6 +193,7 @@ public class ClusterStateTestCase extends junit.framework.TestCase { assertEquals("version: 123 => 0, bits: 16 => 21, official: false => true, storage: [2: [Initializing => Up, disks: 2 => 0, description: Booting => ], 4: Down => Up, 5: Down => Up], distributor: [7: Up => Down, 8: Up => Down]", state1.getTextualDifference(state2)); } + @Test public void testHtmlDiff() throws ParseException { ClusterState state1 = new ClusterState("distributor:9 storage:4"); ClusterState state2 = new ClusterState("distributor:7 storage:6"); @@ -133,7 +233,7 @@ public class ClusterStateTestCase extends junit.framework.TestCase { "]", state1.getHtmlDifference(state2)); } - + @Test public void testParser() throws ParseException { ClusterState state = new ClusterState("distributor:2 storage:17 .2.s:d .13.s:r m:cluster\\x20message"); assertEquals("cluster message", 
state.getDescription()); @@ -191,17 +291,20 @@ public class ClusterStateTestCase extends junit.framework.TestCase { } catch (Exception e) {} } + @Test public void testCapacityExponential() throws ParseException { ClusterState state = new ClusterState("distributor:27 storage:170 .2.s:d .13.c:3E-8 .13.s:r"); - assertEquals(3E-8, state.getNodeState(new Node(NodeType.STORAGE, 13)).getCapacity()); + assertEquals(3E-8, state.getNodeState(new Node(NodeType.STORAGE, 13)).getCapacity(), 1E-8); } + @Test public void testCapacityExponentialCpp() throws ParseException { ClusterState state = new ClusterState("distributor:27 storage:170 .2.s:d .13.c:3e-08 .13.s:r"); - assertEquals(3E-8, state.getNodeState(new Node(NodeType.STORAGE, 13)).getCapacity()); + assertEquals(3E-8, state.getNodeState(new Node(NodeType.STORAGE, 13)).getCapacity(), 1E-8); } + @Test public void testSetState() throws ParseException { ClusterState state = new ClusterState("distributor:2 storage:2"); state.setNodeState(new Node(NodeType.DISTRIBUTOR, 0), new NodeState(NodeType.DISTRIBUTOR, State.DOWN)); @@ -209,6 +312,7 @@ public class ClusterStateTestCase extends junit.framework.TestCase { assertEquals("distributor:2 .0.s:d storage:2", state.toString()); } + @Test public void testVersionAndClusterStates() throws ParseException { ClusterState state = new ClusterState("version:4 cluster:i distributor:2 .1.s:i storage:2 .0.s:i .0.i:0.345"); assertEquals(4, state.getVersion()); @@ -220,6 +324,7 @@ public class ClusterStateTestCase extends junit.framework.TestCase { assertEquals("version:5 cluster:d bits:12 distributor:2 .1.s:i .1.i:1.0 storage:2 .0.s:i .0.i:0.345", state.toString()); } + @Test public void testNotRemovingCommentedDownNodesAtEnd() throws ParseException { ClusterState state = new ClusterState(""); state.setNodeState(new Node(NodeType.DISTRIBUTOR, 0), new NodeState(NodeType.DISTRIBUTOR, State.UP)); @@ -234,6 +339,7 @@ public class ClusterStateTestCase extends junit.framework.TestCase { 
assertEquals("distributor:1 storage:2", state.toString(false)); } + @Test public void testWhitespace() throws ParseException { ClusterState state = new ClusterState("distributor:2\n .1.t:3\nstorage:2\n\t.0.s:i \r\f.1.s:m"); assertEquals(2, state.getNodeCount(NodeType.DISTRIBUTOR)); @@ -243,4 +349,22 @@ public class ClusterStateTestCase extends junit.framework.TestCase { assertEquals(new NodeState(NodeType.STORAGE, State.INITIALIZING), state.getNodeState(new Node(NodeType.STORAGE, 0))); assertEquals(new NodeState(NodeType.STORAGE, State.MAINTENANCE), state.getNodeState(new Node(NodeType.STORAGE, 1))); } + + @Test + public void empty_state_factory_method_returns_empty_state() { + final ClusterState state = ClusterState.emptyState(); + assertEquals("", state.toString()); + } + + @Test + public void state_from_string_factory_method_returns_cluster_state_constructed_from_input() { + final String stateStr = "version:123 distributor:2 storage:2"; + final ClusterState state = ClusterState.stateFromString(stateStr); + assertEquals(stateStr, state.toString()); + } + + @Test(expected=RuntimeException.class) + public void state_from_string_factory_method_throws_runtime_exception_on_parse_failure() { + ClusterState.stateFromString("fraggle rock"); + } } diff --git a/vdslib/src/test/java/com/yahoo/vdslib/state/NodeStateTestCase.java b/vdslib/src/test/java/com/yahoo/vdslib/state/NodeStateTestCase.java index 63137a92c7b..9362838b63c 100644 --- a/vdslib/src/test/java/com/yahoo/vdslib/state/NodeStateTestCase.java +++ b/vdslib/src/test/java/com/yahoo/vdslib/state/NodeStateTestCase.java @@ -165,6 +165,12 @@ public class NodeStateTestCase extends junit.framework.TestCase { assertFalse(ns2.similarTo(ns3)); assertTrue(ns3.similarTo(ns4)); + assertTrue(ns1.similarToIgnoringInitProgress(ns2)); + assertTrue(ns1.similarToIgnoringInitProgress(ns3)); + assertTrue(ns3.similarToIgnoringInitProgress(ns1)); + assertTrue(ns1.similarToIgnoringInitProgress(ns4)); + 
assertTrue(ns2.similarToIgnoringInitProgress(ns4)); + assertFalse(ns1.equals(ns2)); assertFalse(ns2.equals(ns3)); assertFalse(ns3.equals(ns4)); @@ -176,6 +182,7 @@ public class NodeStateTestCase extends junit.framework.TestCase { NodeState ns1 = new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(16); NodeState ns2 = new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(18); assertTrue(ns1.similarTo(ns2)); + assertTrue(ns1.similarToIgnoringInitProgress(ns2)); assertFalse(ns1.equals(ns2)); } { @@ -184,12 +191,14 @@ public class NodeStateTestCase extends junit.framework.TestCase { assertEquals(ns, ns2Disks); assertEquals(ns2Disks, ns); assertTrue(ns.similarTo(ns2Disks)); + assertTrue(ns.similarToIgnoringInitProgress(ns2Disks)); assertTrue(ns2Disks.similarTo(ns)); ns2Disks.getDiskState(0).setState(State.DOWN); assertFalse(ns.equals(ns2Disks)); assertFalse(ns2Disks.equals(ns)); assertFalse(ns.similarTo(ns2Disks)); + assertFalse(ns.similarToIgnoringInitProgress(ns2Disks)); assertFalse(ns2Disks.similarTo(ns)); } } diff --git a/vdslib/src/tests/container/CMakeLists.txt b/vdslib/src/tests/container/CMakeLists.txt index 99717430c28..a869d0fd40b 100644 --- a/vdslib/src/tests/container/CMakeLists.txt +++ b/vdslib/src/tests/container/CMakeLists.txt @@ -5,9 +5,7 @@ vespa_add_library(vdslib_containertest parameterstest.cpp searchresulttest.cpp documentsummarytest.cpp - smallvectortest.cpp lruordertest.cpp - indexedcontaineriteratortest.cpp DEPENDS vdslib ) diff --git a/vdslib/src/tests/container/indexedcontaineriteratortest.cpp b/vdslib/src/tests/container/indexedcontaineriteratortest.cpp deleted file mode 100644 index a7697116a15..00000000000 --- a/vdslib/src/tests/container/indexedcontaineriteratortest.cpp +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
- -#include <cppunit/extensions/HelperMacros.h> -#include <vespa/vdslib/container/smallvector.h> -#include <sys/time.h> - -namespace storage { -namespace lib { - -struct IndexedContainerIteratorTest : public CppUnit::TestFixture { - - void testNormalUsage(); - void testSorting(); - - CPPUNIT_TEST_SUITE(IndexedContainerIteratorTest); - CPPUNIT_TEST(testNormalUsage); - CPPUNIT_TEST(testSorting); - CPPUNIT_TEST_SUITE_END(); -}; - -CPPUNIT_TEST_SUITE_REGISTRATION(IndexedContainerIteratorTest); - -void -IndexedContainerIteratorTest::testNormalUsage() -{ - typedef IndexedContainerIterator<std::vector<int>, int> Iterator; - { - std::vector<int> v; - Iterator begin = Iterator(v, 0); - Iterator end = Iterator(v, 0); - CPPUNIT_ASSERT_EQUAL(begin, Iterator(v, 0)); - CPPUNIT_ASSERT_EQUAL(end, Iterator(v, 0)); - CPPUNIT_ASSERT(begin == end); - } - { - std::vector<int> v; - v.push_back(5); - Iterator begin = Iterator(v, 0); - Iterator end = Iterator(v, 1); - CPPUNIT_ASSERT_EQUAL(begin, Iterator(v, 0)); - CPPUNIT_ASSERT_EQUAL(end, Iterator(v, 1)); - CPPUNIT_ASSERT(begin != end); - } -} - -void -IndexedContainerIteratorTest::testSorting() -{ - typedef IndexedContainerIterator<std::vector<int>, int> Iterator; - std::vector<int> v; - v.push_back(5); - v.push_back(9); - v.push_back(2); - std::sort(Iterator(v, 0), Iterator(v, 3)); - CPPUNIT_ASSERT_EQUAL(2, v[0]); - CPPUNIT_ASSERT_EQUAL(5, v[1]); - CPPUNIT_ASSERT_EQUAL(9, v[2]); -} - -} // lib -} // storage diff --git a/vdslib/src/tests/container/smallvectortest.cpp b/vdslib/src/tests/container/smallvectortest.cpp deleted file mode 100644 index ac046f80caa..00000000000 --- a/vdslib/src/tests/container/smallvectortest.cpp +++ /dev/null @@ -1,274 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
- -#include <cppunit/extensions/HelperMacros.h> -#include <vespa/vdslib/container/smallvector.h> -#include <sys/time.h> - -namespace storage { -namespace lib { - -struct SmallVectorTest : public CppUnit::TestFixture { - - void testNormalUsage(); - void testPerformance(); - void testSwapVectorContents(); - void testErase(); - void testCopy(); - - CPPUNIT_TEST_SUITE(SmallVectorTest); - CPPUNIT_TEST(testNormalUsage); - CPPUNIT_TEST(testPerformance); - CPPUNIT_TEST(testSwapVectorContents); - CPPUNIT_TEST(testErase); - CPPUNIT_TEST(testCopy); - CPPUNIT_TEST_SUITE_END(); -}; - -CPPUNIT_TEST_SUITE_REGISTRATION(SmallVectorTest); - -namespace { - template<typename T> - inline std::ostream& operator<<(std::ostream& out, - const std::vector<T>& v) - { - out << "["; - for (size_t i=0; i<v.size(); ++i) { - out << "\n " << v[i]; - } - if (!v.empty()) out << "\n"; - out << "]"; - return out; - } - - template<typename T, size_t S> - void assertEqual(const SmallVector<T, S>& sv, const std::vector<T>& v) { - if (!(sv == v)) { - std::ostringstream ost; - ost << "Small vector " << sv << " is not equal to vector " << v; - CPPUNIT_FAIL(ost.str()); - } - } -} - -void -SmallVectorTest::testNormalUsage() -{ - std::vector<uint16_t> expected; - SmallVector<uint16_t, 8> actual; - for (uint16_t i=0; i<16; ++i) { - expected.push_back(i); - actual.push_back(i); - assertEqual(actual, expected); - } - - SmallVector<uint16_t, 8> copy(actual); - SmallVector<uint16_t, 16> copy2(actual); -} - -namespace { - - uint64_t getCurrentTimeInMicros() { - struct timeval mytime; - gettimeofday(&mytime, 0); - return mytime.tv_sec * 1000000llu + mytime.tv_usec; - } - - template<typename IntContainer> - struct PerformanceTestClass { - uint32_t count; - - PerformanceTestClass(int c) : count(c) {} - - IntContainer getContainer(int minVal) { - IntContainer result; - for (uint32_t i=0; i<count; ++i) { - result.push_back(minVal + i); - } - return result; - } - }; - - template<typename IntContainer> - uint64_t 
getPerformance(int containerSize) { - uint64_t start = getCurrentTimeInMicros(); - int value = 0; - PerformanceTestClass<IntContainer> foo(containerSize); - for (uint32_t i=0, n=10 * 1024; i<n; ++i) { - IntContainer ic(foo.getContainer(start)); - value += ic[0] + ic[1] - ic[2]; - } - uint64_t stop = getCurrentTimeInMicros(); - return (stop - start); - } - - struct ArgumentTestClass { - uint32_t count; - - ArgumentTestClass(int c) : count(c) {} - - void getContainer(int minVal, std::vector<int>& result) { - for (uint32_t i=0; i<count; ++i) { - result.push_back(minVal + i); - } - } - }; - - uint64_t getPerformance2(int containerSize) { - uint64_t start = getCurrentTimeInMicros(); - int value = 0; - ArgumentTestClass foo(containerSize); - for (uint32_t i=0, n=10 * 1024; i<n; ++i) { - std::vector<int> ic; - foo.getContainer(start, ic); - value += ic[0] + ic[1] - ic[2]; - } - uint64_t stop = getCurrentTimeInMicros(); - return (stop - start); - } -} - -void -SmallVectorTest::testPerformance() -{ - size_t low = 3; - size_t high = 16; - SmallVector<int> sv; - - CPPUNIT_ASSERT(low <= sv.getEfficientSizeLimit()); - CPPUNIT_ASSERT(high > sv.getEfficientSizeLimit()); - - uint64_t vectorTime1 = getPerformance<std::vector<int> >(low); - uint64_t smallVectorTime1 = getPerformance<SmallVector<int> >(low); - uint64_t asArgTime1 = getPerformance2(low); - - uint64_t vectorTime2 = getPerformance<std::vector<int> >(high); - uint64_t smallVectorTime2 = getPerformance<SmallVector<int> >(high); - uint64_t asArgTime2 = getPerformance2(high); - - double factor1 = static_cast<double>(vectorTime1) / smallVectorTime1; - double factor2 = static_cast<double>(vectorTime2) / smallVectorTime2; - - double factor3 = static_cast<double>(asArgTime1) / smallVectorTime1; - double factor4 = static_cast<double>(asArgTime2) / smallVectorTime2; - - std::cerr << "\n" - << " Small vector is " << factor1 - << " x faster than std::vector with few elements\n" - << " Small vector is " << factor2 - << " x faster 
than std::vector with many elements\n" - << " Small vector is " << factor3 - << " x faster than std::vector as arg with few elements\n" - << " Small vector is " << factor4 - << " x faster than std::vector as arg with many elements\n"; - - // At time of test writing, vector is ~43 times faster with small data, and - // ~0.9 times as fast on bigger. (Without vespa malloc) - // With vespa malloc it is about ~14 times faster. - - /* Cannot run on factory as too much other runs in parallel. - CPPUNIT_ASSERT(factor1 > 25); - CPPUNIT_ASSERT(factor2 > 0.5); - // */ -} - -void -SmallVectorTest::testSwapVectorContents() -{ - SmallVector<uint16_t, 8> v1; - SmallVector<uint16_t, 8> v2; - - // v1 small enough to be contained in fixed array. - for (uint16_t i = 0; i < 6; ++i) { - v1.push_back(i); - } - - // v2 big enough that it needs heap backed storage. - for (uint16_t i = 10; i < 30; ++i) { - v2.push_back(i); - } - - vespalib::string expectedSmall("[0, 1, 2, 3, 4, 5]"); - vespalib::string expectedBig("[10, 11, 12, 13, 14, 15, 16, 17, 18, 19, " - "20, 21, 22, 23, 24, 25, 26, 27, 28, 29]"); - - v1.swap(v2); - - CPPUNIT_ASSERT_EQUAL(expectedSmall, v2.toString()); - CPPUNIT_ASSERT_EQUAL(expectedBig, v1.toString()); - - swap(v1, v2); - CPPUNIT_ASSERT_EQUAL(expectedBig, v2.toString()); - CPPUNIT_ASSERT_EQUAL(expectedSmall, v1.toString()); -} - -void -SmallVectorTest::testErase() -{ - // Delete in small part of small array - { - SmallVector<uint16_t, 4> v = {3, 6, 5}; - v.erase(v.begin()); - CPPUNIT_ASSERT_EQUAL((SmallVector<uint16_t, 4>{6, 5}), v); - } - { - SmallVector<uint16_t, 4> v = {3, 6, 5}; - v.erase(v.begin() + 1); - CPPUNIT_ASSERT_EQUAL((SmallVector<uint16_t, 4>{3, 5}), v); - } - { - SmallVector<uint16_t, 4> v = {3, 6, 5}; - v.erase(v.begin() + 2); - CPPUNIT_ASSERT_EQUAL((SmallVector<uint16_t, 4>{3, 6}), v); - } - - // Delete in small part of large array - { - SmallVector<uint16_t, 4> v = {3, 6, 5, 7, 8}; - v.erase(v.begin()); - CPPUNIT_ASSERT_EQUAL((SmallVector<uint16_t, 
4>{6, 5, 7, 8}), v); - } - { - SmallVector<uint16_t, 4> v = {3, 6, 5, 7, 8}; - v.erase(v.begin() + 1); - CPPUNIT_ASSERT_EQUAL((SmallVector<uint16_t, 4>{3, 5, 7, 8}), v); - } - { - SmallVector<uint16_t, 4> v = {3, 6, 5, 7, 8}; - v.erase(v.begin() + 2); - CPPUNIT_ASSERT_EQUAL((SmallVector<uint16_t, 4>{3, 6, 7, 8}), v); - } - - // Delete in extended part of small array - { - SmallVector<uint16_t, 1> v = {3, 6, 5}; - v.erase(v.begin()); - CPPUNIT_ASSERT_EQUAL((SmallVector<uint16_t, 1>{6, 5}), v); - } - { - SmallVector<uint16_t, 1> v = {3, 6, 5}; - v.erase(v.begin() + 1); - CPPUNIT_ASSERT_EQUAL((SmallVector<uint16_t, 1>{3, 5}), v); - } - { - SmallVector<uint16_t, 1> v = {3, 6, 5}; - v.erase(v.begin() + 2); - CPPUNIT_ASSERT_EQUAL((SmallVector<uint16_t, 1>{3, 6}), v); - } -} - -namespace { - void foo(const SmallVector<uint16_t, 4>&) { - } -} - -void -SmallVectorTest::testCopy() -{ - foo(SmallVector<uint16_t, 4>{3, 2}); - SmallVector<uint16_t, 4> v{1, 2, 3}; - foo(v); - foo({}); -} - -} // lib -} // storage diff --git a/vdslib/src/tests/distribution/CMakeLists.txt b/vdslib/src/tests/distribution/CMakeLists.txt index a82cb3ddec1..e4197920add 100644 --- a/vdslib/src/tests/distribution/CMakeLists.txt +++ b/vdslib/src/tests/distribution/CMakeLists.txt @@ -4,7 +4,6 @@ vespa_add_library(vdslib_testdistribution distributiontest.cpp grouptest.cpp idealnodecalculatorimpltest.cpp - idealnodecalculatorcachetest.cpp DEPENDS vdslib ) diff --git a/vdslib/src/tests/distribution/distributiontest.cpp b/vdslib/src/tests/distribution/distributiontest.cpp index 664c50a3fc9..be93ec614cf 100644 --- a/vdslib/src/tests/distribution/distributiontest.cpp +++ b/vdslib/src/tests/distribution/distributiontest.cpp @@ -3,7 +3,6 @@ #include <vespa/fastos/fastos.h> #include <vespa/vdslib/distribution/distribution.h> #include <vespa/vdslib/distribution/idealnodecalculator.h> -#include <boost/assign.hpp> #include <vespa/config/helper/configfetcher.h> #include <cmath> #include <chrono> @@ -1453,38 +1452,28 
@@ DistributionTest::testActivePerGroup() void DistributionTest::testHierarchicalDistributeLessThanRedundancy() { - using namespace boost::assign; - Distribution distr("redundancy 4\n" - "active_per_leaf_group true\n" + groupConfig); + Distribution distr("redundancy 4\nactive_per_leaf_group true\n" + groupConfig); ClusterState state("storage:6"); std::vector<uint16_t> actual; { - distr.getIdealNodes(NodeType::STORAGE, state, document::BucketId(16, 0), - actual, "uim", 4); - std::vector<uint16_t> expected; - expected += 3, 5, 1, 2; + distr.getIdealNodes(NodeType::STORAGE, state, document::BucketId(16, 0), actual, "uim", 4); + std::vector<uint16_t> expected({3, 5, 1, 2}); CPPUNIT_ASSERT_EQUAL(expected, actual); } { - distr.getIdealNodes(NodeType::STORAGE, state, document::BucketId(16, 0), - actual, "uim", 3); - std::vector<uint16_t> expected; - expected += 3, 5, 1; + distr.getIdealNodes(NodeType::STORAGE, state, document::BucketId(16, 0), actual, "uim", 3); + std::vector<uint16_t> expected({3, 5, 1}); CPPUNIT_ASSERT_EQUAL(expected, actual); } { - distr.getIdealNodes(NodeType::STORAGE, state, document::BucketId(16, 0), - actual, "uim", 2); - std::vector<uint16_t> expected; - expected += 3, 1; + distr.getIdealNodes(NodeType::STORAGE, state, document::BucketId(16, 0), actual, "uim", 2); + std::vector<uint16_t> expected({3, 1}); CPPUNIT_ASSERT_EQUAL(expected, actual); } { - distr.getIdealNodes(NodeType::STORAGE, state, document::BucketId(16, 0), - actual, "uim", 1); - std::vector<uint16_t> expected; - expected += 3; + distr.getIdealNodes(NodeType::STORAGE, state, document::BucketId(16, 0), actual, "uim", 1); + std::vector<uint16_t> expected({3}); CPPUNIT_ASSERT_EQUAL(expected, actual); } } diff --git a/vdslib/src/tests/distribution/grouptest.cpp b/vdslib/src/tests/distribution/grouptest.cpp index 828a00ff7d0..b182c19d072 100644 --- a/vdslib/src/tests/distribution/grouptest.cpp +++ b/vdslib/src/tests/distribution/grouptest.cpp @@ -3,7 +3,6 @@ #include 
<vespa/fastos/fastos.h> #include <vespa/vdslib/distribution/group.h> -#include <boost/assign.hpp> #include <vespa/vespalib/text/stringtokenizer.h> #include <vespa/vdstestlib/cppunit/macros.h> diff --git a/vdslib/src/tests/distribution/idealnodecalculatorcachetest.cpp b/vdslib/src/tests/distribution/idealnodecalculatorcachetest.cpp deleted file mode 100644 index 8195d6dd070..00000000000 --- a/vdslib/src/tests/distribution/idealnodecalculatorcachetest.cpp +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#include <vespa/vdslib/distribution/idealnodecalculatorcache.h> -#include <vespa/vdslib/distribution/idealnodecalculatorimpl.h> -#include <vespa/vdstestlib/cppunit/macros.h> - -namespace storage { -namespace lib { - -struct IdealNodeCalculatorCacheTest : public CppUnit::TestFixture { - - /** Test that you get a correct result forwarded through simple. */ - void testSimple(); - /** Test that similar buckets use different cache slots. */ - void testLocalityCached(); - /** Test that buckets using same cache slot invalidates each other. */ - void testBucketsSameCacheSlot(); - /** Test that cache is invalidated on changes. */ - void testCacheInvalidationOnChanges(); - /** Test that values for different upstates are kept for themselves. */ - void testDifferentUpStates(); - /** Test that values for different node types are kept for themselves. */ - void testDifferentNodeTypes(); - /** - * Do a performance test, verifying that cache actually significantly - * increase performance. 
- */ - void testPerformance(); - - CPPUNIT_TEST_SUITE(IdealNodeCalculatorCacheTest); - CPPUNIT_TEST(testSimple); - CPPUNIT_TEST(testLocalityCached); - CPPUNIT_TEST(testBucketsSameCacheSlot); - CPPUNIT_TEST(testCacheInvalidationOnChanges); - CPPUNIT_TEST(testDifferentUpStates); - CPPUNIT_TEST(testDifferentNodeTypes); - CPPUNIT_TEST(testPerformance); - CPPUNIT_TEST_SUITE_END(); -}; - -CPPUNIT_TEST_SUITE_REGISTRATION(IdealNodeCalculatorCacheTest); - -void -IdealNodeCalculatorCacheTest::testSimple() -{ - ClusterState state("storage:10"); - Distribution distr(Distribution::getDefaultDistributionConfig(3, 10)); - IdealNodeCalculatorImpl::SP impl(new IdealNodeCalculatorImpl); - IdealNodeCalculatorCache cache(impl, 4); - - IdealNodeCalculatorConfigurable& configurable(cache); - IdealNodeCalculator& calc(cache); - configurable.setDistribution(distr); - configurable.setClusterState(state); - - std::string expected("[storage.8, storage.9, storage.6]"); - CPPUNIT_ASSERT_EQUAL( - expected, - calc.getIdealStorageNodes(document::BucketId(16, 5)).toString()); -} - -void -IdealNodeCalculatorCacheTest::testLocalityCached() -{ - ClusterState state("bits:6 storage:10"); - Distribution distr(Distribution::getDefaultDistributionConfig(3, 10)); - IdealNodeCalculatorImpl::SP impl(new IdealNodeCalculatorImpl); - IdealNodeCalculatorCache cache(impl, 1024); - - IdealNodeCalculatorConfigurable& configurable(cache); - IdealNodeCalculator& calc(cache); - configurable.setDistribution(distr); - configurable.setClusterState(state); - - std::vector<document::BucketId> local; - local.push_back(document::BucketId(15, 134)); - local.push_back(document::BucketId(16, 134)); - local.push_back(document::BucketId(17, 134)); - local.push_back(document::BucketId(17, 134 | (1 << 16))); - - for (uint32_t i=0; i<local.size(); ++i) { - calc.getIdealStorageNodes(local[i]); - } - - CPPUNIT_ASSERT_EQUAL(4u, cache.getMissCount()); - CPPUNIT_ASSERT_EQUAL(0u, cache.getHitCount()); - - for (uint32_t i=0; 
i<local.size(); ++i) { - calc.getIdealStorageNodes(local[i]); - } - - CPPUNIT_ASSERT_EQUAL(4u, cache.getMissCount()); - CPPUNIT_ASSERT_EQUAL(4u, cache.getHitCount()); -} - -void -IdealNodeCalculatorCacheTest::testBucketsSameCacheSlot() -{ - ClusterState state("bits:6 storage:10"); - Distribution distr(Distribution::getDefaultDistributionConfig(3, 10)); - IdealNodeCalculatorImpl::SP impl(new IdealNodeCalculatorImpl); - IdealNodeCalculatorCache cache(impl, 1); // Only one slot available - - IdealNodeCalculatorConfigurable& configurable(cache); - IdealNodeCalculator& calc(cache); - configurable.setDistribution(distr); - configurable.setClusterState(state); - - // See that you don't get same result as last one - std::string expected("[storage.8, storage.9, storage.6]"); - CPPUNIT_ASSERT_EQUAL( - expected, - calc.getIdealStorageNodes(document::BucketId(16, 5)).toString()); - expected = "[storage.8, storage.6, storage.1]"; - CPPUNIT_ASSERT_EQUAL( - expected, - calc.getIdealStorageNodes(document::BucketId(16, 6)).toString()); -} - -void -IdealNodeCalculatorCacheTest::testCacheInvalidationOnChanges() -{ - ClusterState state("bits:6 storage:10"); - Distribution distr(Distribution::getDefaultDistributionConfig(3, 10)); - IdealNodeCalculatorImpl::SP impl(new IdealNodeCalculatorImpl); - IdealNodeCalculatorCache cache(impl, 1); // Only one slot available - - IdealNodeCalculatorConfigurable& configurable(cache); - IdealNodeCalculator& calc(cache); - configurable.setDistribution(distr); - configurable.setClusterState(state); - - // See that you don't get same result as last one - std::string expected("[storage.8, storage.9, storage.6]"); - CPPUNIT_ASSERT_EQUAL( - expected, - calc.getIdealStorageNodes(document::BucketId(16, 5)).toString()); - - CPPUNIT_ASSERT_EQUAL(1u, cache.getMissCount()); - CPPUNIT_ASSERT_EQUAL(0u, cache.getHitCount()); - - configurable.setClusterState(state); - - CPPUNIT_ASSERT_EQUAL( - expected, - calc.getIdealStorageNodes(document::BucketId(16, 
5)).toString()); - - CPPUNIT_ASSERT_EQUAL(2u, cache.getMissCount()); - CPPUNIT_ASSERT_EQUAL(0u, cache.getHitCount()); - - configurable.setDistribution(distr); - - CPPUNIT_ASSERT_EQUAL( - expected, - calc.getIdealStorageNodes(document::BucketId(16, 5)).toString()); - - CPPUNIT_ASSERT_EQUAL(3u, cache.getMissCount()); - CPPUNIT_ASSERT_EQUAL(0u, cache.getHitCount()); -} - -void -IdealNodeCalculatorCacheTest::testDifferentUpStates() -{ - ClusterState state("bits:6 storage:10 .6.s:m .8.s:r"); - Distribution distr(Distribution::getDefaultDistributionConfig(3, 10)); - IdealNodeCalculatorImpl::SP impl(new IdealNodeCalculatorImpl); - IdealNodeCalculatorCache cache(impl, 4); - - IdealNodeCalculatorConfigurable& configurable(cache); - IdealNodeCalculator& calc(cache); - configurable.setDistribution(distr); - configurable.setClusterState(state); - - std::string expected("[storage.9, storage.4, storage.1]"); - CPPUNIT_ASSERT_EQUAL( - expected, - calc.getIdealStorageNodes(document::BucketId(16, 5), - IdealNodeCalculator::UpInit).toString()); - - expected = "[storage.9, storage.6, storage.4]"; - CPPUNIT_ASSERT_EQUAL( - expected, - calc.getIdealStorageNodes( - document::BucketId(16, 5), - IdealNodeCalculator::UpInitMaintenance).toString()); -} - -void -IdealNodeCalculatorCacheTest::testDifferentNodeTypes() -{ - ClusterState state("bits:6 distributor:10 storage:10 .6.s:m .8.s:r"); - Distribution distr(Distribution::getDefaultDistributionConfig(3, 10)); - IdealNodeCalculatorImpl::SP impl(new IdealNodeCalculatorImpl); - IdealNodeCalculatorCache cache(impl, 4); - - IdealNodeCalculatorConfigurable& configurable(cache); - IdealNodeCalculator& calc(cache); - configurable.setDistribution(distr); - configurable.setClusterState(state); - - std::string expected("[storage.9, storage.4, storage.1]"); - CPPUNIT_ASSERT_EQUAL( - expected, - calc.getIdealStorageNodes(document::BucketId(16, 5)).toString()); - - expected = "[distributor.8]"; - CPPUNIT_ASSERT_EQUAL( - expected, - 
calc.getIdealDistributorNodes( - document::BucketId(16, 5)).toString()); -} - -namespace { - - uint64_t getCurrentTimeInMicros() { - struct timeval mytime; - gettimeofday(&mytime, 0); - return mytime.tv_sec * 1000000llu + mytime.tv_usec; - } - - void addBucketTree(std::vector<document::BucketId>& v, - uint64_t location, - uint32_t currentUsedBits, - uint32_t maxUsedBits) - { - document::BucketId id(currentUsedBits, location); - v.push_back(id); - if (currentUsedBits < maxUsedBits) { - addBucketTree(v, location, - currentUsedBits + 1, maxUsedBits); - addBucketTree(v, location | (uint64_t(1) << currentUsedBits), - currentUsedBits + 1, maxUsedBits); - } - } - - uint64_t runPerformanceTest(IdealNodeCalculator& calc) { - std::vector<document::BucketId> buckets; - - // Addvarious location split levels for a user - addBucketTree(buckets, 123, 20, 22); - // Add various gid bit split levels for a user - addBucketTree(buckets, 123, 40, 42); - - { - std::set<document::BucketId> uniqueBuckets; - for (uint32_t i=0; i<buckets.size(); ++i) { - uniqueBuckets.insert(buckets[i]); - calc.getIdealStorageNodes(buckets[i]); - } - CPPUNIT_ASSERT_EQUAL(buckets.size(), uniqueBuckets.size()); - CPPUNIT_ASSERT_EQUAL(size_t(14), buckets.size()); - } - IdealNodeCalculatorCache* cache(dynamic_cast<IdealNodeCalculatorCache*>( - &calc)); - if (cache != 0) cache->clearCounts(); - uint32_t value; - uint64_t start = getCurrentTimeInMicros(); - for (uint32_t j=0; j<1024; ++j) { - for (uint32_t i=0; i<buckets.size(); ++i) { - IdealNodeList result(calc.getIdealStorageNodes(buckets[i])); - value += (result[0].getIndex() + result[1].getIndex()) - / result[2].getIndex(); - } - } - uint64_t stop = getCurrentTimeInMicros(); - return (stop - start); - } - - struct MapIdealNodeCalculator : public IdealNodeCalculator { - mutable std::map<document::BucketId, IdealNodeList> values; - const IdealNodeCalculator& calc; - - MapIdealNodeCalculator(const IdealNodeCalculator& c) : calc(c) {} - - virtual IdealNodeList 
getIdealNodes(const NodeType& nodeType, - const document::BucketId& bucketId, - UpStates upStates) const - { - std::map<document::BucketId, IdealNodeList>::const_iterator it( - values.find(bucketId)); - if (it != values.end()) return it->second; - IdealNodeList result( - calc.getIdealNodes(nodeType, bucketId, upStates)); - values[bucketId] = result; - return result; - } - }; -} - -void -IdealNodeCalculatorCacheTest::testPerformance() -{ - ClusterState state("bits:18 distributor:100 storage:100 .6.s:m .8.s:r"); - Distribution distr(Distribution::getDefaultDistributionConfig(3, 100)); - IdealNodeCalculatorImpl::SP impl(new IdealNodeCalculatorImpl); - impl->setDistribution(distr); - impl->setClusterState(state); - - uint64_t rawPerformance = runPerformanceTest(*impl); - - IdealNodeCalculatorCache cache(impl, 14); - - uint64_t cachePerformance = runPerformanceTest(cache); - double hitrate = (100.0 * cache.getHitCount() - / (cache.getHitCount() + cache.getMissCount())); - CPPUNIT_ASSERT(hitrate > 99.99); - - MapIdealNodeCalculator mapCalc(*impl); - - uint64_t mapPerformance = runPerformanceTest(mapCalc); - - IdealNodeCalculatorCache cache2(impl, 13); - uint64_t cacheMissPerformance = runPerformanceTest(cache2); - double hitrate2 = (100.0 * cache2.getHitCount() - / (cache2.getHitCount() + cache2.getMissCount())); - CPPUNIT_ASSERT(hitrate2 < 0.01); - - std::cerr << "\n" - << " Cache is " - << (static_cast<double>(rawPerformance) / cachePerformance) - << " x faster than skipping cache with 100% hitrate\n" - << " Cache is " - << (static_cast<double>(mapPerformance) / cachePerformance) - << " x faster than std::map cache with all data\n" - << " Cache is " - << (static_cast<double>(rawPerformance) / cacheMissPerformance) - << " x faster than skipping cache with 0% hitrate\n"; -} - -} // lib -} // storage diff --git a/vdslib/src/vespa/vdslib/container/smallvector.h b/vdslib/src/vespa/vdslib/container/smallvector.h deleted file mode 100644 index cd6636536c7..00000000000 --- 
a/vdslib/src/vespa/vdslib/container/smallvector.h +++ /dev/null @@ -1,294 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -/** - * A vector type implementation that is optimized for keeping a small amount of - * elements. If a small amount is kept, no malloc will be done within the - * vector implementation. - */ - -#pragma once - -#include <boost/array.hpp> -#include <vespa/fastos/fastos.h> -#include <iterator> -#include <memory> -#include <vector> -#include <vespa/vespalib/util/printable.h> - -namespace storage { -namespace lib { - -/** - * A generic iterator implementation using size() and operator[] to access - * elements. - */ -template<typename Container, typename T> -class IndexedContainerIterator - : public std::iterator<std::random_access_iterator_tag, T>, - public vespalib::AsciiPrintable -{ - Container* _container; - uint64_t _index; - -public: - typedef IndexedContainerIterator<Container, T> Iterator; - typedef typename std::iterator<std::random_access_iterator_tag, T>::difference_type difference_type; - // Required to be possible to default construct iterators - IndexedContainerIterator() : _container(0), _index(-1) {} - IndexedContainerIterator(Container& c, uint64_t index) - : _container(&c), _index(index) {} - - T& operator*() { return (*_container)[_index]; } - T* operator->() { return &(*_container)[_index]; } - bool operator==(const Iterator& o) const { - return (_index == o._index); - } - bool operator!=(const Iterator& o) const { - return (_index != o._index); - } - bool operator<(const Iterator& o) const { - return (_index < o._index); - } - - Iterator& operator++() { - ++_index; - return *this; - } - Iterator operator++(int) { - return Iterator(*_container, _index++); - } - Iterator& operator--() { - --_index; - return *this; - } - Iterator operator--(int) { - return Iterator(*_container, _index--); - } - - Iterator operator+(const difference_type& v) { - return 
Iterator(*_container, _index + v); - } - difference_type operator+(const Iterator& o) { - return _index + o._index; - } - Iterator operator-(const difference_type& v) { - return Iterator(*_container, _index - v); - } - difference_type operator-(const Iterator& o) { - return _index - o._index; - } - - void print(vespalib::asciistream& out, const PrintProperties& p) const - { - out << "Iterator."; - if (_index >= _container->size()) { - out << "end"; - } else { - out << _index; - if (p.verbose()) { - out << "(" << (*_container)[_index] << ")"; - } - } - } -}; - -template <typename T, size_t S = 8> -class SmallVector : public vespalib::AsciiPrintable { - size_t _size; - boost::array<T, S> _smallVector; - mutable std::unique_ptr< std::vector<T> > _bigVector; - -public: - typedef IndexedContainerIterator<SmallVector<T, S>, T> iterator; - typedef IndexedContainerIterator<const SmallVector<T, S>, const T> - const_iterator; - typedef T value_type; - typedef T& reference; - typedef const T& const_reference; - typedef size_t difference_type; - typedef size_t size_type; - - iterator begin() { return iterator(*this, 0); } - iterator end() { return iterator(*this, _size); } - const_iterator begin() const { return const_iterator(*this, 0); } - const_iterator end() const { return const_iterator(*this, _size); } - - SmallVector() : _size(0) {} - - SmallVector(std::initializer_list<T> elems) - : _size(0) - { - for (auto it=elems.begin(); it != elems.end(); ++it) { - push_back(*it); - } - } - - /** Copy needs to be efficient. That's the whole basis for this class. 
*/ - SmallVector(const SmallVector<T, S>& other) { - operator=(other); - } - SmallVector<T, S>& operator=(const SmallVector<T, S>& other) { - _size = other.size(); - _smallVector = other._smallVector; - if (other._bigVector.get() != 0) { - _bigVector.reset(new std::vector<T>()); - *_bigVector = *other._bigVector; - } else { - _bigVector.reset(); - } - return *this; - } - - template<size_t S2> - SmallVector(const SmallVector<T, S2>& other) { - operator=(other); - } - template<size_t S2> - SmallVector<T, S>& operator=(const SmallVector<T, S2>& other) { - clear(); - for (uint32_t i=0, n=other.size(); i<n; ++i) { - push_back(other[i]); - } - return *this; - } - - size_t getEfficientSizeLimit() const { return S; } - - void push_back(const T& t) { - if (_size < S) { - _smallVector[_size] = t; - ++_size; - } else { - if (_size == S && _bigVector.get() == 0) { - populateVector(); - } - _bigVector->push_back(t); - ++_size; - } - } - void pop_back() { - if (_size <= S) { - --_size; - } else { - if (--_size == S) { - _bigVector.reset(); - } else { - _bigVector->pop_back(); - } - } - } - const T& back() const { return operator[](_size - 1); } - T& back() { return operator[](_size - 1); } - const T& front() const { return operator[](0); } - T& front() { return operator[](0); } - void clear() { - _size = 0; - _bigVector.reset(0); - } - const T& operator[](size_t i) const { - if (i < S) { - return _smallVector[i]; - } else { - return (*_bigVector)[i]; - } - } - T& operator[](size_t i) { - if (i < S) { - return _smallVector[i]; - } else { - return (*_bigVector)[i]; - } - } - bool empty() const { return (_size == 0); } - size_t size() const { return _size; } - - std::vector<T> getVector() const { - std::vector<T> result; - if (_bigVector.get() == 0) { - for (size_t i=0; i<_size; ++i) { - result.push_back(_smallVector[i]); - } - } else { - result = *_bigVector; - } - return *_bigVector; - } - - template<typename O> - bool operator==(const O& o) const { - if (size() != o.size()) 
return false; - for (size_t i=0; i<_size; ++i) { - if ((*this)[i] != o[i]) return false; - } - return true; - } - template<typename O> - bool operator!=(const O& o) const { - return !(operator==(o)); - } - - void swap(SmallVector<T, S>& other) { - // Move current into temporaries - size_t copySize(_size); - boost::array<T, S> copySmall(_smallVector); - std::unique_ptr< std::vector<T> > copyBig(std::move(_bigVector)); - // Overwrite current with other - _size = other._size; - _smallVector = other._smallVector; - _bigVector = std::move(other._bigVector); - // Overwrite other with temporaries - other._size = copySize; - other._smallVector = copySmall; - other._bigVector = std::move(copyBig); - } - - void print(vespalib::asciistream& out, const PrintProperties& p) const { - if (_size == 0) { - out << "[]"; - return; - } - vespalib::asciistream ost; - ost << operator[](0); - bool newLineBetweenEntries = (ost.str().size() > 15); - out << "["; - for (size_t i=0; i<_size; ++i) { - if (i != 0) out << ","; - if (newLineBetweenEntries) { - out << "\n" << p.indent(1); - } else { - if (i != 0) { out << " "; } - } - out << operator[](i); - } - if (newLineBetweenEntries) { - out << "\n" << p.indent(); - } - out << "]"; - } - void erase(iterator eraseIt) { - SmallVector<T, S> copy; - for (auto it = begin(); it != end(); ++it) { - if (it != eraseIt) { - copy.push_back(*it); - } - } - copy.swap(*this); - } - -private: - void populateVector() const { - assert(_bigVector.get() == 0 && _size == S); - _bigVector.reset(new std::vector<T>()); - for (size_t i=0; i<S; ++i) { - _bigVector->push_back(_smallVector[i]); - } - } -}; - -template <typename T, size_t S> -void -swap(SmallVector<T, S>& v1, SmallVector<T, S>& v2) { - v1.swap(v2); -} - -} // lib -} // storage diff --git a/vdslib/src/vespa/vdslib/distribution/group.cpp b/vdslib/src/vespa/vdslib/distribution/group.cpp index f4f94fefe0b..0ffe937a949 100644 --- a/vdslib/src/vespa/vdslib/distribution/group.cpp +++ 
b/vdslib/src/vespa/vdslib/distribution/group.cpp @@ -5,7 +5,6 @@ #include <vespa/vdslib/state/random.h> #include <vespa/vespalib/util/exceptions.h> -#include <boost/lexical_cast.hpp> #include <algorithm> namespace storage { diff --git a/vdslib/src/vespa/vdslib/distribution/group.h b/vdslib/src/vespa/vdslib/distribution/group.h index 138a466a856..d4952941dbd 100644 --- a/vdslib/src/vespa/vdslib/distribution/group.h +++ b/vdslib/src/vespa/vdslib/distribution/group.h @@ -11,7 +11,6 @@ */ #pragma once -#include <boost/operators.hpp> #include <map> #include <vector> #include <vespa/vespalib/objects/floatingpointtype.h> @@ -26,7 +25,7 @@ namespace lib { class IdealGroup; class SystemState; -class Group : public document::Printable, public boost::operators<Group> +class Group : public document::Printable { public: typedef std::unique_ptr<Group> UP; diff --git a/vdslib/src/vespa/vdslib/distribution/idealnodecalculator.h b/vdslib/src/vespa/vdslib/distribution/idealnodecalculator.h index 7bab3ff4af4..2f43907801e 100644 --- a/vdslib/src/vespa/vdslib/distribution/idealnodecalculator.h +++ b/vdslib/src/vespa/vdslib/distribution/idealnodecalculator.h @@ -7,7 +7,6 @@ #pragma once #include <vespa/document/bucket/bucketid.h> -#include <vespa/vdslib/container/smallvector.h> #include <vespa/vdslib/state/clusterstate.h> #include <vespa/vdslib/distribution/distribution.h> #include <vespa/vdslib/state/nodetype.h> @@ -20,7 +19,7 @@ namespace lib { * unneeded details, and make it easily printable. */ class IdealNodeList : public document::Printable { - SmallVector<Node> _idealNodes; + std::vector<Node> _idealNodes; public: IdealNodeList() : _idealNodes() {} diff --git a/vdslib/src/vespa/vdslib/distribution/idealnodecalculatorcache.h b/vdslib/src/vespa/vdslib/distribution/idealnodecalculatorcache.h deleted file mode 100644 index 54ffba869bd..00000000000 --- a/vdslib/src/vespa/vdslib/distribution/idealnodecalculatorcache.h +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2016 Yahoo Inc. 
Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -/** - * A cache for an ideal nodes implementation. - * - * The cache is localized for quick, localized access. - * - There is only one spot one request can be cached, so one can quickly - * look whether there is a cache entry on that spot. - * - Use LSB bits of bucket to lookup entry such that localized entries use - * separate cache spots. - * - * - * Making it cheap for localized - * access, regardless of real implementation. Basically, uses LSB bits for - * buckets, as these are the bits that differ on localized access. - */ -#pragma once - -#include <vespa/vdslib/container/lruorder.h> -#include <vespa/vdslib/distribution/idealnodecalculator.h> -#include <vespa/vespalib/stllike/hash_map.h> -#include <vespa/vespalib/util/linkedptr.h> - -namespace storage { -namespace lib { - -class IdealNodeCalculatorCache : public IdealNodeCalculatorConfigurable { - typedef document::BucketId BucketId; - - /** Cache for all buckets for one given type (same upstate and nodetypes) */ - class TypeCache { - struct Entry { - IdealNodeList _result; - LruOrder<BucketId, TypeCache>::EntryRef _order; - }; - typedef vespalib::hash_map<BucketId, Entry, BucketId::hash> EntryMap; - - const IdealNodeCalculator& _calc; - const NodeType& _nodeType; - UpStates _upStates; - LruOrder<BucketId, TypeCache> _order; - EntryMap _entries; - uint32_t _hitCount; - uint32_t _missCount; - public: - typedef vespalib::LinkedPtr<TypeCache> LP; - - TypeCache(const IdealNodeCalculator& c, const NodeType& t, - UpStates us, uint32_t size) - : _calc(c), _nodeType(t), _upStates(us), _order(size, *this), - _hitCount(0), _missCount(0) {} - - IdealNodeList get(const document::BucketId& bucket) { - EntryMap::const_iterator it(_entries.find(bucket)); - if (it == _entries.end()) { - ++_missCount; - Entry& newEntry(_entries[bucket]); - newEntry._result = _calc.getIdealNodes( - _nodeType, bucket, _upStates); - newEntry._order = 
_order.add(bucket); - return newEntry._result; - } else { - ++_hitCount; - _order.moveToStart(it->second._order); - return it->second._result; - } - } - - void removedFromOrder(const BucketId& bucket) { - _entries.erase(bucket); - } - - void clearCache() { - _entries.clear(); - _order.clear(); - } - - uint32_t getHitCount() const { return _hitCount; } - uint32_t getMissCount() const { return _missCount; } - void clearCounts() { - _hitCount = 0; - _missCount = 0; - } - }; - IdealNodeCalculatorConfigurable::SP _calculator; - std::vector<TypeCache::LP> _cache; - -public: - IdealNodeCalculatorCache(IdealNodeCalculatorConfigurable::SP calc, - uint32_t cacheSizePerUpTypeCache) - : _calculator(calc) - { - initCache(cacheSizePerUpTypeCache, *calc); - } - - virtual void setDistribution(const Distribution& d) { - clearCache(); - _calculator->setDistribution(d); - } - - virtual void setClusterState(const ClusterState& cs) { - clearCache(); - _calculator->setClusterState(cs); - } - - virtual IdealNodeList getIdealNodes(const NodeType& nodeType, - const document::BucketId& bucket, - UpStates upStates) const - { - uint16_t requestType(getCacheType(nodeType, upStates)); - return _cache[requestType]->get(bucket); - } - - uint32_t getHitCount() const { - uint32_t count = 0; - for (uint32_t i=0; i<_cache.size(); ++i) { - count += _cache[i]->getHitCount(); - } - return count; - } - - uint32_t getMissCount() const { - uint32_t count = 0; - for (uint32_t i=0; i<_cache.size(); ++i) { - count += _cache[i]->getMissCount(); - } - return count; - } - - void clearCounts() { - for (uint32_t i=0; i<_cache.size(); ++i) { - _cache[i]->clearCounts(); - } - } - -private: - void clearCache() { - for (size_t i=0; i<_cache.size(); ++i) { - _cache[i]->clearCache(); - } - } - - void initCache(uint32_t size, IdealNodeCalculator& calc) { - _cache.resize(2 * UP_STATE_COUNT); - for (uint32_t i=0; i<2; ++i) { - const NodeType& nt(i == 0 ? 
NodeType::DISTRIBUTOR - : NodeType::STORAGE); - for (uint32_t j=0; j<UP_STATE_COUNT; ++j) { - UpStates upStates = (UpStates) j; - uint16_t type = getCacheType(nt, upStates); - _cache[type].reset(new TypeCache(calc, nt, upStates, size)); - } - } - } - - static uint16_t getCacheType(const NodeType& nt, UpStates s) { - uint16_t typeEnum = nt; - return (s << 1) | typeEnum; - } -}; - -} // lib -} // storage diff --git a/vdslib/src/vespa/vdslib/state/clusterstate.cpp b/vdslib/src/vespa/vdslib/state/clusterstate.cpp index 0c65d22cfab..ce3dffbfb82 100644 --- a/vdslib/src/vespa/vdslib/state/clusterstate.cpp +++ b/vdslib/src/vespa/vdslib/state/clusterstate.cpp @@ -2,8 +2,6 @@ #include <vespa/fastos/fastos.h> #include <vespa/vdslib/state/clusterstate.h> -#include <boost/cast.hpp> -#include <boost/lexical_cast.hpp> #include <vespa/vespalib/text/stringtokenizer.h> #include <vespa/document/util/stringutil.h> #include <vespa/log/log.h> diff --git a/vespa-http-client/pom.xml b/vespa-http-client/pom.xml index 9e2325e8016..563e4406fe9 100644 --- a/vespa-http-client/pom.xml +++ b/vespa-http-client/pom.xml @@ -83,6 +83,12 @@ <artifactId>airline</artifactId> <version>0.6</version> </dependency> + <dependency> + <groupId>com.yahoo.vespa</groupId> + <artifactId>testutil</artifactId> + <version>${project.version}</version> + <scope>test</scope> + </dependency> </dependencies> <build> <plugins> diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/Endpoint.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/Endpoint.java index b23480f9251..6c2e162f15b 100644 --- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/Endpoint.java +++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/Endpoint.java @@ -41,7 +41,15 @@ public final class Endpoint { private final boolean useSsl; private static final int DEFAULT_PORT = 4080; private Endpoint(String hostname, int port, boolean useSsl) { - this.hostname = hostname; + 
if (hostname.startsWith("https://")) { + throw new RuntimeException("Hostname should be name of machine, not prefixed with protocol (https://)"); + } + // A lot of people put http:// before the servername, let us allow that. + if (hostname.startsWith("http://")) { + this.hostname = hostname.replaceFirst("http://", ""); + } else { + this.hostname = hostname; + } this.port = port; this.useSsl = useSsl; } diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/api/FeedClientImpl.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/api/FeedClientImpl.java index 64836ccb20d..d7f001eff31 100644 --- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/api/FeedClientImpl.java +++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/api/FeedClientImpl.java @@ -13,6 +13,7 @@ import java.nio.charset.CodingErrorAction; import java.nio.charset.StandardCharsets; import java.time.Instant; import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.TimeUnit; /** * Implementation of FeedClient. It is a thin layer on top of multiClusterHandler and multiClusterResultAggregator. 
@@ -21,10 +22,14 @@ import java.util.concurrent.ScheduledThreadPoolExecutor; public class FeedClientImpl implements FeedClient { private final OperationProcessor operationProcessor; + private final long closeTimeoutMs; + private final long sleepTimeMs = 500; public FeedClientImpl( SessionParams sessionParams, ResultCallback resultCallback, ScheduledThreadPoolExecutor timeoutExecutor) { - + this.closeTimeoutMs = sessionParams.getConnectionParams().getMaxRetries() * ( + sessionParams.getFeedParams().getServerTimeout(TimeUnit.MILLISECONDS) + + sessionParams.getFeedParams().getClientTimeout(TimeUnit.MILLISECONDS)); this.operationProcessor = new OperationProcessor( new IncompleteResultsThrottler( sessionParams.getThrottlerMinSize(), @@ -53,13 +58,15 @@ public class FeedClientImpl implements FeedClient { @Override public void close() { - Instant startTime = Instant.now(); - while (operationProcessor.getIncompleteResultQueueSize() > 0 && startTime.plusSeconds(30).isAfter(Instant.now())) { - try { - Thread.sleep(500); - } catch (InterruptedException e) { - break; + Instant lastResultReceived = Instant.now(); + long lastNumberOfResults = operationProcessor.getIncompleteResultQueueSize(); + + while (waitForOperations(lastResultReceived, lastNumberOfResults, sleepTimeMs, closeTimeoutMs)) { + long results = operationProcessor.getIncompleteResultQueueSize(); + if (results != lastNumberOfResults) { + lastResultReceived = Instant.now(); } + lastNumberOfResults = results; } operationProcessor.close(); } @@ -68,4 +75,20 @@ public class FeedClientImpl implements FeedClient { public String getStatsAsJson() { return operationProcessor.getStatsAsJson(); } + + // On return value true, wait more. Public for testing. 
+ public static boolean waitForOperations(Instant lastResultReceived, long lastNumberOfResults, long sleepTimeMs, long closeTimeoutMs) { + if (lastNumberOfResults == 0) { + return false; + } + if (lastResultReceived.plusMillis(closeTimeoutMs).isBefore(Instant.now())) { + return false; + } + try { + Thread.sleep(sleepTimeMs); + } catch (InterruptedException e) { + return false; + } + return true; + } } diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ClusterConnection.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ClusterConnection.java index 199f3dcbaa8..414ae90dd27 100644 --- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ClusterConnection.java +++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ClusterConnection.java @@ -114,8 +114,8 @@ public class ClusterConnection implements AutoCloseable { IOThread ioThread = ioThreads.get(hash % ioThreads.size()); try { ioThread.post(document); - } catch (InterruptedException e) { - throw new EndpointIOException(ioThread.getEndpoint(), "While sending", e); + } catch (Throwable t) { + throw new EndpointIOException(ioThread.getEndpoint(), "While sending", t); } } diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/IOThread.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/IOThread.java index 505039cd2d4..60324eda47a 100644 --- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/IOThread.java +++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/IOThread.java @@ -136,33 +136,18 @@ class IOThread implements Runnable, AutoCloseable { stopSignal.countDown(); log.finer("Closed called."); - try { - if (! 
running.await(2 * localQueueTimeOut, TimeUnit.MILLISECONDS)) { - log.info("Waited " + 2 * localQueueTimeOut - + " ms for queue to be empty, did not happen, interrupting thread."); - } - } catch (InterruptedException e) { - log.log(Level.INFO, "Interrupted while waiting for threads to finish sending.", e); - } - - // Make 5 attempts the next 30 secs to get results from previous operations. - for (int i = 0 ; i < 5; i++) { - int size = resultQueue.getPendingSize(); - if (size == 0) break; - log.info("We have outstanding operations (" + size +") , waiting for responses, iteraton: " + i + "."); + // Make a last attempt to get results from previous operations, we have already waited quite a bit before getting here. + int size = resultQueue.getPendingSize(); + if (size > 0) { + log.info("We have outstanding operations (" + size + ") , trying to fetch responses."); try { processResponse(client.drain()); } catch (Throwable e) { log.log(Level.SEVERE, "Some failures while trying to get latest responses from vespa.", e); - break; - } - try { - Thread.sleep(6000); - } catch (InterruptedException e) { - break; } } + try { client.close(); } finally { diff --git a/vespa-http-client/src/test/java/com/yahoo/vespa/http/client/config/EndpointTest.java b/vespa-http-client/src/test/java/com/yahoo/vespa/http/client/config/EndpointTest.java index e5718a4ba20..ab59bde5485 100644 --- a/vespa-http-client/src/test/java/com/yahoo/vespa/http/client/config/EndpointTest.java +++ b/vespa-http-client/src/test/java/com/yahoo/vespa/http/client/config/EndpointTest.java @@ -24,6 +24,17 @@ public class EndpointTest { } @Test + public void testBasicWithHttpProtocolPrefix() { + Endpoint endpoint = Endpoint.create("http://foo"); + assertThat(endpoint.getHostname(), equalTo("foo")); + } + + @Test(expected = RuntimeException.class) + public void testBasicWithHttpsProtocolPrefix() { + Endpoint.create("https://foo"); + } + + @Test public void testAdvanced() { Endpoint endpoint = Endpoint.create("bar", 1234, 
true); diff --git a/vespa-http-client/src/test/java/com/yahoo/vespa/http/client/core/api/FeedClientImplTest.java b/vespa-http-client/src/test/java/com/yahoo/vespa/http/client/core/api/FeedClientImplTest.java new file mode 100644 index 00000000000..79d66be9f97 --- /dev/null +++ b/vespa-http-client/src/test/java/com/yahoo/vespa/http/client/core/api/FeedClientImplTest.java @@ -0,0 +1,31 @@ +package com.yahoo.vespa.http.client.core.api; + +import org.junit.Test; + +import java.time.Instant; + +import static org.hamcrest.CoreMatchers.is; +import static org.junit.Assert.*; + +/** + * @author dybis + */ +public class FeedClientImplTest { + + int sleepValueMillis = 1; + + @Test + public void testCloseWaitTimeOldTimestamp() { + assertThat(FeedClientImpl.waitForOperations(Instant.now().minusSeconds(1000), 1, sleepValueMillis, 10), is(false)); + } + + @Test + public void testCloseWaitTimeOutInFutureStillOperations() { + assertThat(FeedClientImpl.waitForOperations(Instant.now(), 1, sleepValueMillis, 2000), is(true)); + } + + @Test + public void testCloseWaitZeroOperations() { + assertThat(FeedClientImpl.waitForOperations(Instant.now(), 0, sleepValueMillis, 2000), is(false)); + } +}
\ No newline at end of file diff --git a/vespa-http-client/src/test/java/com/yahoo/vespa/http/client/core/operationProcessor/OperationProcessorTest.java b/vespa-http-client/src/test/java/com/yahoo/vespa/http/client/core/operationProcessor/OperationProcessorTest.java index c87385ec2ce..0eb3fc12405 100644 --- a/vespa-http-client/src/test/java/com/yahoo/vespa/http/client/core/operationProcessor/OperationProcessorTest.java +++ b/vespa-http-client/src/test/java/com/yahoo/vespa/http/client/core/operationProcessor/OperationProcessorTest.java @@ -13,11 +13,16 @@ import org.junit.Test; import java.util.ArrayDeque; import java.util.Queue; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.TimeUnit; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.core.Is.is; import static org.junit.Assert.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyLong; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; /** * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a> @@ -367,4 +372,32 @@ public class OperationProcessorTest { assertThat(done.await(120, TimeUnit.SECONDS), is(true)); } + + @Test + public void testSendsResponseToQueuedDocumentOnClose() throws InterruptedException { + SessionParams sessionParams = new SessionParams.Builder() + .addCluster(new Cluster.Builder().addEndpoint(Endpoint.create("#$#")).build()) + .build(); + + ScheduledThreadPoolExecutor executor = mock(ScheduledThreadPoolExecutor.class); + when(executor.awaitTermination(anyLong(), any())).thenReturn(true); + + CountDownLatch countDownLatch = new CountDownLatch(3); + + OperationProcessor operationProcessor = new OperationProcessor( + new IncompleteResultsThrottler(19, 19, null, null), + (docId, documentResult) -> { + countDownLatch.countDown(); + }, + sessionParams, executor); + + // Will fail due to bogus host name, but 
will be retried. + operationProcessor.sendDocument(doc1); + operationProcessor.sendDocument(doc2); + operationProcessor.sendDocument(doc3); + + // Will create fail results. + operationProcessor.close(); + countDownLatch.await(); + } } diff --git a/vespa-http-client/src/test/java/com/yahoo/vespa/http/client/runner/JsonReaderTest.java b/vespa-http-client/src/test/java/com/yahoo/vespa/http/client/runner/JsonReaderTest.java index ab7dca0d5fb..eba8791bbb7 100644 --- a/vespa-http-client/src/test/java/com/yahoo/vespa/http/client/runner/JsonReaderTest.java +++ b/vespa-http-client/src/test/java/com/yahoo/vespa/http/client/runner/JsonReaderTest.java @@ -1,7 +1,6 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.http.client.runner; -import com.google.common.base.Joiner; import com.yahoo.vespa.http.client.FeedClient; import com.yahoo.vespa.http.client.core.JsonReader; import org.junit.Test; @@ -13,6 +12,7 @@ import java.util.ArrayList; import java.util.List; import java.util.concurrent.atomic.AtomicInteger; +import static com.yahoo.test.json.JsonTestHelper.inputJson; import static org.hamcrest.core.Is.is; import static org.junit.Assert.*; @@ -260,13 +260,4 @@ public class JsonReaderTest { assertThat(session.documentIds.size(), is(1)); assertThat(session.documentIds.get(0), is("id:foo:music:doc:foo:bar")); } - - /** - * Convenience method to input JSON without escaping double quotes and newlines - * Each parameter represents a line of JSON encoded data - * The lines are joined with newline and single quotes are replaced with double quotes - */ - static String inputJson(String... lines) { - return Joiner.on("\n").join(lines).replaceAll("'", "\""); - } }
\ No newline at end of file diff --git a/vespabase/src/start-cbinaries.sh b/vespabase/src/start-cbinaries.sh index 91ce8edede4..1809f889244 100755 --- a/vespabase/src/start-cbinaries.sh +++ b/vespabase/src/start-cbinaries.sh @@ -83,6 +83,8 @@ if [ "$VESPA_USE_VALGRIND" = "all" ]; then no_valgrind=false fi +export STD_THREAD_PREVENT_TRY_CATCH=true + # special malloc setup; we should make some better mechanism for this # export GLIBCXX_FORCE_NEW=1 diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/RestApi.java b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/RestApi.java index 4889d064387..6a7797c20a7 100644 --- a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/RestApi.java +++ b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/RestApi.java @@ -23,6 +23,7 @@ import com.yahoo.document.restapi.RestUri; import com.yahoo.documentapi.messagebus.MessageBusDocumentAccess; import com.yahoo.documentapi.messagebus.MessageBusParams; import com.yahoo.documentapi.messagebus.loadtypes.LoadTypeSet; +import com.yahoo.vespa.config.content.LoadTypeConfig; import com.yahoo.vespaxmlparser.VespaXMLFeedReader; import java.io.IOException; @@ -55,10 +56,12 @@ public class RestApi extends LoggingRequestHandler { private AtomicInteger threadsAvailableForApi = new AtomicInteger(20 /*max concurrent requests */); @Inject - public RestApi(Executor executor, AccessLog accessLog, DocumentmanagerConfig documentManagerConfig) { + public RestApi(Executor executor, AccessLog accessLog, DocumentmanagerConfig documentManagerConfig, + LoadTypeConfig loadTypeConfig) { super(executor, accessLog); - final LoadTypeSet loadTypes = new LoadTypeSet("client"); - this.operationHandler = new OperationHandlerImpl(new MessageBusDocumentAccess(new MessageBusParams(loadTypes))); + MessageBusParams params = new MessageBusParams(new LoadTypeSet(loadTypeConfig)); + 
params.setDocumentmanagerConfig(documentManagerConfig); + this.operationHandler = new OperationHandlerImpl(new MessageBusDocumentAccess(params)); this.singleDocumentParser = new SingleDocumentParser(new DocumentTypeManager(documentManagerConfig)); } diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerRemove.java b/vespaclient-container-plugin/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerRemove.java index 14b2d86ae75..87a7ebe9e49 100755 --- a/vespaclient-container-plugin/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerRemove.java +++ b/vespaclient-container-plugin/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerRemove.java @@ -3,9 +3,12 @@ package com.yahoo.feedhandler; import com.google.inject.Inject; import com.yahoo.clientmetrics.RouteMetricSet; +import com.yahoo.cloud.config.ClusterListConfig; +import com.yahoo.cloud.config.SlobroksConfig; import com.yahoo.container.jdisc.HttpRequest; import com.yahoo.container.jdisc.HttpResponse; import com.yahoo.document.DocumentId; +import com.yahoo.document.config.DocumentmanagerConfig; import com.yahoo.feedapi.FeedContext; import com.yahoo.feedapi.MessagePropertyProcessor; import com.yahoo.feedapi.SingleSender; @@ -20,9 +23,14 @@ import java.util.concurrent.Executor; public class VespaFeedHandlerRemove extends VespaFeedHandlerBase { @Inject - public VespaFeedHandlerRemove(FeederConfig feederConfig, - LoadTypeConfig loadTypeConfig, Executor executor, Metric metric) throws Exception { - super(feederConfig, loadTypeConfig, executor, metric); + public VespaFeedHandlerRemove(FeederConfig feederConfig, + LoadTypeConfig loadTypeConfig, + DocumentmanagerConfig documentmanagerConfig, + SlobroksConfig slobroksConfig, + ClusterListConfig clusterListConfig, + Executor executor, + Metric metric) throws Exception { + super(feederConfig, loadTypeConfig, documentmanagerConfig, slobroksConfig, clusterListConfig, executor, metric); } VespaFeedHandlerRemove(FeedContext context, Executor 
executor) throws Exception { diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerRemoveLocation.java b/vespaclient-container-plugin/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerRemoveLocation.java index 3b2f82c865e..04d22386bfb 100644 --- a/vespaclient-container-plugin/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerRemoveLocation.java +++ b/vespaclient-container-plugin/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerRemoveLocation.java @@ -3,8 +3,11 @@ package com.yahoo.feedhandler; import com.google.inject.Inject; import com.yahoo.clientmetrics.RouteMetricSet; +import com.yahoo.cloud.config.ClusterListConfig; +import com.yahoo.cloud.config.SlobroksConfig; import com.yahoo.container.jdisc.HttpRequest; import com.yahoo.container.jdisc.HttpResponse; +import com.yahoo.document.config.DocumentmanagerConfig; import com.yahoo.documentapi.messagebus.protocol.RemoveLocationMessage; import com.yahoo.feedapi.FeedContext; import com.yahoo.feedapi.MessagePropertyProcessor; @@ -19,9 +22,13 @@ import java.util.concurrent.Executor; public class VespaFeedHandlerRemoveLocation extends VespaFeedHandlerBase { @Inject - public VespaFeedHandlerRemoveLocation(FeederConfig feederConfig, LoadTypeConfig loadTypeConfig, Executor executor, - Metric metric) throws Exception { - super(feederConfig, loadTypeConfig, executor, metric); + public VespaFeedHandlerRemoveLocation(FeederConfig feederConfig, + LoadTypeConfig loadTypeConfig, + DocumentmanagerConfig documentmanagerConfig, + SlobroksConfig slobroksConfig, + ClusterListConfig clusterListConfig, + Executor executor, Metric metric) throws Exception { + super(feederConfig, loadTypeConfig, documentmanagerConfig, slobroksConfig, clusterListConfig, executor, metric); } VespaFeedHandlerRemoveLocation(FeedContext context, Executor executor) throws Exception { diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerStatus.java 
b/vespaclient-container-plugin/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerStatus.java index 77930ae5a94..ed80443f970 100755 --- a/vespaclient-container-plugin/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerStatus.java +++ b/vespaclient-container-plugin/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerStatus.java @@ -3,9 +3,12 @@ package com.yahoo.feedhandler; import java.util.concurrent.Executor; +import com.yahoo.cloud.config.ClusterListConfig; +import com.yahoo.cloud.config.SlobroksConfig; import com.yahoo.container.jdisc.HttpRequest; import com.yahoo.container.jdisc.HttpResponse; import com.yahoo.container.jdisc.ThreadedHttpRequestHandler; +import com.yahoo.document.config.DocumentmanagerConfig; import com.yahoo.vespa.config.content.LoadTypeConfig; import com.yahoo.feedapi.FeedContext; import com.yahoo.metrics.MetricManager; @@ -16,8 +19,14 @@ public class VespaFeedHandlerStatus extends ThreadedHttpRequestHandler { private MetricManager manager; - public VespaFeedHandlerStatus(FeederConfig feederConfig, LoadTypeConfig loadTypeConfig, Executor executor) { - this(FeedContext.getInstance(feederConfig, loadTypeConfig, new NullFeedMetric()), true, true, executor); + public VespaFeedHandlerStatus(FeederConfig feederConfig, + LoadTypeConfig loadTypeConfig, + DocumentmanagerConfig documentmanagerConfig, + SlobroksConfig slobroksConfig, + ClusterListConfig clusterListConfig, + Executor executor) { + this(FeedContext.getInstance(feederConfig, loadTypeConfig, documentmanagerConfig, slobroksConfig, + clusterListConfig, new NullFeedMetric()), true, true, executor); } VespaFeedHandlerStatus(FeedContext context, boolean doLog, boolean makeSnapshots, Executor executor) { diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/storage/searcher/GetSearcher.java b/vespaclient-container-plugin/src/main/java/com/yahoo/storage/searcher/GetSearcher.java index 661fcac6a64..cf42bce9c1c 100755 --- 
a/vespaclient-container-plugin/src/main/java/com/yahoo/storage/searcher/GetSearcher.java +++ b/vespaclient-container-plugin/src/main/java/com/yahoo/storage/searcher/GetSearcher.java @@ -2,7 +2,10 @@ package com.yahoo.storage.searcher; import com.google.inject.Inject; +import com.yahoo.cloud.config.ClusterListConfig; +import com.yahoo.cloud.config.SlobroksConfig; import com.yahoo.container.jdisc.HttpRequest; +import com.yahoo.document.config.DocumentmanagerConfig; import com.yahoo.feedhandler.NullFeedMetric; import com.yahoo.processing.request.CompoundName; import com.yahoo.vespa.config.content.LoadTypeConfig; @@ -169,9 +172,13 @@ public class GetSearcher extends Searcher { } @Inject - public GetSearcher(FeederConfig feederConfig, LoadTypeConfig loadTypeConfig) throws Exception { - this(FeedContext.getInstance(feederConfig, loadTypeConfig, new NullFeedMetric()), - (long)(feederConfig.timeout() * 1000)); + public GetSearcher(FeederConfig feederConfig, + LoadTypeConfig loadTypeConfig, + DocumentmanagerConfig documentmanagerConfig, + SlobroksConfig slobroksConfig, + ClusterListConfig clusterListConfig) throws Exception { + this(FeedContext.getInstance(feederConfig, loadTypeConfig, documentmanagerConfig, slobroksConfig, + clusterListConfig, new NullFeedMetric()), (long)(feederConfig.timeout() * 1000)); } GetSearcher(FeedContext context) throws Exception { diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/storage/searcher/VisitSearcher.java b/vespaclient-container-plugin/src/main/java/com/yahoo/storage/searcher/VisitSearcher.java index 621ffcefbe1..2d7e5fbc338 100644 --- a/vespaclient-container-plugin/src/main/java/com/yahoo/storage/searcher/VisitSearcher.java +++ b/vespaclient-container-plugin/src/main/java/com/yahoo/storage/searcher/VisitSearcher.java @@ -1,6 +1,9 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.storage.searcher; +import com.yahoo.cloud.config.ClusterListConfig; +import com.yahoo.cloud.config.SlobroksConfig; +import com.yahoo.document.config.DocumentmanagerConfig; import com.yahoo.feedhandler.NullFeedMetric; import com.yahoo.vespa.config.content.LoadTypeConfig; import com.yahoo.component.ComponentId; @@ -30,8 +33,13 @@ public class VisitSearcher extends Searcher { public static final String VISITOR_CONTINUATION_TOKEN_FIELDNAME = "visitorContinuationToken"; FeedContext context; - public VisitSearcher(FeederConfig feederConfig, LoadTypeConfig loadTypeConfig) throws Exception { - this(FeedContext.getInstance(feederConfig, loadTypeConfig, new NullFeedMetric())); + public VisitSearcher(FeederConfig feederConfig, + LoadTypeConfig loadTypeConfig, + DocumentmanagerConfig documentmanagerConfig, + SlobroksConfig slobroksConfig, + ClusterListConfig clusterListConfig) throws Exception { + this(FeedContext.getInstance(feederConfig, loadTypeConfig, documentmanagerConfig, + slobroksConfig, clusterListConfig, new NullFeedMetric())); } VisitSearcher(FeedContext context) throws Exception { diff --git a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/DocumentApiApplicationTest.java b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/DocumentApiApplicationTest.java new file mode 100644 index 00000000000..ec9cdc594e9 --- /dev/null +++ b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/DocumentApiApplicationTest.java @@ -0,0 +1,34 @@ +package com.yahoo.document.restapi; + +import com.yahoo.application.Application; +import com.yahoo.application.Networking; +import org.junit.Test; + +import java.io.IOException; +import java.net.ServerSocket; + +/** + * @author bratseth + */ +public class DocumentApiApplicationTest { + + /** Test that it is possible to instantiate an Application with a document-api */ + @Test + public void application_with_document_api() throws IOException { + String services = + 
"<jdisc version='1.0'>" + + " <http><server port=\"" + findRandomOpenPortOnAllLocalInterfaces() + "\" id=\"foobar\"/></http>" + + " <document-api/>" + + "</jdisc>"; + try (Application application = Application.fromServicesXml(services, Networking.enable)) { + } + } + + private int findRandomOpenPortOnAllLocalInterfaces() throws IOException { + ServerSocket socket = new ServerSocket(0); + socket.setReuseAddress(true); + int port = socket.getLocalPort(); + socket.close(); + return port; + } +} diff --git a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/RestApiTest.java b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/RestApiTest.java index c4e9e27ca75..95a48ab41fe 100644 --- a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/RestApiTest.java +++ b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/RestApiTest.java @@ -33,11 +33,12 @@ import static org.hamcrest.core.StringStartsWith.startsWith; import static org.junit.Assert.assertThat; public class RestApiTest { + Application application; @Before public void setup() throws Exception { - application = Application.fromApplicationPackage(Paths.get("src/test/application"), Networking.enable); + application = Application.fromApplicationPackage(Paths.get("src/test/rest-api-application"), Networking.enable); } @After @@ -249,7 +250,7 @@ public class RestApiTest { public void testbasicEncodingV2() throws Exception { Request request = new Request("http://localhost:" + getFirstListenPort() + get_enc_test_uri_v2); HttpGet get = new HttpGet(request.getUri()); - final String rest = doRest(get); + String rest = doRest(get); assertThat(rest, containsString(get_enc_response_part1_v2)); assertThat(rest, containsString(get_enc_response_part2)); } @@ -264,7 +265,7 @@ public class RestApiTest { public void testbasicVisit() throws Exception { Request request = new Request("http://localhost:" + getFirstListenPort() + 
visit_test_uri); HttpGet get = new HttpGet(request.getUri()); - final String rest = doRest(get); + String rest = doRest(get); assertThat(rest, containsString(visit_response_part1)); assertThat(rest, containsString(visit_response_part2)); assertThat(rest, containsString(visit_response_part3)); @@ -276,9 +277,9 @@ public class RestApiTest { @Test public void testBadVisit() throws Exception { - final Request request = new Request("http://localhost:" + getFirstListenPort() + visit_test_bad_uri); + Request request = new Request("http://localhost:" + getFirstListenPort() + visit_test_bad_uri); HttpGet get = new HttpGet(request.getUri()); - final String rest = doRest(get); + String rest = doRest(get); assertThat(rest, containsString(visit_test_bad_response)); } diff --git a/vespaclient-container-plugin/src/test/java/com/yahoo/storage/searcher/VisitorSearcherTestCase.java b/vespaclient-container-plugin/src/test/java/com/yahoo/storage/searcher/VisitorSearcherTestCase.java index 820f7f56e2f..4b1c69c73e7 100644 --- a/vespaclient-container-plugin/src/test/java/com/yahoo/storage/searcher/VisitorSearcherTestCase.java +++ b/vespaclient-container-plugin/src/test/java/com/yahoo/storage/searcher/VisitorSearcherTestCase.java @@ -60,8 +60,7 @@ public class VisitorSearcherTestCase { public VisitSearcher create() throws Exception { ClusterListConfig.Storage.Builder storageCluster = new ClusterListConfig.Storage.Builder().configid("storage/cluster.foobar").name("foobar"); ClusterListConfig clusterListCfg = new ClusterListConfig(new ClusterListConfig.Builder().storage(storageCluster)); - ClusterList clusterList = new ClusterList(); - clusterList.configure(clusterListCfg); + ClusterList clusterList = new ClusterList(clusterListCfg); return new VisitSearcher(new FeedContext( new MessagePropertyProcessor(new FeederConfig(new FeederConfig.Builder().timeout(458).route("riksveg18").retryenabled(true)), new LoadTypeConfig(new LoadTypeConfig.Builder())), @@ -139,15 +138,13 @@ public class 
VisitorSearcherTestCase { ClusterListConfig.Storage.Builder storageCluster1 = new ClusterListConfig.Storage.Builder().configid("storage/cluster.foo").name("foo"); ClusterListConfig.Storage.Builder storageCluster2 = new ClusterListConfig.Storage.Builder().configid("storage/cluster.bar").name("bar"); ClusterListConfig clusterListCfg = new ClusterListConfig(new ClusterListConfig.Builder().storage(Arrays.asList(storageCluster1, storageCluster2))); - ClusterList clusterList = new ClusterList(); - clusterList.configure(clusterListCfg); + ClusterList clusterList = new ClusterList(clusterListCfg); VisitSearcher searcher = new VisitSearcher(new FeedContext( new MessagePropertyProcessor(new FeederConfig(new FeederConfig.Builder().timeout(100).route("whatever").retryenabled(true)), new LoadTypeConfig(new LoadTypeConfig.Builder())), factory, docMan, clusterList, new NullFeedMetric())); - searcher.getVisitorParameters( - newQuery("visit?visit.selection=id.user=1234"), null); + searcher.getVisitorParameters(newQuery("visit?visit.selection=id.user=1234"), null); } @Test diff --git a/vespaclient-container-plugin/src/test/application/services.xml b/vespaclient-container-plugin/src/test/rest-api-application/services.xml index df178e109c3..df178e109c3 100644 --- a/vespaclient-container-plugin/src/test/application/services.xml +++ b/vespaclient-container-plugin/src/test/rest-api-application/services.xml diff --git a/vespaclient-core/src/main/java/com/yahoo/feedapi/FeedContext.java b/vespaclient-core/src/main/java/com/yahoo/feedapi/FeedContext.java index a26064cd98b..885e28b63a5 100755 --- a/vespaclient-core/src/main/java/com/yahoo/feedapi/FeedContext.java +++ b/vespaclient-core/src/main/java/com/yahoo/feedapi/FeedContext.java @@ -1,6 +1,9 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.feedapi; +import com.yahoo.cloud.config.ClusterListConfig; +import com.yahoo.cloud.config.SlobroksConfig; +import com.yahoo.document.config.DocumentmanagerConfig; import com.yahoo.jdisc.Metric; import com.yahoo.vespa.config.content.LoadTypeConfig; import com.yahoo.document.DocumentTypeManager; @@ -8,6 +11,7 @@ import com.yahoo.clientmetrics.ClientMetrics; import com.yahoo.vespaclient.ClusterList; import com.yahoo.vespaclient.config.FeederConfig; +import javax.naming.OperationNotSupportedException; import java.util.Map; import java.util.TreeMap; @@ -87,16 +91,35 @@ public class FeedContext { return docTypeManager; } - public static FeedContext getInstance(FeederConfig feederConfig, LoadTypeConfig loadTypeConfig, Metric metric) { + public static FeedContext getInstance(FeederConfig feederConfig, + LoadTypeConfig loadTypeConfig, + DocumentmanagerConfig documentmanagerConfig, + SlobroksConfig slobroksConfig, + ClusterListConfig clusterListConfig, + Metric metric) { synchronized (sync) { try { if (instance == null) { MessagePropertyProcessor proc = new MessagePropertyProcessor(feederConfig, loadTypeConfig); - MessageBusSessionFactory mbusFactory = new MessageBusSessionFactory(proc); - instance = new FeedContext(proc, - mbusFactory, - mbusFactory.getAccess().getDocumentTypeManager(), - new ClusterList("client"), metric); + + if (System.getProperty("vespa.local", "false").equals("true")) { + // Use injected configs when running from Application. This means we cannot reconfigure + MessageBusSessionFactory mbusFactory = new MessageBusSessionFactory(proc, documentmanagerConfig, slobroksConfig); + instance = new FeedContext(proc, + mbusFactory, + mbusFactory.getAccess().getDocumentTypeManager(), + new ClusterList(clusterListConfig), metric); + } + else { + // Don't send configs to messagebus to make it self-subscribe instead as this instance + // survives reconfig :-/ + // This code will die soon ... 
+ MessageBusSessionFactory mbusFactory = new MessageBusSessionFactory(proc, null, null); + instance = new FeedContext(proc, + mbusFactory, + mbusFactory.getAccess().getDocumentTypeManager(), + new ClusterList("client"), metric); + } } else { instance.getPropertyProcessor().configure(feederConfig, loadTypeConfig); } diff --git a/vespaclient-core/src/main/java/com/yahoo/feedapi/FeederOptions.java b/vespaclient-core/src/main/java/com/yahoo/feedapi/FeederOptions.java index 2894993b983..1546d605f02 100755 --- a/vespaclient-core/src/main/java/com/yahoo/feedapi/FeederOptions.java +++ b/vespaclient-core/src/main/java/com/yahoo/feedapi/FeederOptions.java @@ -252,18 +252,6 @@ public class FeederOptions { return params; } - public MessageBusParams toMessageBusParams() { - MessageBusParams mbusParams = new MessageBusParams(); - if (retryEnabled) { - RetryTransientErrorsPolicy retryPolicy = new RetryTransientErrorsPolicy(); - retryPolicy.setBaseDelay(retryDelay); - mbusParams.setRetryPolicy(retryPolicy); - } else { - mbusParams.setRetryPolicy(null); - } - return mbusParams; - } - public RPCNetworkParams getNetworkParams() { try { RPCNetworkParams networkParams = new RPCNetworkParams(); diff --git a/vespaclient-core/src/main/java/com/yahoo/feedapi/MessageBusSessionFactory.java b/vespaclient-core/src/main/java/com/yahoo/feedapi/MessageBusSessionFactory.java index 8021ea86783..d670ceb4e77 100755 --- a/vespaclient-core/src/main/java/com/yahoo/feedapi/MessageBusSessionFactory.java +++ b/vespaclient-core/src/main/java/com/yahoo/feedapi/MessageBusSessionFactory.java @@ -1,6 +1,8 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.feedapi; +import com.yahoo.cloud.config.SlobroksConfig; +import com.yahoo.document.config.DocumentmanagerConfig; import com.yahoo.documentapi.VisitorParameters; import com.yahoo.documentapi.VisitorSession; import com.yahoo.documentapi.messagebus.MessageBusDocumentAccess; @@ -12,6 +14,7 @@ import com.yahoo.jdisc.Metric; import com.yahoo.messagebus.Message; import com.yahoo.messagebus.ReplyHandler; import com.yahoo.messagebus.SourceSession; +import com.yahoo.messagebus.network.rpc.RPCNetworkParams; import java.util.Collections; @@ -27,12 +30,24 @@ public class MessageBusSessionFactory implements SessionFactory { String NUM_UPDATES = "num_updates"; } + @SuppressWarnings("unused") // used from extensions public MessageBusSessionFactory(MessagePropertyProcessor processor) { + this(processor, null, null); + } + + public MessageBusSessionFactory(MessagePropertyProcessor processor, + DocumentmanagerConfig documentmanagerConfig, + SlobroksConfig slobroksConfig) { this.processor = processor; MessageBusParams params = new MessageBusParams(processor.getLoadTypes()); params.setTraceLevel(processor.getFeederOptions().getTraceLevel()); - params.setRPCNetworkParams(processor.getFeederOptions().getNetworkParams()); + RPCNetworkParams rpcNetworkParams = processor.getFeederOptions().getNetworkParams(); + if (slobroksConfig != null) // not set: will subscribe + rpcNetworkParams.setSlobroksConfig(slobroksConfig); + params.setRPCNetworkParams(rpcNetworkParams); params.setDocumentManagerConfigId("client"); + if (documentmanagerConfig != null) // not set: will subscribe + params.setDocumentmanagerConfig(documentmanagerConfig); access = new MessageBusDocumentAccess(params); } diff --git a/vespaclient-core/src/main/java/com/yahoo/feedhandler/VespaFeedHandler.java b/vespaclient-core/src/main/java/com/yahoo/feedhandler/VespaFeedHandler.java index 6e3facbdc98..08e1ca0482f 100755 --- a/vespaclient-core/src/main/java/com/yahoo/feedhandler/VespaFeedHandler.java +++ 
b/vespaclient-core/src/main/java/com/yahoo/feedhandler/VespaFeedHandler.java @@ -3,8 +3,11 @@ package com.yahoo.feedhandler; import com.google.inject.Inject; import com.yahoo.clientmetrics.RouteMetricSet; +import com.yahoo.cloud.config.ClusterListConfig; +import com.yahoo.cloud.config.SlobroksConfig; import com.yahoo.container.jdisc.HttpRequest; import com.yahoo.container.jdisc.HttpResponse; +import com.yahoo.document.config.DocumentmanagerConfig; import com.yahoo.feedapi.DocprocMessageProcessor; import com.yahoo.feedapi.FeedContext; import com.yahoo.feedapi.Feeder; @@ -30,9 +33,14 @@ public final class VespaFeedHandler extends VespaFeedHandlerBase { public static final String JSON_INPUT = "jsonInput"; @Inject - public VespaFeedHandler(FeederConfig feederConfig, LoadTypeConfig loadTypeConfig, Executor executor, + public VespaFeedHandler(FeederConfig feederConfig, + LoadTypeConfig loadTypeConfig, + DocumentmanagerConfig documentmanagerConfig, + SlobroksConfig slobroksConfig, + ClusterListConfig clusterListConfig, + Executor executor, Metric metric) throws Exception { - super(feederConfig, loadTypeConfig, executor, metric); + super(feederConfig, loadTypeConfig, documentmanagerConfig, slobroksConfig, clusterListConfig, executor, metric); } VespaFeedHandler(FeedContext context, Executor executor) throws Exception { diff --git a/vespaclient-core/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerBase.java b/vespaclient-core/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerBase.java index fa1e6854593..6b4810f1ac4 100755 --- a/vespaclient-core/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerBase.java +++ b/vespaclient-core/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerBase.java @@ -3,11 +3,14 @@ package com.yahoo.feedhandler; import com.google.inject.Inject; import com.yahoo.clientmetrics.ClientMetrics; +import com.yahoo.cloud.config.ClusterListConfig; +import com.yahoo.cloud.config.SlobroksConfig; import com.yahoo.component.provider.ComponentRegistry; 
import com.yahoo.container.jdisc.HttpRequest; import com.yahoo.container.jdisc.ThreadedHttpRequestHandler; import com.yahoo.docproc.DocprocService; import com.yahoo.document.DocumentTypeManager; +import com.yahoo.document.config.DocumentmanagerConfig; import com.yahoo.feedapi.FeedContext; import com.yahoo.feedapi.MessagePropertyProcessor; import com.yahoo.feedapi.SharedSender; @@ -29,9 +32,14 @@ public abstract class VespaFeedHandlerBase extends ThreadedHttpRequestHandler { @Inject public VespaFeedHandlerBase(FeederConfig feederConfig, LoadTypeConfig loadTypeConfig, + DocumentmanagerConfig documentmanagerConfig, + SlobroksConfig slobroksConfig, + ClusterListConfig clusterListConfig, Executor executor, Metric metric) throws Exception { - this(FeedContext.getInstance(feederConfig, loadTypeConfig, metric), executor, (long)feederConfig.timeout() * 1000); + this(FeedContext.getInstance(feederConfig, loadTypeConfig, documentmanagerConfig, + slobroksConfig, clusterListConfig, metric), + executor, (long)feederConfig.timeout() * 1000); } public VespaFeedHandlerBase(FeedContext context, Executor executor) throws Exception { diff --git a/vespaclient-core/src/main/java/com/yahoo/vespaclient/ClusterList.java b/vespaclient-core/src/main/java/com/yahoo/vespaclient/ClusterList.java index 3ea3bb5cb9d..7587630a985 100644 --- a/vespaclient-core/src/main/java/com/yahoo/vespaclient/ClusterList.java +++ b/vespaclient-core/src/main/java/com/yahoo/vespaclient/ClusterList.java @@ -5,36 +5,39 @@ import com.yahoo.cloud.config.ClusterListConfig; import com.yahoo.config.subscription.ConfigGetter; import java.util.ArrayList; +import java.util.Collections; import java.util.List; +/** A list of content clusters, either obtained from a list, a given config or by self-subscribing */ public class ClusterList { - List<ClusterDef> storageClusters = new ArrayList<ClusterDef>(); + + List<ClusterDef> contentClusters = new ArrayList<>(); public ClusterList() { - this(null); + this(new ArrayList<>()); + } 
+ + public ClusterList(List<ClusterDef> contentClusters) { + this.contentClusters = contentClusters; } public ClusterList(String configId) { - if (configId != null) { - configure(new ConfigGetter<>(ClusterListConfig.class).getConfig(configId)); - } + configure(new ConfigGetter<>(ClusterListConfig.class).getConfig(configId)); } - - public List<ClusterDef> getStorageClusters() { - return storageClusters; + + public ClusterList(ClusterListConfig config) { + configure(config); } - public void configure(ClusterListConfig cfg) { - storageClusters.clear(); - for (int i = 0; i < cfg.storage().size(); i++) { - storageClusters.add(new ClusterDef(cfg.storage(i).name(), - cfg.storage(i).configid())); - } + private void configure(ClusterListConfig config) { + contentClusters.clear(); // TODO: Create a new + for (int i = 0; i < config.storage().size(); i++) + contentClusters.add(new ClusterDef(config.storage(i).name(), config.storage(i).configid())); } - public static ClusterList createMockedList(List<ClusterDef> clusters) { - ClusterList list = new ClusterList(null); - list.storageClusters = clusters; - return list; + /** Returns a reference to the mutable list */ + public List<ClusterDef> getStorageClusters() { + return contentClusters; // TODO: Use immutable list } + } diff --git a/vespajlib/src/main/java/com/yahoo/net/HostName.java b/vespajlib/src/main/java/com/yahoo/net/HostName.java index 9dff33e1f5f..4e791ca117a 100644 --- a/vespajlib/src/main/java/com/yahoo/net/HostName.java +++ b/vespajlib/src/main/java/com/yahoo/net/HostName.java @@ -15,12 +15,11 @@ public class HostName { private static String myHost = null; /** - * Static method that returns the name of localhost using shell - * command "hostname". + * Static method that returns the name of localhost using shell command "hostname". + * If you need a guaranteed resolvable name see LinuxINetAddress. * * @return the name of localhost. * @throws RuntimeException if executing the command 'hostname' fails. 
- * @see LinuxInetAddress if you need a host name/address which is reachable */ public static synchronized String getLocalhost() { if (myHost == null) { @@ -38,4 +37,5 @@ public class HostName { } return myHost; } + } diff --git a/vespajlib/src/main/java/com/yahoo/net/LinuxInetAddress.java b/vespajlib/src/main/java/com/yahoo/net/LinuxInetAddress.java index 1b7658b3a11..9d50c99d77c 100644 --- a/vespajlib/src/main/java/com/yahoo/net/LinuxInetAddress.java +++ b/vespajlib/src/main/java/com/yahoo/net/LinuxInetAddress.java @@ -12,28 +12,23 @@ import java.util.stream.Collectors; /** * Utilities for returning localhost addresses on Linux. - * See - * http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4665037 - * on why this is necessary. + * See http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4665037 on why this is necessary. * * @author bratseth */ -// TODO: Remove on vespa 7 public class LinuxInetAddress { /** - * Returns an InetAddress representing the address of the localhost. + * Returns an InetAddress representing a resolvable localhost address. * A non-loopback address is preferred if available. * An address that resolves to a hostname is preferred among non-loopback addresses. * IPv4 is preferred over IPv6 among resolving addresses. * * @return a localhost address - * @deprecated use {@link HostName} instead */ // Note: Checking resolvability of ipV6 addresses takes a long time on some systems (over 5 seconds // for some addresses on my mac). This method is written to minimize the number of resolution checks done // and to defer ip6 checks until necessary. 
- @Deprecated public static InetAddress getLocalHost() { InetAddress fallback = InetAddress.getLoopbackAddress(); try { @@ -70,9 +65,7 @@ public class LinuxInetAddress { * * @return an array of the addresses of this * @throws UnknownHostException if we cannot access the network - * @deprecated do not use */ - @Deprecated public static InetAddress[] getAllLocal() throws UnknownHostException { InetAddress[] localInetAddresses = InetAddress.getAllByName("127.0.0.1"); if ( ! localInetAddresses[0].isLoopbackAddress()) return localInetAddresses; diff --git a/vespajlib/src/main/java/com/yahoo/tensor/serialization/CompactBinaryFormat.java b/vespajlib/src/main/java/com/yahoo/tensor/serialization/SparseBinaryFormat.java index e850e20fcab..711e0e834da 100644 --- a/vespajlib/src/main/java/com/yahoo/tensor/serialization/CompactBinaryFormat.java +++ b/vespajlib/src/main/java/com/yahoo/tensor/serialization/SparseBinaryFormat.java @@ -12,7 +12,7 @@ import com.yahoo.text.Utf8; import java.util.*; /** - * Implementation of a compact binary format for a tensor on the form: + * Implementation of a sparse binary format for a tensor on the form: * * Sorted dimensions = num_dimensions [dimension_str_len dimension_str_bytes]* * Cells = num_cells [label_1_str_len label_1_str_bytes ... 
label_N_str_len label_N_str_bytes cell_value]* @@ -23,7 +23,7 @@ import java.util.*; * @author geirst */ @Beta -class CompactBinaryFormat implements BinaryFormat { +class SparseBinaryFormat implements BinaryFormat { @Override public void encode(GrowableByteBuffer buffer, Tensor tensor) { diff --git a/vespajlib/src/main/java/com/yahoo/tensor/serialization/TypedBinaryFormat.java b/vespajlib/src/main/java/com/yahoo/tensor/serialization/TypedBinaryFormat.java index 9e2c0b5a63f..5a45f20b6d8 100644 --- a/vespajlib/src/main/java/com/yahoo/tensor/serialization/TypedBinaryFormat.java +++ b/vespajlib/src/main/java/com/yahoo/tensor/serialization/TypedBinaryFormat.java @@ -17,12 +17,12 @@ import com.yahoo.tensor.Tensor; @Beta public class TypedBinaryFormat { - private static final int COMPACT_BINARY_FORMAT_TYPE = 1; + private static final int SPARSE_BINARY_FORMAT_TYPE = 1; public static byte[] encode(Tensor tensor) { GrowableByteBuffer buffer = new GrowableByteBuffer(); - buffer.putInt1_4Bytes(COMPACT_BINARY_FORMAT_TYPE); - new CompactBinaryFormat().encode(buffer, tensor); + buffer.putInt1_4Bytes(SPARSE_BINARY_FORMAT_TYPE); + new SparseBinaryFormat().encode(buffer, tensor); buffer.flip(); byte[] result = new byte[buffer.remaining()]; buffer.get(result); @@ -33,8 +33,8 @@ public class TypedBinaryFormat { GrowableByteBuffer buffer = GrowableByteBuffer.wrap(data); int formatType = buffer.getInt1_4Bytes(); switch (formatType) { - case COMPACT_BINARY_FORMAT_TYPE: - return new CompactBinaryFormat().decode(buffer); + case SPARSE_BINARY_FORMAT_TYPE: + return new SparseBinaryFormat().decode(buffer); default: throw new IllegalArgumentException("Binary format type " + formatType + " is not a known format"); } diff --git a/vespajlib/src/test/java/com/yahoo/tensor/serialization/CompactBinaryFormatTestCase.java b/vespajlib/src/test/java/com/yahoo/tensor/serialization/SparseBinaryFormatTestCase.java index bfa7f5a8546..8580868dfdf 100644 --- 
a/vespajlib/src/test/java/com/yahoo/tensor/serialization/CompactBinaryFormatTestCase.java +++ b/vespajlib/src/test/java/com/yahoo/tensor/serialization/SparseBinaryFormatTestCase.java @@ -12,14 +12,14 @@ import java.util.Set; import static org.junit.Assert.assertEquals; /** - * Tests for the compact binary format. + * Tests for the sparse binary format. * * TODO: When new formats are added we should refactor this test to test all formats * with the same set of tensor inputs (if feasible). * * @author geirst */ -public class CompactBinaryFormatTestCase { +public class SparseBinaryFormatTestCase { private static void assertSerialization(String tensorString) { assertSerialization(MapTensor.from(tensorString)); diff --git a/vespalib/CMakeLists.txt b/vespalib/CMakeLists.txt index ec43cee3498..61ec9f1ccf9 100644 --- a/vespalib/CMakeLists.txt +++ b/vespalib/CMakeLists.txt @@ -72,15 +72,11 @@ vespa_define_module( src/tests/stllike src/tests/stringfmt src/tests/sync - src/tests/tensor/compact_tensor_builder - src/tests/tensor/compact_tensor_v2_builder + src/tests/tensor/sparse_tensor_builder + src/tests/tensor/dense_tensor_address_combiner src/tests/tensor/dense_tensor_builder src/tests/tensor/dense_tensor_operations - src/tests/tensor/join_tensor_addresses - src/tests/tensor/simple_tensor_builder - src/tests/tensor/tensor src/tests/tensor/tensor_address - src/tests/tensor/tensor_address_element_iterator src/tests/tensor/tensor_conformance src/tests/tensor/tensor_function src/tests/tensor/tensor_mapper @@ -132,10 +128,9 @@ vespa_define_module( src/vespa/vespalib/objects src/vespa/vespalib/stllike src/vespa/vespalib/tensor - src/vespa/vespalib/tensor/compact + src/vespa/vespalib/tensor/sparse src/vespa/vespalib/tensor/dense src/vespa/vespalib/tensor/serialization - src/vespa/vespalib/tensor/simple src/vespa/vespalib/test src/vespa/vespalib/testkit src/vespa/vespalib/text diff --git a/vespalib/src/apps/eval_expr/eval_expr.cpp b/vespalib/src/apps/eval_expr/eval_expr.cpp index 
9d3f0992867..dc5274cde47 100644 --- a/vespalib/src/apps/eval_expr/eval_expr.cpp +++ b/vespalib/src/apps/eval_expr/eval_expr.cpp @@ -20,7 +20,7 @@ int main(int argc, char **argv) { return 1; } InterpretedFunction::Context ctx; - InterpretedFunction interpreted(SimpleTensorEngine::ref(), function); + InterpretedFunction interpreted(SimpleTensorEngine::ref(), function, NodeTypes()); double result = interpreted.eval(ctx).as_double(); fprintf(stdout, "%.32g\n", result); return 0; diff --git a/vespalib/src/testlist.txt b/vespalib/src/testlist.txt index 5267b19cdfd..67982805df7 100644 --- a/vespalib/src/testlist.txt +++ b/vespalib/src/testlist.txt @@ -62,15 +62,12 @@ tests/stash tests/stllike tests/stringfmt tests/sync -tests/tensor/compact_tensor_builder -tests/tensor/compact_tensor_v2_builder +tests/tensor/sparse_tensor_builder tests/tensor/dense_tensor_builder tests/tensor/dense_tensor_operations -tests/tensor/join_tensor_addresses tests/tensor/simple_tensor_builder tests/tensor/tensor tests/tensor/tensor_address -tests/tensor/tensor_address_element_iterator tests/tensor/tensor_function tests/tensor/tensor_mapper tests/tensor/tensor_operations diff --git a/vespalib/src/tests/alloc/alloc_test.cpp b/vespalib/src/tests/alloc/alloc_test.cpp index 5f1ba897f61..b16afbcc7a6 100644 --- a/vespalib/src/tests/alloc/alloc_test.cpp +++ b/vespalib/src/tests/alloc/alloc_test.cpp @@ -8,6 +8,7 @@ LOG_SETUP("alloc_test"); #include <vespa/vespalib/util/exceptions.h> using namespace vespalib; +using namespace vespalib::alloc; class Test : public TestApp { @@ -36,7 +37,7 @@ Test::testSwap(T & a, T & b) void * tmpB(b.get()); EXPECT_EQUAL(100u, a.size()); EXPECT_EQUAL(200u, b.size()); - swap(a, b); + std::swap(a, b); EXPECT_EQUAL(100u, b.size()); EXPECT_EQUAL(200u, a.size()); EXPECT_EQUAL(tmpA, b.get()); @@ -47,31 +48,36 @@ void Test::testBasic() { { - HeapAlloc h(100); + Alloc h = HeapAllocFactory::create(100); EXPECT_EQUAL(100u, h.size()); EXPECT_TRUE(h.get() != NULL); } { - 
EXPECT_EXCEPTION(AlignedHeapAlloc(100, 0), IllegalArgumentException, "posix_memalign(100, 0) failed with code 22"); - AlignedHeapAlloc h(100, 1024); + EXPECT_EXCEPTION(AlignedHeapAllocFactory::create(100, 7), IllegalArgumentException, "AlignedHeapAllocFactory::create(100, 7) does not support 7 alignment"); + Alloc h = AlignedHeapAllocFactory::create(100, 1024); EXPECT_EQUAL(100u, h.size()); EXPECT_TRUE(h.get() != NULL); } { - MMapAlloc h(100); + Alloc h = MMapAllocFactory::create(100); EXPECT_EQUAL(100u, h.size()); EXPECT_TRUE(h.get() != NULL); } { - HeapAlloc a(100), b(200); + Alloc a = HeapAllocFactory::create(100), b = HeapAllocFactory::create(200); testSwap(a, b); } { - MMapAlloc a(100), b(200); + Alloc a = MMapAllocFactory::create(100), b = MMapAllocFactory::create(200); testSwap(a, b); } { - AlignedHeapAlloc a(100, 1024), b(200, 1024); + Alloc a = AlignedHeapAllocFactory::create(100, 1024), b = AlignedHeapAllocFactory::create(200, 1024); + testSwap(a, b); + } + { + Alloc a = HeapAllocFactory::create(100); + Alloc b = MMapAllocFactory::create(200); testSwap(a, b); } } @@ -80,13 +86,13 @@ void Test::testAlignedAllocation() { { - AutoAlloc<2048, 1024> buf(10); + Alloc buf = AutoAllocFactory::create(10, MemoryAllocator::HUGEPAGE_SIZE, 1024); EXPECT_TRUE(reinterpret_cast<ptrdiff_t>(buf.get()) % 1024 == 0); } { // Mmapped pointers are page-aligned, but sanity test anyway. - AutoAlloc<1024, 512> buf(3000); + Alloc buf = AutoAllocFactory::create(3000000, MemoryAllocator::HUGEPAGE_SIZE, 512); EXPECT_TRUE(reinterpret_cast<ptrdiff_t>(buf.get()) % 512 == 0); } } diff --git a/vespalib/src/tests/alloc/allocate_and_core.cpp b/vespalib/src/tests/alloc/allocate_and_core.cpp index 2cb4d447fd1..f0a0669eb42 100644 --- a/vespalib/src/tests/alloc/allocate_and_core.cpp +++ b/vespalib/src/tests/alloc/allocate_and_core.cpp @@ -1,14 +1,14 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include <vespa/vespalib/util/alloc.h> -using namespace vespalib; +using namespace vespalib::alloc; int main(int argc, char *argv[]) { (void) argc; (void) argv; - MMapAlloc small( 0x400000); //4M + Alloc small(MMapAllocFactory::create(0x400000)); //4M memset(small.get(), 0x55, small.size()); - MMapAlloc large(0x4000000); //640M + Alloc large(MMapAllocFactory::create(0x4000000)); //640M memset(large.get(), 0x66, large.size()); assert(false); } diff --git a/vespalib/src/tests/array/array_test.cpp b/vespalib/src/tests/array/array_test.cpp index 03b00769e7a..231bc00611a 100644 --- a/vespalib/src/tests/array/array_test.cpp +++ b/vespalib/src/tests/array/array_test.cpp @@ -28,8 +28,8 @@ private: namespace vespalib { -template <typename T, typename B> -std::ostream & operator << (std::ostream & os, const Array<T,B> & a) +template <typename T> +std::ostream & operator << (std::ostream & os, const Array<T> & a) { os << '{'; if (! a.empty()) { @@ -118,7 +118,7 @@ Test::Main() void Test::testThatOrganicGrowthIsBy2InNAndReserveResizeAreExact() { - Array<char, DefaultAlloc> c(256); + Array<char> c(256); EXPECT_EQUAL(256u, c.size()); EXPECT_EQUAL(256u, c.capacity()); c.reserve(258); @@ -157,7 +157,7 @@ void Test::testArray(const T & a, const T & b) { Array<T> array; - ASSERT_EQUAL(sizeof(array), 24u); + ASSERT_EQUAL(sizeof(array), 32u); ASSERT_EQUAL(array.size(), 0u); ASSERT_EQUAL(array.capacity(), 0u); for(size_t i(0); i < 5; i++) { diff --git a/vespalib/src/tests/eval/function_speed/function_speed_test.cpp b/vespalib/src/tests/eval/function_speed/function_speed_test.cpp index 888d4c6cab8..44e05f264dd 100644 --- a/vespalib/src/tests/eval/function_speed/function_speed_test.cpp +++ b/vespalib/src/tests/eval/function_speed/function_speed_test.cpp @@ -13,7 +13,7 @@ double sum_sum = 0.0; const char *function_str = "(0.35*p + 0.15*o + 0.30*q + 0.20*f) * w"; Function function_ast = Function::parse(params_5, function_str); -InterpretedFunction 
interpreted_function(SimpleTensorEngine::ref(), function_ast); +InterpretedFunction interpreted_function(SimpleTensorEngine::ref(), function_ast, NodeTypes()); CompiledFunction compiled_function(function_ast, PassParams::SEPARATE); auto jit_function = compiled_function.get_function<5>(); @@ -41,7 +41,7 @@ const char *big_function_str = "(0.35*p + 0.15*o + 0.30*q + 0.20*f) * w + " "(0.35*p + 0.15*o + 0.30*q + 0.20*f) * w"; Function big_function_ast = Function::parse(params_5, big_function_str); -InterpretedFunction big_interpreted_function(SimpleTensorEngine::ref(), big_function_ast); +InterpretedFunction big_interpreted_function(SimpleTensorEngine::ref(), big_function_ast, NodeTypes()); CompiledFunction big_compiled_function(big_function_ast, PassParams::SEPARATE); auto big_jit_function = big_compiled_function.get_function<5>(); diff --git a/vespalib/src/tests/eval/gbdt/gbdt_test.cpp b/vespalib/src/tests/eval/gbdt/gbdt_test.cpp index 74e4b8d1880..195836d9827 100644 --- a/vespalib/src/tests/eval/gbdt/gbdt_test.cpp +++ b/vespalib/src/tests/eval/gbdt/gbdt_test.cpp @@ -16,7 +16,7 @@ using namespace vespalib::eval::gbdt; //----------------------------------------------------------------------------- double eval_double(const Function &function, const std::vector<double> ¶ms) { - InterpretedFunction ifun(SimpleTensorEngine::ref(), function); + InterpretedFunction ifun(SimpleTensorEngine::ref(), function, NodeTypes()); InterpretedFunction::Context ctx; for (double param: params) { ctx.add_param(param); diff --git a/vespalib/src/tests/eval/interpreted_function/interpreted_function_test.cpp b/vespalib/src/tests/eval/interpreted_function/interpreted_function_test.cpp index 7a53ae3ac21..cffd5396bf4 100644 --- a/vespalib/src/tests/eval/interpreted_function/interpreted_function_test.cpp +++ b/vespalib/src/tests/eval/interpreted_function/interpreted_function_test.cpp @@ -26,7 +26,7 @@ struct MyEvalTest : test::EvalSpec::EvalTest { { Function fun = Function::parse(param_names, 
expression); EXPECT_EQUAL(fun.num_params(), param_values.size()); - InterpretedFunction ifun(SimpleTensorEngine::ref(), fun); + InterpretedFunction ifun(SimpleTensorEngine::ref(), fun, NodeTypes()); InterpretedFunction::Context ictx; for (double param: param_values) { ictx.add_param(param); @@ -61,7 +61,7 @@ TEST("require that invalid function evaluates to a error") { std::vector<vespalib::string> params({"x", "y", "z", "w"}); Function function = Function::parse(params, "x & y"); EXPECT_TRUE(function.has_error()); - InterpretedFunction ifun(SimpleTensorEngine::ref(), function); + InterpretedFunction ifun(SimpleTensorEngine::ref(), function, NodeTypes()); InterpretedFunction::Context ctx; ctx.add_param(1); ctx.add_param(2); @@ -76,7 +76,7 @@ TEST("require that invalid function evaluates to a error") { size_t count_ifs(const vespalib::string &expr, std::initializer_list<double> params_in) { Function fun = Function::parse(expr); - InterpretedFunction ifun(SimpleTensorEngine::ref(), fun); + InterpretedFunction ifun(SimpleTensorEngine::ref(), fun, NodeTypes()); InterpretedFunction::Context ctx; for (double param: params_in) { ctx.add_param(param); @@ -102,7 +102,7 @@ TEST("require that interpreted function instructions have expected size") { TEST("require that basic addition works") { Function function = Function::parse("a+10"); - InterpretedFunction interpreted(SimpleTensorEngine::ref(), function); + InterpretedFunction interpreted(SimpleTensorEngine::ref(), function, NodeTypes()); InterpretedFunction::Context ctx; ctx.add_param(20); EXPECT_EQUAL(interpreted.eval(ctx).as_double(), 30.0); diff --git a/vespalib/src/tests/eval/node_types/node_types_test.cpp b/vespalib/src/tests/eval/node_types/node_types_test.cpp index 08b843cdfda..6eaabe482d8 100644 --- a/vespalib/src/tests/eval/node_types/node_types_test.cpp +++ b/vespalib/src/tests/eval/node_types/node_types_test.cpp @@ -240,4 +240,23 @@ TEST("require that various operations resolve appropriate type") { 
TEST_DO(verify_op1("sigmoid(%s)")); // Sigmoid } +TEST("require that double only expressions can be detected") { + Function plain_fun = Function::parse("1+2"); + Function complex_fun = Function::parse("sum({{x:1,y:2}:3})"); + NodeTypes plain_types(plain_fun, {}); + NodeTypes complex_types(complex_fun, {}); + EXPECT_TRUE(plain_types.get_type(plain_fun.root()).is_double()); + EXPECT_TRUE(complex_types.get_type(complex_fun.root()).is_double()); + EXPECT_TRUE(plain_types.all_types_are_double()); + EXPECT_FALSE(complex_types.all_types_are_double()); +} + +TEST("require that empty type repo works as expected") { + NodeTypes types; + Function function = Function::parse("1+2"); + EXPECT_FALSE(function.has_error()); + EXPECT_TRUE(types.get_type(function.root()).is_any()); + EXPECT_FALSE(types.all_types_are_double()); +} + TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/vespalib/src/tests/eval/simple_tensor/simple_tensor_test.cpp b/vespalib/src/tests/eval/simple_tensor/simple_tensor_test.cpp index 775c2b72e0a..33812779a30 100644 --- a/vespalib/src/tests/eval/simple_tensor/simple_tensor_test.cpp +++ b/vespalib/src/tests/eval/simple_tensor/simple_tensor_test.cpp @@ -16,101 +16,68 @@ using Stash = vespalib::Stash; // need to specify numbers explicitly as size_t to avoid ambiguous behavior for 0 constexpr size_t operator "" _z (unsigned long long int n) { return n; } -void dump(const Cells &cells, std::ostream &out) { - out << std::endl; - for (const auto &cell: cells) { - size_t n = 0; - out << " ["; - for (const auto &label: cell.address) { - if (n++) { - out << ","; - } - if (label.is_mapped()) { - out << label.name; - } else { - out << label.index; - } - } - out << "]: " << cell.value << std::endl; - } +const Tensor &unwrap(const Value &value) { + ASSERT_TRUE(value.is_tensor()); + return *value.as_tensor(); } -struct Check { +struct CellBuilder { Cells cells; - Check() : cells() {} - explicit Check(const SimpleTensor &tensor) : cells() { - for (const auto &cell: tensor.cells()) 
{ - add(cell.address, cell.value); - } - } - explicit Check(const TensorSpec &spec) - : Check(*SimpleTensor::create(spec)) {} - Check &add(const Address &address, double value) { - cells.emplace_back(address, value); - std::sort(cells.begin(), cells.end(), - [](const auto &a, const auto &b){ return (a.address < b.address); }); + CellBuilder &add(const Address &addr, double value) { + cells.emplace_back(addr, value); return *this; } - bool operator==(const Check &rhs) const { - if (cells.size() != rhs.cells.size()) { - return false; - } - for (size_t i = 0; i < cells.size(); ++i) { - if ((cells[i].address != rhs.cells[i].address) || - (cells[i].value != rhs.cells[i].value)) - { - return false; - } - } - return true; - } + Cells build() { return cells; } }; -std::ostream &operator<<(std::ostream &out, const Check &value) { - dump(value.cells, out); - return out; -} - -const SimpleTensor &unwrap(const Tensor &tensor) { - ASSERT_EQUAL(&tensor.engine(), &SimpleTensorEngine::ref()); - return static_cast<const SimpleTensor &>(tensor); -} - -const SimpleTensor &unwrap(const Value &value) { - ASSERT_TRUE(value.is_tensor()); - return unwrap(*value.as_tensor()); -} - TEST("require that simple tensors can be built using tensor spec") { TensorSpec spec("tensor(w{},x[2],y{},z[2])"); spec.add({{"w", "xxx"}, {"x", 0}, {"y", "xxx"}, {"z", 0}}, 1.0) .add({{"w", "xxx"}, {"x", 0}, {"y", "yyy"}, {"z", 1}}, 2.0) .add({{"w", "yyy"}, {"x", 1}, {"y", "xxx"}, {"z", 0}}, 3.0) .add({{"w", "yyy"}, {"x", 1}, {"y", "yyy"}, {"z", 1}}, 4.0); - std::unique_ptr<SimpleTensor> tensor = SimpleTensor::create(spec); - Check expect = Check() - .add({{"xxx"}, {0_z}, {"xxx"}, {0_z}}, 1.0) - .add({{"xxx"}, {0_z}, {"xxx"}, {1_z}}, 0.0) - .add({{"xxx"}, {1_z}, {"xxx"}, {0_z}}, 0.0) - .add({{"xxx"}, {1_z}, {"xxx"}, {1_z}}, 0.0) - //----------------------------------------- - .add({{"xxx"}, {0_z}, {"yyy"}, {0_z}}, 0.0) - .add({{"xxx"}, {0_z}, {"yyy"}, {1_z}}, 2.0) - .add({{"xxx"}, {1_z}, {"yyy"}, {0_z}}, 0.0) - 
.add({{"xxx"}, {1_z}, {"yyy"}, {1_z}}, 0.0) - //----------------------------------------- - .add({{"yyy"}, {0_z}, {"xxx"}, {0_z}}, 0.0) - .add({{"yyy"}, {0_z}, {"xxx"}, {1_z}}, 0.0) - .add({{"yyy"}, {1_z}, {"xxx"}, {0_z}}, 3.0) - .add({{"yyy"}, {1_z}, {"xxx"}, {1_z}}, 0.0) - //----------------------------------------- - .add({{"yyy"}, {0_z}, {"yyy"}, {0_z}}, 0.0) - .add({{"yyy"}, {0_z}, {"yyy"}, {1_z}}, 0.0) - .add({{"yyy"}, {1_z}, {"yyy"}, {0_z}}, 0.0) - .add({{"yyy"}, {1_z}, {"yyy"}, {1_z}}, 4.0); - EXPECT_EQUAL(expect, Check(*tensor)); - std::unique_ptr<Tensor> tensor2 = SimpleTensorEngine::ref().create(spec); - EXPECT_EQUAL(expect, Check(unwrap(*tensor2))); + auto tensor = SimpleTensorEngine::ref().create(spec); + TensorSpec full_spec("tensor(w{},x[2],y{},z[2])"); + full_spec + .add({{"w", "xxx"}, {"x", 0}, {"y", "xxx"}, {"z", 0}}, 1.0) + .add({{"w", "xxx"}, {"x", 0}, {"y", "xxx"}, {"z", 1}}, 0.0) + .add({{"w", "xxx"}, {"x", 0}, {"y", "yyy"}, {"z", 0}}, 0.0) + .add({{"w", "xxx"}, {"x", 0}, {"y", "yyy"}, {"z", 1}}, 2.0) + .add({{"w", "xxx"}, {"x", 1}, {"y", "xxx"}, {"z", 0}}, 0.0) + .add({{"w", "xxx"}, {"x", 1}, {"y", "xxx"}, {"z", 1}}, 0.0) + .add({{"w", "xxx"}, {"x", 1}, {"y", "yyy"}, {"z", 0}}, 0.0) + .add({{"w", "xxx"}, {"x", 1}, {"y", "yyy"}, {"z", 1}}, 0.0) + .add({{"w", "yyy"}, {"x", 0}, {"y", "xxx"}, {"z", 0}}, 0.0) + .add({{"w", "yyy"}, {"x", 0}, {"y", "xxx"}, {"z", 1}}, 0.0) + .add({{"w", "yyy"}, {"x", 0}, {"y", "yyy"}, {"z", 0}}, 0.0) + .add({{"w", "yyy"}, {"x", 0}, {"y", "yyy"}, {"z", 1}}, 0.0) + .add({{"w", "yyy"}, {"x", 1}, {"y", "xxx"}, {"z", 0}}, 3.0) + .add({{"w", "yyy"}, {"x", 1}, {"y", "xxx"}, {"z", 1}}, 0.0) + .add({{"w", "yyy"}, {"x", 1}, {"y", "yyy"}, {"z", 0}}, 0.0) + .add({{"w", "yyy"}, {"x", 1}, {"y", "yyy"}, {"z", 1}}, 4.0); + auto full_tensor = SimpleTensorEngine::ref().create(full_spec); + SimpleTensor expect_tensor(ValueType::from_spec("tensor(w{},x[2],y{},z[2])"), + CellBuilder() + .add({{"xxx"}, {0_z}, {"xxx"}, {0_z}}, 1.0) + 
.add({{"xxx"}, {0_z}, {"xxx"}, {1_z}}, 0.0) + .add({{"xxx"}, {0_z}, {"yyy"}, {0_z}}, 0.0) + .add({{"xxx"}, {0_z}, {"yyy"}, {1_z}}, 2.0) + .add({{"xxx"}, {1_z}, {"xxx"}, {0_z}}, 0.0) + .add({{"xxx"}, {1_z}, {"xxx"}, {1_z}}, 0.0) + .add({{"xxx"}, {1_z}, {"yyy"}, {0_z}}, 0.0) + .add({{"xxx"}, {1_z}, {"yyy"}, {1_z}}, 0.0) + .add({{"yyy"}, {0_z}, {"xxx"}, {0_z}}, 0.0) + .add({{"yyy"}, {0_z}, {"xxx"}, {1_z}}, 0.0) + .add({{"yyy"}, {0_z}, {"yyy"}, {0_z}}, 0.0) + .add({{"yyy"}, {0_z}, {"yyy"}, {1_z}}, 0.0) + .add({{"yyy"}, {1_z}, {"xxx"}, {0_z}}, 3.0) + .add({{"yyy"}, {1_z}, {"xxx"}, {1_z}}, 0.0) + .add({{"yyy"}, {1_z}, {"yyy"}, {0_z}}, 0.0) + .add({{"yyy"}, {1_z}, {"yyy"}, {1_z}}, 4.0) + .build()); + EXPECT_EQUAL(expect_tensor, *tensor); + EXPECT_EQUAL(expect_tensor, *full_tensor); + EXPECT_EQUAL(full_spec, tensor->engine().to_spec(*tensor)); }; TEST("require that simple tensors can have their values negated") { @@ -125,10 +92,10 @@ TEST("require that simple tensors can have their values negated") { .add({{"x","2"},{"y","1"}}, 3) .add({{"x","1"},{"y","2"}}, -5)); auto result = SimpleTensor::perform(operation::Neg(), *tensor); - EXPECT_EQUAL(Check(*expect), Check(*result)); + EXPECT_EQUAL(*expect, *result); Stash stash; const Value &result2 = SimpleTensorEngine::ref().map(operation::Neg(), *tensor, stash); - EXPECT_EQUAL(Check(*expect), Check(unwrap(result2))); + EXPECT_EQUAL(*expect, unwrap(result2)); } TEST("require that simple tensors can be multiplied with each other") { @@ -150,10 +117,10 @@ TEST("require that simple tensors can be multiplied with each other") { .add({{"x","2"},{"y","1"},{"z","2"}}, 39) .add({{"x","1"},{"y","2"},{"z","1"}}, 55)); auto result = SimpleTensor::perform(operation::Mul(), *lhs, *rhs); - EXPECT_EQUAL(Check(*expect), Check(*result)); + EXPECT_EQUAL(*expect, *result); Stash stash; const Value &result2 = SimpleTensorEngine::ref().apply(operation::Mul(), *lhs, *rhs, stash); - EXPECT_EQUAL(Check(*expect), Check(unwrap(result2))); + 
EXPECT_EQUAL(*expect, unwrap(result2)); } TEST("require that simple tensors support dimension reduction") { @@ -178,22 +145,22 @@ TEST("require that simple tensors support dimension reduction") { auto result_sum_y = tensor->reduce(operation::Add(), {"y"}); auto result_sum_x = tensor->reduce(operation::Add(), {"x"}); auto result_sum_all = tensor->reduce(operation::Add(), {"x", "y"}); - EXPECT_EQUAL(Check(*expect_sum_y), Check(*result_sum_y)); - EXPECT_EQUAL(Check(*expect_sum_x), Check(*result_sum_x)); - EXPECT_EQUAL(Check(*expect_sum_all), Check(*result_sum_all)); + EXPECT_EQUAL(*expect_sum_y, *result_sum_y); + EXPECT_EQUAL(*expect_sum_x, *result_sum_x); + EXPECT_EQUAL(*expect_sum_all, *result_sum_all); Stash stash; const Value &result_sum_y_2 = SimpleTensorEngine::ref().reduce(*tensor, operation::Add(), {"y"}, stash); const Value &result_sum_x_2 = SimpleTensorEngine::ref().reduce(*tensor, operation::Add(), {"x"}, stash); const Value &result_sum_all_2 = SimpleTensorEngine::ref().reduce(*tensor, operation::Add(), {"x", "y"}, stash); const Value &result_sum_all_3 = SimpleTensorEngine::ref().reduce(*tensor, operation::Add(), {}, stash); - EXPECT_EQUAL(Check(*expect_sum_y), Check(unwrap(result_sum_y_2))); - EXPECT_EQUAL(Check(*expect_sum_x), Check(unwrap(result_sum_x_2))); + EXPECT_EQUAL(*expect_sum_y, unwrap(result_sum_y_2)); + EXPECT_EQUAL(*expect_sum_x, unwrap(result_sum_x_2)); EXPECT_TRUE(result_sum_all_2.is_double()); EXPECT_TRUE(result_sum_all_3.is_double()); EXPECT_EQUAL(21, result_sum_all_2.as_double()); EXPECT_EQUAL(21, result_sum_all_3.as_double()); - EXPECT_TRUE(SimpleTensorEngine::ref().equal(*result_sum_y, *result_sum_y)); - EXPECT_TRUE(!SimpleTensorEngine::ref().equal(*result_sum_y, *result_sum_x)); + EXPECT_EQUAL(*result_sum_y, *result_sum_y); + EXPECT_NOT_EQUAL(*result_sum_y, *result_sum_x); } TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/vespalib/src/tests/eval/tensor/eval_tensor_test.cpp b/vespalib/src/tests/eval/tensor/eval_tensor_test.cpp index 
e381ae88cbe..0431133068d 100644 --- a/vespalib/src/tests/eval/tensor/eval_tensor_test.cpp +++ b/vespalib/src/tests/eval/tensor/eval_tensor_test.cpp @@ -31,7 +31,7 @@ struct Eval { InterpretedFunction ifun; const Value *result; explicit Eval(const vespalib::string &expr) - : ctx(), ifun(tensor::DefaultTensorEngine::ref(), Function::parse(expr)), result(&ifun.eval(ctx)) {} + : ctx(), ifun(tensor::DefaultTensorEngine::ref(), Function::parse(expr), NodeTypes()), result(&ifun.eval(ctx)) {} bool operator==(const Eval &rhs) const { return result->equal(*rhs.result); } }; @@ -64,13 +64,13 @@ TEST("require that tensor sum over dimension works") { } TEST("require that tensor add works") { - EXPECT_EQUAL(Eval("{{x:1}:1,{x:2}:5,{x:3}:3}"), Eval("{{x:1}:1,{x:2}:2} + {{x:2}:3,{x:3}:3}")); - EXPECT_EQUAL(Eval("{{x:1}:1,{x:2}:5,{x:3}:3}"), Eval("{{x:2}:3,{x:3}:3} + {{x:1}:1,{x:2}:2}")); + EXPECT_EQUAL(Eval("{{x:2}:5}"), Eval("{{x:1}:1,{x:2}:2} + {{x:2}:3,{x:3}:3}")); + EXPECT_EQUAL(Eval("{{x:2}:5}"), Eval("{{x:2}:3,{x:3}:3} + {{x:1}:1,{x:2}:2}")); } TEST("require that tensor sub works") { - EXPECT_EQUAL(Eval("{{x:1}:1,{x:2}:-1,{x:3}:-3}"), Eval("{{x:1}:1,{x:2}:2} - {{x:2}:3,{x:3}:3}")); - EXPECT_EQUAL(Eval("{{x:1}:-1,{x:2}:1,{x:3}:3}"), Eval("{{x:2}:3,{x:3}:3} - {{x:1}:1,{x:2}:2}")); + EXPECT_EQUAL(Eval("{{x:2}:-1}"), Eval("{{x:1}:1,{x:2}:2} - {{x:2}:3,{x:3}:3}")); + EXPECT_EQUAL(Eval("{{x:2}:1}"), Eval("{{x:2}:3,{x:3}:3} - {{x:1}:1,{x:2}:2}")); } TEST("require that tensor multiply works") { @@ -78,13 +78,13 @@ TEST("require that tensor multiply works") { } TEST("require that tensor min works") { - EXPECT_EQUAL(Eval("{{x:1}:1,{x:2}:2,{x:3}:3}"), Eval("min({{x:1}:1,{x:2}:2}, {{x:2}:3,{x:3}:3})")); - EXPECT_EQUAL(Eval("{{x:1}:1,{x:2}:2,{x:3}:3}"), Eval("min({{x:2}:3,{x:3}:3}, {{x:1}:1,{x:2}:2})")); + EXPECT_EQUAL(Eval("{{x:2}:2}"), Eval("min({{x:1}:1,{x:2}:2}, {{x:2}:3,{x:3}:3})")); + EXPECT_EQUAL(Eval("{{x:2}:2}"), Eval("min({{x:2}:3,{x:3}:3}, {{x:1}:1,{x:2}:2})")); } TEST("require 
that tensor max works") { - EXPECT_EQUAL(Eval("{{x:1}:1,{x:2}:3,{x:3}:3}"), Eval("max({{x:1}:1,{x:2}:2}, {{x:2}:3,{x:3}:3})")); - EXPECT_EQUAL(Eval("{{x:1}:1,{x:2}:3,{x:3}:3}"), Eval("max({{x:2}:3,{x:3}:3}, {{x:1}:1,{x:2}:2})")); + EXPECT_EQUAL(Eval("{{x:2}:3}"), Eval("max({{x:1}:1,{x:2}:2}, {{x:2}:3,{x:3}:3})")); + EXPECT_EQUAL(Eval("{{x:2}:3}"), Eval("max({{x:2}:3,{x:3}:3}, {{x:1}:1,{x:2}:2})")); } TEST("require that tensor match works") { diff --git a/vespalib/src/tests/exception_classes/exception_classes_test.cpp b/vespalib/src/tests/exception_classes/exception_classes_test.cpp index f9413970281..946c3fa32e4 100644 --- a/vespalib/src/tests/exception_classes/exception_classes_test.cpp +++ b/vespalib/src/tests/exception_classes/exception_classes_test.cpp @@ -33,4 +33,17 @@ TEST("require that PortListenException with cause retains relevant information") } } +TEST("test that OOMException carries message forward.") { + const char * M = "This is the simple message."; + bool caught(false); + try { + throw OOMException(M); + ASSERT_TRUE(false); + } catch (OOMException & e) { + EXPECT_EQUAL(0, strcmp(M, e.what())); + caught = true; + } + EXPECT_TRUE(caught); +} + TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/vespalib/src/tests/exception_classes/mmap.cpp b/vespalib/src/tests/exception_classes/mmap.cpp index 56c2b5d8d67..fe0a8fc4556 100644 --- a/vespalib/src/tests/exception_classes/mmap.cpp +++ b/vespalib/src/tests/exception_classes/mmap.cpp @@ -1,6 +1,6 @@ #include <vespa/vespalib/util/alloc.h> -using vespalib::MMapAlloc; +using namespace vespalib::alloc; int main(int argc, char *argv[]) { if (argc != 4) { @@ -13,9 +13,9 @@ int main(int argc, char *argv[]) { virtualLimit.rlim_cur = virt; virtualLimit.rlim_max = virt; assert(setrlimit(RLIMIT_AS, &virtualLimit) == 0); - std::vector<MMapAlloc> mappings; + std::vector<Alloc> mappings; for (size_t i(0); i < numBlocks; i++) { - mappings.emplace_back(blockSize); + mappings.emplace_back(MMapAllocFactory::create(blockSize)); 
memset(mappings.back().get(), 0xa5, mappings.back().size()); } return 0; diff --git a/vespalib/src/tests/exception_classes/silenceuncaught_test.cpp b/vespalib/src/tests/exception_classes/silenceuncaught_test.cpp index 17961872c76..0f8e3de4595 100644 --- a/vespalib/src/tests/exception_classes/silenceuncaught_test.cpp +++ b/vespalib/src/tests/exception_classes/silenceuncaught_test.cpp @@ -6,43 +6,43 @@ using namespace vespalib; TEST("that uncaught exception causes negative exitcode.") { - SlaveProc proc("./vespalib_caught_uncaught_app uncaught"); + SlaveProc proc("exec ./vespalib_caught_uncaught_app uncaught"); proc.wait(); EXPECT_LESS(proc.getExitCode(), 0); } TEST("that uncaught silenced exception causes exitcode 66") { - SlaveProc proc("./vespalib_caught_uncaught_app silenced_and_uncaught"); + SlaveProc proc("exec ./vespalib_caught_uncaught_app silenced_and_uncaught"); proc.wait(); EXPECT_EQUAL(proc.getExitCode(), 66); } TEST("that caught silenced exception followed by an uncaught causes negative exitcode.") { - SlaveProc proc("./vespalib_caught_uncaught_app uncaught_after_silenced_and_caught"); + SlaveProc proc("exec ./vespalib_caught_uncaught_app uncaught_after_silenced_and_caught"); proc.wait(); EXPECT_LESS(proc.getExitCode(), 0); } TEST("that caught silenced exception causes exitcode 0") { - SlaveProc proc("./vespalib_caught_uncaught_app silenced_and_caught"); + SlaveProc proc("exec ./vespalib_caught_uncaught_app silenced_and_caught"); proc.wait(); EXPECT_EQUAL(proc.getExitCode(), 0); } TEST("that mmap within limits are fine cause exitcode 0") { - SlaveProc proc("./vespalib_mmap_app 100000000 10485760 1"); + SlaveProc proc("exec ./vespalib_mmap_app 100000000 10485760 1"); proc.wait(); EXPECT_EQUAL(proc.getExitCode(), 0); } TEST("that mmap beyond limits cause negative exitcode.") { - SlaveProc proc("./vespalib_mmap_app 100000000 10485760 10"); + SlaveProc proc("exec ./vespalib_mmap_app 100000000 10485760 10"); proc.wait(); EXPECT_LESS(proc.getExitCode(), 0); } 
TEST("that mmap beyond limits with set VESPA_SILENCE_CORE_ON_OOM cause exitcode 66.") { - SlaveProc proc("VESPA_SILENCE_CORE_ON_OOM=1 ./vespalib_mmap_app 100000000 10485760 10"); + SlaveProc proc("VESPA_SILENCE_CORE_ON_OOM=1 exec ./vespalib_mmap_app 100000000 10485760 10"); proc.wait(); EXPECT_EQUAL(proc.getExitCode(), 66); } diff --git a/vespalib/src/tests/tensor/compact_tensor_builder/.gitignore b/vespalib/src/tests/tensor/compact_tensor_builder/.gitignore deleted file mode 100644 index 57abfa0f6a9..00000000000 --- a/vespalib/src/tests/tensor/compact_tensor_builder/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vespalib_compact_tensor_builder_test_app diff --git a/vespalib/src/tests/tensor/compact_tensor_builder/CMakeLists.txt b/vespalib/src/tests/tensor/compact_tensor_builder/CMakeLists.txt deleted file mode 100644 index c9cd5c0387d..00000000000 --- a/vespalib/src/tests/tensor/compact_tensor_builder/CMakeLists.txt +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-vespa_add_executable(vespalib_compact_tensor_builder_test_app TEST - SOURCES - compact_tensor_builder_test.cpp - DEPENDS - vespalib - vespalib_vespalib_tensor -) -vespa_add_test(NAME vespalib_compact_tensor_builder_test_app COMMAND vespalib_compact_tensor_builder_test_app) diff --git a/vespalib/src/tests/tensor/compact_tensor_builder/FILES b/vespalib/src/tests/tensor/compact_tensor_builder/FILES deleted file mode 100644 index f31a47be268..00000000000 --- a/vespalib/src/tests/tensor/compact_tensor_builder/FILES +++ /dev/null @@ -1 +0,0 @@ -compact_tensor_builder_test.cpp diff --git a/vespalib/src/tests/tensor/compact_tensor_builder/compact_tensor_builder_test.cpp b/vespalib/src/tests/tensor/compact_tensor_builder/compact_tensor_builder_test.cpp deleted file mode 100644 index e8b5d3e141c..00000000000 --- a/vespalib/src/tests/tensor/compact_tensor_builder/compact_tensor_builder_test.cpp +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#include <vespa/vespalib/testkit/test_kit.h> -#include <vespa/vespalib/tensor/compact/compact_tensor_builder.h> - -using namespace vespalib::tensor; - - -void -assertCellValue(double expValue, const TensorAddress &address, const CompactTensor::Cells &cells) -{ - CompactTensorAddressBuilder addressBuilder; - for (const auto &element : address.elements()) { - addressBuilder.add(element.dimension(), element.label()); - } - CompactTensorAddressRef addressRef(addressBuilder.getAddressRef()); - auto itr = cells.find(addressRef); - EXPECT_FALSE(itr == cells.end()); - EXPECT_EQUAL(expValue, itr->second); -} - -TEST("require that tensor can be constructed") -{ - CompactTensorBuilder builder; - builder.add_label(builder.define_dimension("a"), "1"). - add_label(builder.define_dimension("b"), "2").add_cell(10). - add_label(builder.define_dimension("c"), "3"). 
- add_label(builder.define_dimension("d"), "4").add_cell(20); - Tensor::UP tensor = builder.build(); - const CompactTensor &compactTensor = dynamic_cast<const CompactTensor &>(*tensor); - const CompactTensor::Cells &cells = compactTensor.cells(); - EXPECT_EQUAL(2u, cells.size()); - assertCellValue(10, TensorAddress({{"a","1"},{"b","2"}}), cells); - assertCellValue(20, TensorAddress({{"c","3"},{"d","4"}}), cells); -} - -TEST("require that dimensions are extracted") -{ - CompactTensorBuilder builder; - builder.define_dimension("c"); - builder.define_dimension("a"); - builder.define_dimension("b"); - builder. - add_label(builder.define_dimension("a"), "1"). - add_label(builder.define_dimension("b"), "2").add_cell(10). - add_label(builder.define_dimension("b"), "3"). - add_label(builder.define_dimension("c"), "4").add_cell(20); - Tensor::UP tensor = builder.build(); - const CompactTensor &compactTensor = dynamic_cast<const CompactTensor &>(*tensor); - const CompactTensor::Dimensions &dims = compactTensor.dimensions(); - EXPECT_EQUAL(3u, dims.size()); - EXPECT_EQUAL("a", dims[0]); - EXPECT_EQUAL("b", dims[1]); - EXPECT_EQUAL("c", dims[2]); - EXPECT_EQUAL("tensor(a{},b{},c{})", compactTensor.getType().to_spec()); -} - -TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/vespalib/src/tests/tensor/compact_tensor_v2_builder/.gitignore b/vespalib/src/tests/tensor/compact_tensor_v2_builder/.gitignore deleted file mode 100644 index 22edb7555be..00000000000 --- a/vespalib/src/tests/tensor/compact_tensor_v2_builder/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vespalib_compact_tensor_v2_builder_test_app diff --git a/vespalib/src/tests/tensor/compact_tensor_v2_builder/CMakeLists.txt b/vespalib/src/tests/tensor/compact_tensor_v2_builder/CMakeLists.txt deleted file mode 100644 index 2bddcd3f021..00000000000 --- a/vespalib/src/tests/tensor/compact_tensor_v2_builder/CMakeLists.txt +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. 
See LICENSE in the project root. -vespa_add_executable(vespalib_compact_tensor_v2_builder_test_app TEST - SOURCES - compact_tensor_v2_builder_test.cpp - DEPENDS - vespalib - vespalib_vespalib_tensor -) -vespa_add_test(NAME vespalib_compact_tensor_v2_builder_test_app COMMAND vespalib_compact_tensor_v2_builder_test_app) diff --git a/vespalib/src/tests/tensor/compact_tensor_v2_builder/FILES b/vespalib/src/tests/tensor/compact_tensor_v2_builder/FILES deleted file mode 100644 index 196c68833da..00000000000 --- a/vespalib/src/tests/tensor/compact_tensor_v2_builder/FILES +++ /dev/null @@ -1 +0,0 @@ -compact_tensor_v2_builder_test.cpp diff --git a/vespalib/src/tests/tensor/dense_tensor_address_combiner/CMakeLists.txt b/vespalib/src/tests/tensor/dense_tensor_address_combiner/CMakeLists.txt new file mode 100644 index 00000000000..65e7c711b19 --- /dev/null +++ b/vespalib/src/tests/tensor/dense_tensor_address_combiner/CMakeLists.txt @@ -0,0 +1,9 @@ +# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+vespa_add_executable(vespalib_dense_tensor_address_combiner_test_app TEST + SOURCES + dense_tensor_address_combiner_test.cpp + DEPENDS + vespalib + vespalib_vespalib_tensor +) +vespa_add_test(NAME vespalib_dense_tensor_address_combiner_test_app COMMAND vespalib_dense_tensor_address_combiner_test_app) diff --git a/vespalib/src/tests/tensor/dense_tensor_address_combiner/FILES b/vespalib/src/tests/tensor/dense_tensor_address_combiner/FILES new file mode 100644 index 00000000000..0a49bd4647b --- /dev/null +++ b/vespalib/src/tests/tensor/dense_tensor_address_combiner/FILES @@ -0,0 +1 @@ +dense_tensor_address_combiner_test.cpp diff --git a/vespalib/src/tests/tensor/dense_tensor_address_combiner/dense_tensor_address_combiner_test.cpp b/vespalib/src/tests/tensor/dense_tensor_address_combiner/dense_tensor_address_combiner_test.cpp new file mode 100644 index 00000000000..1192469e006 --- /dev/null +++ b/vespalib/src/tests/tensor/dense_tensor_address_combiner/dense_tensor_address_combiner_test.cpp @@ -0,0 +1,36 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#include <vespa/vespalib/testkit/test_kit.h> +#include <vespa/vespalib/tensor/dense/dense_tensor_address_combiner.h> +#include <vespa/vespalib/test/insertion_operators.h> + +using namespace vespalib::tensor; +using DimensionsMeta = DenseTensor::DimensionsMeta; + +std::ostream & +operator<<(std::ostream &out, const DenseTensor::DimensionMeta &dimMeta) +{ + out << dimMeta.dimension() << "[" << dimMeta.size() << "]"; + return out; +} + +DimensionsMeta +combine(const DimensionsMeta &lhs, const DimensionsMeta &rhs) +{ + return DenseTensorAddressCombiner::combineDimensions(lhs, rhs); +} + +TEST("require that dimensions can be combined") +{ + EXPECT_EQUAL(DimensionsMeta({{"a", 3}, {"b", 5}}), combine({{"a", 3}}, {{"b", 5}})); + EXPECT_EQUAL(DimensionsMeta({{"a", 3}, {"b", 5}}), combine({{"a", 3}, {"b", 5}}, {{"b", 5}})); + EXPECT_EQUAL(DimensionsMeta({{"a", 3}, {"b", 5}}), combine({{"a", 3}, {"b", 7}}, {{"b", 5}})); + EXPECT_EQUAL(DimensionsMeta({{"a", 3}, {"b", 11}, {"c", 5}, {"d", 7}, {"e", 17}}), + combine({{"a", 3}, {"c", 5}, {"d", 7}}, + {{"b", 11}, {"c", 13}, {"e", 17}})); + EXPECT_EQUAL(DimensionsMeta({{"a", 3}, {"b", 11}, {"c", 5}, {"d", 7}, {"e", 17}}), + combine({{"b", 11}, {"c", 13}, {"e", 17}}, + {{"a", 3}, {"c", 5}, {"d", 7}})); +} + +TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/vespalib/src/tests/tensor/dense_tensor_builder/dense_tensor_builder_test.cpp b/vespalib/src/tests/tensor/dense_tensor_builder/dense_tensor_builder_test.cpp index 8478d46e1f4..595b3743625 100644 --- a/vespalib/src/tests/tensor/dense_tensor_builder/dense_tensor_builder_test.cpp +++ b/vespalib/src/tests/tensor/dense_tensor_builder/dense_tensor_builder_test.cpp @@ -4,11 +4,11 @@ #include <vespa/vespalib/testkit/test_kit.h> #include <vespa/vespalib/tensor/dense/dense_tensor_builder.h> #include <vespa/vespalib/util/exceptions.h> -#include <algorithm> using namespace vespalib::tensor; using vespalib::IllegalArgumentException; using Builder = DenseTensorBuilder; +using 
vespalib::eval::TensorSpec; void assertTensor(const DenseTensor::DimensionsMeta &expDims, @@ -20,33 +20,71 @@ assertTensor(const DenseTensor::DimensionsMeta &expDims, EXPECT_EQUAL(expCells, realTensor.cells()); } +void +assertTensorSpec(const TensorSpec &expSpec, const Tensor &tensor) +{ + TensorSpec actSpec = tensor.toSpec(); + EXPECT_EQUAL(expSpec, actSpec); +} + struct Fixture { Builder builder; }; +Tensor::UP +build1DTensor(Builder &builder) +{ + Builder::Dimension dimX = builder.defineDimension("x", 3); + builder.addLabel(dimX, 0).addCell(10). + addLabel(dimX, 1).addCell(11). + addLabel(dimX, 2).addCell(12); + return builder.build(); +} + TEST_F("require that 1d tensor can be constructed", Fixture) { - Builder::Dimension dimX = f.builder.defineDimension("x", 3); - f.builder.addLabel(dimX, 0).addCell(10). - addLabel(dimX, 1).addCell(11). - addLabel(dimX, 2).addCell(12); - assertTensor({{"x",3}}, {10,11,12}, - *f.builder.build()); + assertTensor({{"x",3}}, {10,11,12}, *build1DTensor(f.builder)); +} + +TEST_F("require that 1d tensor can be converted to tensor spec", Fixture) +{ + assertTensorSpec(TensorSpec("tensor(x[3])"). + add({{"x", 0}}, 10). + add({{"x", 1}}, 11). + add({{"x", 2}}, 12), + *build1DTensor(f.builder)); +} + +Tensor::UP +build2DTensor(Builder &builder) +{ + Builder::Dimension dimX = builder.defineDimension("x", 3); + Builder::Dimension dimY = builder.defineDimension("y", 2); + builder.addLabel(dimX, 0).addLabel(dimY, 0).addCell(10). + addLabel(dimX, 0).addLabel(dimY, 1).addCell(11). + addLabel(dimX, 1).addLabel(dimY, 0).addCell(12). + addLabel(dimX, 1).addLabel(dimY, 1).addCell(13). + addLabel(dimX, 2).addLabel(dimY, 0).addCell(14). 
+ addLabel(dimX, 2).addLabel(dimY, 1).addCell(15); + return builder.build(); } TEST_F("require that 2d tensor can be constructed", Fixture) { - Builder::Dimension dimX = f.builder.defineDimension("x", 3); - Builder::Dimension dimY = f.builder.defineDimension("y", 2); - f.builder.addLabel(dimX, 0).addLabel(dimY, 0).addCell(10). - addLabel(dimX, 0).addLabel(dimY, 1).addCell(11). - addLabel(dimX, 1).addLabel(dimY, 0).addCell(12). - addLabel(dimX, 1).addLabel(dimY, 1).addCell(13). - addLabel(dimX, 2).addLabel(dimY, 0).addCell(14). - addLabel(dimX, 2).addLabel(dimY, 1).addCell(15); - assertTensor({{"x",3},{"y",2}}, {10,11,12,13,14,15}, - *f.builder.build()); + assertTensor({{"x",3},{"y",2}}, {10,11,12,13,14,15}, *build2DTensor(f.builder)); +} + +TEST_F("require that 2d tensor can be converted to tensor spec", Fixture) +{ + assertTensorSpec(TensorSpec("tensor(x[3],y[2])"). + add({{"x", 0},{"y", 0}}, 10). + add({{"x", 0},{"y", 1}}, 11). + add({{"x", 1},{"y", 0}}, 12). + add({{"x", 1},{"y", 1}}, 13). + add({{"x", 2},{"y", 0}}, 14). 
+ add({{"x", 2},{"y", 1}}, 15), + *build2DTensor(f.builder)); } TEST_F("require that 3d tensor can be constructed", Fixture) @@ -189,7 +227,6 @@ TEST_F("require that already specified label throws exception", Fixture) "Label for dimension 'x' is already specified with value '0'"); } - TEST_F("require that dimensions are sorted", Fixture) { Builder::Dimension dimY = f.builder.defineDimension("y", 3); @@ -205,4 +242,9 @@ TEST_F("require that dimensions are sorted", Fixture) EXPECT_EQUAL("tensor(x[5],y[3])", denseTensor.getType().to_spec()); } + + + + + TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/vespalib/src/tests/tensor/dense_tensor_operations/dense_tensor_operations_test.cpp b/vespalib/src/tests/tensor/dense_tensor_operations/dense_tensor_operations_test.cpp index aea81ad6b77..69642fb1658 100644 --- a/vespalib/src/tests/tensor/dense_tensor_operations/dense_tensor_operations_test.cpp +++ b/vespalib/src/tests/tensor/dense_tensor_operations/dense_tensor_operations_test.cpp @@ -212,76 +212,88 @@ template <typename FixtureType> void testTensorAdd(FixtureType &f) { - f.assertAdd({},{},{}, false); - f.assertAdd({ {{{"x",0}}, 8} }, - { {{{"x",0}}, 3} }, - { {{{"x",0}}, 5} }); - f.assertAdd({ {{{"x",0}}, -2} }, - { {{{"x",0}}, 3} }, - { {{{"x",0}}, -5} }); - f.assertAdd({ {{{"x",0}}, 10}, {{{"x",1}}, 16} }, - { {{{"x",0}}, 3}, {{{"x",1}}, 5} }, - { {{{"x",0}}, 7}, {{{"x",1}}, 11} }); - f.assertAdd({ {{{"x",0},{"y",0}}, 8} }, - { {{{"x",0},{"y",0}}, 3} }, - { {{{"x",0},{"y",0}}, 5} }); + TEST_DO(f.assertAdd({},{},{}, false)); + TEST_DO(f.assertAdd({ {{{"x",0}}, 8} }, + { {{{"x",0}}, 3} }, + { {{{"x",0}}, 5} })); + TEST_DO(f.assertAdd({ {{{"x",0}}, -2} }, + { {{{"x",0}}, 3} }, + { {{{"x",0}}, -5} })); + TEST_DO(f.assertAdd({ {{{"x",0}}, 10}, {{{"x",1}}, 16} }, + { {{{"x",0}}, 3}, {{{"x",1}}, 5} }, + { {{{"x",0}}, 7}, {{{"x",1}}, 11} })); + TEST_DO(f.assertAdd({ {{{"x",0},{"y",0}}, 8} }, + { {{{"x",0},{"y",0}}, 3} }, + { {{{"x",0},{"y",0}}, 5} })); + TEST_DO(f.assertAdd({ 
{{{"x",0}}, 3} }, + { {{{"x",0}}, 3} }, + { {{{"x",1}}, 5} })); } template <typename FixtureType> void testTensorSubtract(FixtureType &f) { - f.assertSubtract({},{},{}, false); - f.assertSubtract({ {{{"x",0}}, -2} }, - { {{{"x",0}}, 3} }, - { {{{"x",0}}, 5} }); - f.assertSubtract({ {{{"x",0}}, 8} }, - { {{{"x",0}}, 3} }, - { {{{"x",0}}, -5} }); - f.assertSubtract({ {{{"x",0}}, -4}, {{{"x",1}}, -6} }, - { {{{"x",0}}, 3}, {{{"x",1}}, 5} }, - { {{{"x",0}}, 7}, {{{"x",1}}, 11} }); - f.assertSubtract({ {{{"x",0},{"y",0}}, -2} }, - { {{{"x",0},{"y",0}}, 3} }, - { {{{"x",0},{"y",0}}, 5} }); + TEST_DO(f.assertSubtract({},{},{}, false)); + TEST_DO(f.assertSubtract({ {{{"x",0}}, -2} }, + { {{{"x",0}}, 3} }, + { {{{"x",0}}, 5} })); + TEST_DO(f.assertSubtract({ {{{"x",0}}, 8} }, + { {{{"x",0}}, 3} }, + { {{{"x",0}}, -5} })); + TEST_DO(f.assertSubtract({ {{{"x",0}}, -4}, {{{"x",1}}, -6} }, + { {{{"x",0}}, 3}, {{{"x",1}}, 5} }, + { {{{"x",0}}, 7}, {{{"x",1}}, 11} })); + TEST_DO(f.assertSubtract({ {{{"x",0},{"y",0}}, -2} }, + { {{{"x",0},{"y",0}}, 3} }, + { {{{"x",0},{"y",0}}, 5} })); + TEST_DO(f.assertSubtract({ {{{"x",0}}, -5} }, + { {{{"x",1}}, 3} }, + { {{{"x",0}}, 5} })); } template <typename FixtureType> void testTensorMin(FixtureType &f) { - f.assertMin({},{},{}, false); - f.assertMin({ {{{"x",0}}, 3} }, - { {{{"x",0}}, 3} }, - { {{{"x",0}}, 5} }); - f.assertMin({ {{{"x",0}}, -5} }, - { {{{"x",0}}, 3} }, - { {{{"x",0}}, -5} }); - f.assertMin({ {{{"x",0}}, 3}, {{{"x",1}}, 5} }, - { {{{"x",0}}, 3}, {{{"x",1}}, 5} }, - { {{{"x",0}}, 7}, {{{"x",1}}, 11} }); - f.assertMin({ {{{"x",0},{"y",0}}, 3} }, - { {{{"x",0},{"y",0}}, 3} }, - { {{{"x",0},{"y",0}}, 5} }); + TEST_DO(f.assertMin({},{},{}, false)); + TEST_DO(f.assertMin({ {{{"x",0}}, 3} }, + { {{{"x",0}}, 3} }, + { {{{"x",0}}, 5} })); + TEST_DO(f.assertMin({ {{{"x",0}}, -5} }, + { {{{"x",0}}, 3} }, + { {{{"x",0}}, -5} })); + TEST_DO(f.assertMin({ {{{"x",0}}, 3}, {{{"x",1}}, 5} }, + { {{{"x",0}}, 3}, {{{"x",1}}, 5} }, + { 
{{{"x",0}}, 7}, {{{"x",1}}, 11} })); + TEST_DO(f.assertMin({ {{{"x",0},{"y",0}}, 3} }, + { {{{"x",0},{"y",0}}, 3} }, + { {{{"x",0},{"y",0}}, 5} })); + TEST_DO(f.assertMin({ {{{"x",0}}, 0} }, + { {{{"x",1}}, 3} }, + { {{{"x",0}}, 5} })); } template <typename FixtureType> void testTensorMax(FixtureType &f) { - f.assertMax({},{},{}, false); - f.assertMax({ {{{"x",0}}, 5} }, - { {{{"x",0}}, 3} }, - { {{{"x",0}}, 5} }); - f.assertMax({ {{{"x",0}}, 3} }, - { {{{"x",0}}, 3} }, - { {{{"x",0}}, -5} }); - f.assertMax({ {{{"x",0}}, 7}, {{{"x",1}}, 11} }, - { {{{"x",0}}, 3}, {{{"x",1}}, 5} }, - { {{{"x",0}}, 7}, {{{"x",1}}, 11} }); - f.assertMax({ {{{"x",0},{"y",0}}, 5} }, - { {{{"x",0},{"y",0}}, 3} }, - { {{{"x",0},{"y",0}}, 5} }); + TEST_DO(f.assertMax({},{},{}, false)); + TEST_DO(f.assertMax({ {{{"x",0}}, 5} }, + { {{{"x",0}}, 3} }, + { {{{"x",0}}, 5} })); + TEST_DO(f.assertMax({ {{{"x",0}}, 3} }, + { {{{"x",0}}, 3} }, + { {{{"x",0}}, -5} })); + TEST_DO(f.assertMax({ {{{"x",0}}, 7}, {{{"x",1}}, 11} }, + { {{{"x",0}}, 3}, {{{"x",1}}, 5} }, + { {{{"x",0}}, 7}, {{{"x",1}}, 11} })); + TEST_DO(f.assertMax({ {{{"x",0},{"y",0}}, 5} }, + { {{{"x",0},{"y",0}}, 3} }, + { {{{"x",0},{"y",0}}, 5} })); + TEST_DO(f.assertMax({ {{{"x",0}}, 5} }, + { {{{"x",1}}, 3} }, + { {{{"x",0}}, 5} })); } template <typename FixtureType> diff --git a/vespalib/src/tests/tensor/join_tensor_addresses/.gitignore b/vespalib/src/tests/tensor/join_tensor_addresses/.gitignore deleted file mode 100644 index bcf856a9f59..00000000000 --- a/vespalib/src/tests/tensor/join_tensor_addresses/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vespalib_join_tensor_addresses_test_app diff --git a/vespalib/src/tests/tensor/join_tensor_addresses/CMakeLists.txt b/vespalib/src/tests/tensor/join_tensor_addresses/CMakeLists.txt deleted file mode 100644 index 6923cbc1133..00000000000 --- a/vespalib/src/tests/tensor/join_tensor_addresses/CMakeLists.txt +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright 2016 Yahoo Inc. 
Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -vespa_add_executable(vespalib_join_tensor_addresses_test_app TEST - SOURCES - join_tensor_addresses_test.cpp - DEPENDS - vespalib - vespalib_vespalib_tensor -) -vespa_add_test(NAME vespalib_join_tensor_addresses_test_app COMMAND vespalib_join_tensor_addresses_test_app) diff --git a/vespalib/src/tests/tensor/join_tensor_addresses/FILES b/vespalib/src/tests/tensor/join_tensor_addresses/FILES deleted file mode 100644 index ad4ab2f6d87..00000000000 --- a/vespalib/src/tests/tensor/join_tensor_addresses/FILES +++ /dev/null @@ -1 +0,0 @@ -join_tensor_addresses_test.cpp diff --git a/vespalib/src/tests/tensor/join_tensor_addresses/join_tensor_addresses_test.cpp b/vespalib/src/tests/tensor/join_tensor_addresses/join_tensor_addresses_test.cpp deleted file mode 100644 index f00cd23e322..00000000000 --- a/vespalib/src/tests/tensor/join_tensor_addresses/join_tensor_addresses_test.cpp +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
- -#include <vespa/vespalib/testkit/test_kit.h> -#include <vespa/vespalib/stllike/hash_set.h> -#include <vespa/vespalib/tensor/tensor_address.h> -#include <vespa/vespalib/tensor/tensor_address_builder.h> -#include <vespa/vespalib/tensor/compact/compact_tensor_address.h> -#include <vespa/vespalib/tensor/compact/compact_tensor_address_builder.h> -#include <vespa/vespalib/tensor/tensor_address_element_iterator.h> -#include <vespa/vespalib/tensor/dimensions_vector_iterator.h> -#include <vespa/vespalib/tensor/join_tensor_addresses.h> - -using namespace vespalib::tensor; - -using TensorAddressMap = std::map<std::string, std::string>; -using TensorAddressElementVec = - std::vector<std::pair<std::string, std::string>>; - -namespace vespalib -{ - -std::ostream & -operator<<(std::ostream &out, const TensorAddressElementVec &vec) -{ - out << "{"; - bool first = true; - for (const auto &elem : vec) { - if (!first) { - out << ","; - } - out << "{\"" << elem.first << "\",\"" << elem.second << "\"}"; - first = false; - } - out << "}"; - return out; -}; - -} - - -class DummyAddressBuilder -{ - TensorAddressElementVec _elements; -public: - void add(vespalib::stringref dimension, vespalib::stringref label) - { - _elements.emplace_back(dimension, label); - } - - const TensorAddressElementVec &elements() const { return _elements; } - void clear() { } -}; - - -template <class TensorAddressT> struct FixtureBase; - -template <> struct FixtureBase<TensorAddress> -{ - using AddressType = TensorAddress; - using AddressBuilderType = TensorAddressBuilder; - - static TensorAddress create(TensorAddressBuilder &builder) { - return builder.build(); - } -}; - - -template <> struct FixtureBase<CompactTensorAddress> -{ - using AddressType = CompactTensorAddress; - using AddressBuilderType = CompactTensorAddressBuilder; - - vespalib::Stash _stash; - - CompactTensorAddress - create(CompactTensorAddressBuilder &builder) - { - CompactTensorAddressRef oldRef = builder.getAddressRef(); - 
CompactTensorAddressRef newRef(oldRef, _stash); - CompactTensorAddress ret; - ret.deserializeFromSparseAddressRef(newRef); - return ret; - } -}; - -template <> struct FixtureBase<CompactTensorAddressRef> -{ - using AddressType = CompactTensorAddressRef; - using AddressBuilderType = CompactTensorAddressBuilder; - - vespalib::Stash _stash; - - CompactTensorAddressRef - create(CompactTensorAddressBuilder &builder) - { - CompactTensorAddressRef oldRef = builder.getAddressRef(); - CompactTensorAddressRef newRef(oldRef, _stash); - return newRef; - } -}; - -template <class TensorAddressT> struct Fixture - : public FixtureBase<TensorAddressT> -{ - using Parent = FixtureBase<TensorAddressT>; - using AddressType = typename Parent::AddressType; - using AddressBuilderType = typename Parent::AddressBuilderType; - using Parent::create; - - AddressType - create(const TensorAddressMap &address_in) { - AddressBuilderType builder; - for (auto &element : address_in) { - builder.add(element.first, element.second); - } - return create(builder); - } - - void - verifyJoin3Way(bool exp, - const TensorAddressElementVec &expVec, - const DimensionsVector &commonDimensions, - const TensorAddressMap &lhsAddress_in, - const TensorAddressMap &rhsAddress_in) - { - AddressType expAddress = create(lhsAddress_in); - AddressType lhsAddress = create(lhsAddress_in); - AddressType rhsAddress = create(rhsAddress_in); - DummyAddressBuilder builder; - bool act = joinTensorAddresses<DummyAddressBuilder, - AddressType, AddressType> - (builder, commonDimensions, lhsAddress, rhsAddress); - EXPECT_EQUAL(exp, act); - if (exp) { - EXPECT_EQUAL(expVec, builder.elements()); - } - } - - void - verifyJoin2Way(bool exp, - const TensorAddressElementVec &expVec, - const DimensionsSet &commonDimensions, - const TensorAddressMap &lhsAddress_in, - const TensorAddressMap &rhsAddress_in) - { - AddressType expAddress = create(lhsAddress_in); - AddressType lhsAddress = create(lhsAddress_in); - AddressType rhsAddress = 
create(rhsAddress_in); - DummyAddressBuilder builder; - bool act = joinTensorAddresses<DummyAddressBuilder, - AddressType, AddressType> - (builder, commonDimensions, lhsAddress, rhsAddress); - EXPECT_EQUAL(exp, act); - if (exp) { - EXPECT_EQUAL(expVec, builder.elements()); - } - } - - void - verifyJoin(bool exp, - const TensorAddressElementVec &expVec, - const DimensionsVector &commonDimensions, - const TensorAddressMap &lhsAddress, - const TensorAddressMap &rhsAddress) - { - TEST_DO(verifyJoin3Way(exp, expVec, commonDimensions, - lhsAddress, rhsAddress)); - DimensionsSet commonDimensionsSet(commonDimensions.begin(), - commonDimensions.end()); - TEST_DO(verifyJoin2Way(exp, expVec, commonDimensionsSet, - lhsAddress, rhsAddress)); - } - - void - verifyJoin(const TensorAddressElementVec &expVec, - const DimensionsVector &commonDimensions, - const TensorAddressMap &lhsAddress, - const TensorAddressMap &rhsAddress) - { - verifyJoin(true, expVec, commonDimensions, lhsAddress, rhsAddress); - } - - void - verifyJoinFailure(const DimensionsVector &commonDimensions, - const TensorAddressMap &lhsAddress, - const TensorAddressMap &rhsAddress) - { - verifyJoin(false, {}, commonDimensions, lhsAddress, rhsAddress); - } - - void - verifyJoinFailureOnLabelMisMatch() - { - TEST_DO(verifyJoinFailure({"x", "y"}, - {{"x", "1"}, {"y", "2"}}, - {{"x", "1"}, {"y", "3"}})); - TEST_DO(verifyJoinFailure({"x", "y"}, - {{"x", "1"}, {"y", "2"}}, - {{"x", "2"}, {"y", "2"}})); - TEST_DO(verifyJoinFailure({"y"}, - {{"x", "1"}, {"y", "2"}}, - {{"y", "1"}, {"z", "3"}})); - TEST_DO(verifyJoinFailure({"y"}, - {{"y", "2"}, {"z", "3"}}, - {{"x", "1"}, {"y", "1"}})); - } - - void - verityJoinFailureOnMissingDimension() - { - TEST_DO(verifyJoinFailure({"x", "y"}, - {{"y", "2"}}, - {{"x", "2"}, {"y", "2"}})); - TEST_DO(verifyJoinFailure({"x", "y"}, - {{"x", "1"}, {"y", "2"}}, - {{"y", "2"}})); - TEST_DO(verifyJoinFailure({"x", "y"}, - {{"x", "1"}}, - {{"x", "2"}, {"y", "2"}})); - 
TEST_DO(verifyJoinFailure({"x", "y"}, - {{"x", "1"}, {"y", "2"}}, - {{"x", "2"}})); - TEST_DO(verifyJoinFailure({"x", "y", "z"}, - {{"x", "1"}, {"z", "3"}}, - {{"x", "2"}, {"y", "2"}, {"z", "3"}})); - TEST_DO(verifyJoinFailure({"x", "y", "z"}, - {{"x", "2"}, {"y", "2"}, {"z", "3"}}, - {{"x", "1"}, {"z", "3"}})); - } - - void - verifyJoinSuccessOnDisjunctDimensions() - { - TEST_DO(verifyJoin({}, {}, {}, {})); - TEST_DO(verifyJoin({{"x", "1"}, {"y", "2"}, {"z", "3"}, {"zz", "4"}}, - {}, - {{"x", "1"}, {"y", "2"}}, - {{"z", "3"}, {"zz", "4"}})); - TEST_DO(verifyJoin({{"x", "1"}, {"y", "2"}, {"z", "3"}, {"zz", "4"}}, - {}, - {{"z", "3"}, {"zz", "4"}}, - {{"x", "1"}, {"y", "2"}})); - TEST_DO(verifyJoin({{"x", "1"}, {"y", "2"}, {"z", "3"}, {"zz", "4"}}, - {}, - {{"x", "1"}, {"z", "3"}}, - {{"y", "2"}, {"zz", "4"}})); - TEST_DO(verifyJoin({{"x", "1"}, {"y", "2"}}, - {}, - {{"x", "1"}, {"y", "2"}}, - {})); - TEST_DO(verifyJoin({{"x", "1"}, {"y", "2"}}, - {}, - {}, - {{"x", "1"}, {"y", "2"}})); - TEST_DO(verifyJoin({{"x", "1"}, {"z", "3"}}, {"y"}, - {{"x", "1"}}, - {{"z", "3"}})); - TEST_DO(verifyJoin( {{"x", "1"}, {"z", "3"}}, {"y"}, - {{"z", "3"}}, - {{"x", "1"}})); - } - - void - verifyJoinSuccessOnOverlappingDimensions() - { - TEST_DO(verifyJoin({{"x", "1"}}, {"x"}, - {{"x", "1"}}, {{"x", "1"}})); - TEST_DO(verifyJoin({{"x", "1"}, {"y", "2"}, {"z", "3"}}, - {"x", "z"}, - {{"x", "1"}, {"y", "2"}, {"z", "3"}}, - {{"x", "1"}, {"z", "3"}})); - TEST_DO(verifyJoin({{"x", "1"}, {"y", "2"}, {"z", "3"}}, - {"x", "z"}, - {{"x", "1"}, {"y", "2"}, {"z", "3"}}, - {{"x", "1"}, {"z", "3"}})); - TEST_DO(verifyJoin( {{"x", "1"}, {"y", "2"}}, {"x", "y"}, - {{"x", "1"}, {"y", "2"}}, - {{"x", "1"}, {"y", "2"}})); - TEST_DO(verifyJoin({{"x", "1"}, {"y", "2"}, {"z", "3"}}, {"y"}, - {{"x", "1"}, {"y", "2"}}, - {{"y", "2"}, {"z", "3"}})); - TEST_DO(verifyJoin({{"x", "1"}, {"y", "2"}, {"z", "3"}}, {"y"}, - {{"y", "2"}, {"z", "3"}}, - {{"x", "1"}, {"y", "2"}})); - } - - void - verifyJoin() - { - 
verifyJoinSuccessOnDisjunctDimensions(); - verifyJoinSuccessOnOverlappingDimensions(); - verifyJoinFailureOnLabelMisMatch(); - verityJoinFailureOnMissingDimension(); - } - -}; - - -TEST_F("Test that Tensor address can be joined", Fixture<TensorAddress>) -{ - f.verifyJoin(); -} - -TEST_F("Test that compact Tensor address can be joined", - Fixture<CompactTensorAddress>) -{ - f.verifyJoin(); -} - - -TEST_F("Test that compact Tensor address ref can be joined", - Fixture<CompactTensorAddressRef>) -{ - f.verifyJoin(); -} - -TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/vespalib/src/tests/tensor/simple_tensor_builder/.gitignore b/vespalib/src/tests/tensor/simple_tensor_builder/.gitignore deleted file mode 100644 index b8e9dc6dfc5..00000000000 --- a/vespalib/src/tests/tensor/simple_tensor_builder/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vespalib_simple_tensor_builder_test_app diff --git a/vespalib/src/tests/tensor/simple_tensor_builder/CMakeLists.txt b/vespalib/src/tests/tensor/simple_tensor_builder/CMakeLists.txt deleted file mode 100644 index cd67231d418..00000000000 --- a/vespalib/src/tests/tensor/simple_tensor_builder/CMakeLists.txt +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-vespa_add_executable(vespalib_simple_tensor_builder_test_app TEST - SOURCES - simple_tensor_builder_test.cpp - DEPENDS - vespalib - vespalib_vespalib_tensor -) -vespa_add_test(NAME vespalib_simple_tensor_builder_test_app COMMAND vespalib_simple_tensor_builder_test_app) diff --git a/vespalib/src/tests/tensor/simple_tensor_builder/FILES b/vespalib/src/tests/tensor/simple_tensor_builder/FILES deleted file mode 100644 index e8940ccf5ef..00000000000 --- a/vespalib/src/tests/tensor/simple_tensor_builder/FILES +++ /dev/null @@ -1 +0,0 @@ -simple_tensor_builder_test.cpp diff --git a/vespalib/src/tests/tensor/simple_tensor_builder/simple_tensor_builder_test.cpp b/vespalib/src/tests/tensor/simple_tensor_builder/simple_tensor_builder_test.cpp deleted file mode 100644 index d43e1606d26..00000000000 --- a/vespalib/src/tests/tensor/simple_tensor_builder/simple_tensor_builder_test.cpp +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#include <vespa/vespalib/testkit/test_kit.h> -#include <vespa/vespalib/tensor/simple/simple_tensor_builder.h> - -using namespace vespalib::tensor; - -void -assertCellValue(double expValue, const TensorAddress &address, const SimpleTensor::Cells &cells) -{ - auto itr = cells.find(address); - EXPECT_FALSE(itr == cells.end()); - EXPECT_EQUAL(expValue, itr->second); -} - -TEST("require that tensor can be constructed") -{ - SimpleTensorBuilder builder; - builder.add_label(builder.define_dimension("a"), "1"). - add_label(builder.define_dimension("b"), "2").add_cell(10). - add_label(builder.define_dimension("c"), "3"). 
- add_label(builder.define_dimension("d"), "4").add_cell(20); - Tensor::UP tensor = builder.build(); - const SimpleTensor &simpleTensor = dynamic_cast<const SimpleTensor &>(*tensor); - const SimpleTensor::Cells &cells = simpleTensor.cells(); - EXPECT_EQUAL(2u, cells.size()); - assertCellValue(10, TensorAddress({{"a","1"},{"b","2"}}), cells); - assertCellValue(20, TensorAddress({{"c","3"},{"d","4"}}), cells); -} - -TEST("require that dimensions are extracted") -{ - SimpleTensorBuilder builder; - builder.define_dimension("c"); - builder.define_dimension("a"); - builder.define_dimension("b"); - builder. - add_label(builder.define_dimension("a"), "1"). - add_label(builder.define_dimension("b"), "2").add_cell(10). - add_label(builder.define_dimension("b"), "3"). - add_label(builder.define_dimension("c"), "4").add_cell(20); - Tensor::UP tensor = builder.build(); - const SimpleTensor &simpleTensor = dynamic_cast<const SimpleTensor &>(*tensor); - const SimpleTensor::Dimensions &dims = simpleTensor.dimensions(); - EXPECT_EQUAL(3u, dims.size()); - EXPECT_EQUAL("a", dims[0]); - EXPECT_EQUAL("b", dims[1]); - EXPECT_EQUAL("c", dims[2]); - EXPECT_EQUAL("tensor(a{},b{},c{})", simpleTensor.getType().to_spec()); -} - -TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/vespalib/src/tests/tensor/sparse_tensor_builder/.gitignore b/vespalib/src/tests/tensor/sparse_tensor_builder/.gitignore new file mode 100644 index 00000000000..e0316d190bb --- /dev/null +++ b/vespalib/src/tests/tensor/sparse_tensor_builder/.gitignore @@ -0,0 +1 @@ +vespalib_sparse_tensor_builder_test_app diff --git a/vespalib/src/tests/tensor/sparse_tensor_builder/CMakeLists.txt b/vespalib/src/tests/tensor/sparse_tensor_builder/CMakeLists.txt new file mode 100644 index 00000000000..c8ae7ece908 --- /dev/null +++ b/vespalib/src/tests/tensor/sparse_tensor_builder/CMakeLists.txt @@ -0,0 +1,9 @@ +# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+vespa_add_executable(vespalib_sparse_tensor_builder_test_app TEST + SOURCES + sparse_tensor_builder_test.cpp + DEPENDS + vespalib + vespalib_vespalib_tensor +) +vespa_add_test(NAME vespalib_sparse_tensor_builder_test_app COMMAND vespalib_sparse_tensor_builder_test_app) diff --git a/vespalib/src/tests/tensor/sparse_tensor_builder/FILES b/vespalib/src/tests/tensor/sparse_tensor_builder/FILES new file mode 100644 index 00000000000..ad47666278e --- /dev/null +++ b/vespalib/src/tests/tensor/sparse_tensor_builder/FILES @@ -0,0 +1 @@ +sparse_tensor_builder_test.cpp diff --git a/vespalib/src/tests/tensor/compact_tensor_v2_builder/compact_tensor_v2_builder_test.cpp b/vespalib/src/tests/tensor/sparse_tensor_builder/sparse_tensor_builder_test.cpp index 7dc1e6c1117..39e82abec7d 100644 --- a/vespalib/src/tests/tensor/compact_tensor_v2_builder/compact_tensor_v2_builder_test.cpp +++ b/vespalib/src/tests/tensor/sparse_tensor_builder/sparse_tensor_builder_test.cpp @@ -1,17 +1,18 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include <vespa/vespalib/testkit/test_kit.h> -#include <vespa/vespalib/tensor/compact/compact_tensor_v2_builder.h> +#include <vespa/vespalib/tensor/sparse/sparse_tensor_builder.h> +#include <vespa/vespalib/test/insertion_operators.h> using namespace vespalib::tensor; - +using vespalib::eval::TensorSpec; void assertCellValue(double expValue, const TensorAddress &address, const TensorDimensions &dimensions, - const CompactTensorV2::Cells &cells) + const SparseTensor::Cells &cells) { - CompactTensorV2AddressBuilder addressBuilder; + SparseTensorAddressBuilder addressBuilder; auto dimsItr = dimensions.cbegin(); auto dimsItrEnd = dimensions.cend(); for (const auto &element : address.elements()) { @@ -27,15 +28,16 @@ assertCellValue(double expValue, const TensorAddress &address, addressBuilder.add(""); ++dimsItr; } - CompactTensorAddressRef addressRef(addressBuilder.getAddressRef()); + SparseTensorAddressRef addressRef(addressBuilder.getAddressRef()); auto itr = cells.find(addressRef); EXPECT_FALSE(itr == cells.end()); EXPECT_EQUAL(expValue, itr->second); } -TEST("require that tensor can be constructed") +Tensor::UP +buildTensor() { - CompactTensorV2Builder builder; + SparseTensorBuilder builder; builder.define_dimension("c"); builder.define_dimension("d"); builder.define_dimension("a"); @@ -44,10 +46,15 @@ TEST("require that tensor can be constructed") add_label(builder.define_dimension("b"), "2").add_cell(10). add_label(builder.define_dimension("c"), "3"). 
add_label(builder.define_dimension("d"), "4").add_cell(20); - Tensor::UP tensor = builder.build(); - const CompactTensorV2 &compactTensor = dynamic_cast<const CompactTensorV2 &>(*tensor); - const TensorDimensions &dimensions = compactTensor.dimensions(); - const CompactTensorV2::Cells &cells = compactTensor.cells(); + return builder.build(); +} + +TEST("require that tensor can be constructed") +{ + Tensor::UP tensor = buildTensor(); + const SparseTensor &sparseTensor = dynamic_cast<const SparseTensor &>(*tensor); + const TensorDimensions &dimensions = sparseTensor.dimensions(); + const SparseTensor::Cells &cells = sparseTensor.cells(); EXPECT_EQUAL(2u, cells.size()); assertCellValue(10, TensorAddress({{"a","1"},{"b","2"}}), dimensions, cells); @@ -55,9 +62,19 @@ TEST("require that tensor can be constructed") dimensions, cells); } +TEST("require that tensor can be converted to tensor spec") +{ + Tensor::UP tensor = buildTensor(); + TensorSpec expSpec("tensor(a{},b{},c{},d{})"); + expSpec.add({{"a", "1"}, {"b", "2"}}, 10). + add({{"c", "3"}, {"d", "4"}}, 20); + TensorSpec actSpec = tensor->toSpec(); + EXPECT_EQUAL(expSpec, actSpec); +} + TEST("require that dimensions are extracted") { - CompactTensorV2Builder builder; + SparseTensorBuilder builder; builder.define_dimension("c"); builder.define_dimension("a"); builder.define_dimension("b"); @@ -67,13 +84,13 @@ TEST("require that dimensions are extracted") add_label(builder.define_dimension("b"), "3"). 
add_label(builder.define_dimension("c"), "4").add_cell(20); Tensor::UP tensor = builder.build(); - const CompactTensorV2 &compactTensor = dynamic_cast<const CompactTensorV2 &>(*tensor); - const TensorDimensions &dims = compactTensor.dimensions(); + const SparseTensor &sparseTensor = dynamic_cast<const SparseTensor &>(*tensor); + const TensorDimensions &dims = sparseTensor.dimensions(); EXPECT_EQUAL(3u, dims.size()); EXPECT_EQUAL("a", dims[0]); EXPECT_EQUAL("b", dims[1]); EXPECT_EQUAL("c", dims[2]); - EXPECT_EQUAL("tensor(a{},b{},c{})", compactTensor.getType().to_spec()); + EXPECT_EQUAL("tensor(a{},b{},c{})", sparseTensor.getType().to_spec()); } TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/vespalib/src/tests/tensor/tensor/.gitignore b/vespalib/src/tests/tensor/tensor/.gitignore deleted file mode 100644 index 5682a3d5a74..00000000000 --- a/vespalib/src/tests/tensor/tensor/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vespalib_tensor_test_app diff --git a/vespalib/src/tests/tensor/tensor/CMakeLists.txt b/vespalib/src/tests/tensor/tensor/CMakeLists.txt deleted file mode 100644 index bf902d3a71d..00000000000 --- a/vespalib/src/tests/tensor/tensor/CMakeLists.txt +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-vespa_add_executable(vespalib_tensor_test_app TEST - SOURCES - tensor_test.cpp - DEPENDS - vespalib - vespalib_vespalib_tensor -) -vespa_add_test(NAME vespalib_tensor_test_app COMMAND vespalib_tensor_test_app) diff --git a/vespalib/src/tests/tensor/tensor/FILES b/vespalib/src/tests/tensor/tensor/FILES deleted file mode 100644 index 6ece9b360b5..00000000000 --- a/vespalib/src/tests/tensor/tensor/FILES +++ /dev/null @@ -1 +0,0 @@ -tensor_test.cpp diff --git a/vespalib/src/tests/tensor/tensor/tensor_test.cpp b/vespalib/src/tests/tensor/tensor/tensor_test.cpp deleted file mode 100644 index df80e6cbf18..00000000000 --- a/vespalib/src/tests/tensor/tensor/tensor_test.cpp +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#include <vespa/vespalib/testkit/test_kit.h> -#include <vespa/vespalib/tensor/simple/simple_tensor.h> -#include <vespa/vespalib/tensor/tensor_factory.h> -#include <vespa/vespalib/tensor/simple/simple_tensor_builder.h> - -using namespace vespalib::tensor; - -namespace -{ - -SimpleTensor::UP createTensor(const TensorCells &cells) -{ - SimpleTensorBuilder builder; - return SimpleTensor::UP(static_cast<SimpleTensor *> - (TensorFactory::create(cells, builder).release())); -} - -} - -void -assertCellValue(double expValue, const TensorAddress &address, const SimpleTensor::Cells &cells) -{ - auto itr = cells.find(address); - EXPECT_FALSE(itr == cells.end()); - EXPECT_EQUAL(expValue, itr->second); -} - -TEST("require that tensor can be constructed") -{ - SimpleTensor::UP tensor = createTensor({ {{{"a","1"},{"b","2"}},10}, {{{"c","3"},{"d","4"}},20} }); - const SimpleTensor::Cells &cells = tensor->cells(); - EXPECT_EQUAL(2u, cells.size()); - assertCellValue(10, TensorAddress({{"a","1"},{"b","2"}}), cells); - assertCellValue(20, TensorAddress({{"c","3"},{"d","4"}}), cells); -} - -TEST("require that dimensions are extracted") -{ - SimpleTensor::UP tensor = 
createTensor({ {{{"a","1"},{"b","2"}},10}, {{{"b","3"},{"c","4"}},20} }); - const SimpleTensor::Dimensions &dims = tensor->dimensions(); - EXPECT_EQUAL(3u, dims.size()); - EXPECT_EQUAL("a", dims[0]); - EXPECT_EQUAL("b", dims[1]); - EXPECT_EQUAL("c", dims[2]); -} - -TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/vespalib/src/tests/tensor/tensor_address_element_iterator/.gitignore b/vespalib/src/tests/tensor/tensor_address_element_iterator/.gitignore deleted file mode 100644 index c28cf0c86f2..00000000000 --- a/vespalib/src/tests/tensor/tensor_address_element_iterator/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vespalib_tensor_address_element_iterator_test_app diff --git a/vespalib/src/tests/tensor/tensor_address_element_iterator/CMakeLists.txt b/vespalib/src/tests/tensor/tensor_address_element_iterator/CMakeLists.txt deleted file mode 100644 index dad69af7ba3..00000000000 --- a/vespalib/src/tests/tensor/tensor_address_element_iterator/CMakeLists.txt +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-vespa_add_executable(vespalib_tensor_address_element_iterator_test_app TEST - SOURCES - tensor_address_element_iterator_test.cpp - DEPENDS - vespalib - vespalib_vespalib_tensor -) -vespa_add_test(NAME vespalib_tensor_address_element_iterator_test_app COMMAND vespalib_tensor_address_element_iterator_test_app) diff --git a/vespalib/src/tests/tensor/tensor_address_element_iterator/FILES b/vespalib/src/tests/tensor/tensor_address_element_iterator/FILES deleted file mode 100644 index b185a25973e..00000000000 --- a/vespalib/src/tests/tensor/tensor_address_element_iterator/FILES +++ /dev/null @@ -1 +0,0 @@ -tensor_address_element_iterator_test.cpp diff --git a/vespalib/src/tests/tensor/tensor_address_element_iterator/tensor_address_element_iterator_test.cpp b/vespalib/src/tests/tensor/tensor_address_element_iterator/tensor_address_element_iterator_test.cpp deleted file mode 100644 index 95fe166937d..00000000000 --- a/vespalib/src/tests/tensor/tensor_address_element_iterator/tensor_address_element_iterator_test.cpp +++ /dev/null @@ -1,343 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
- -#include <vespa/vespalib/testkit/test_kit.h> -#include <vespa/vespalib/stllike/hash_set.h> -#include <vespa/vespalib/tensor/tensor_address.h> -#include <vespa/vespalib/tensor/tensor_address_builder.h> -#include <vespa/vespalib/tensor/compact/compact_tensor_address.h> -#include <vespa/vespalib/tensor/compact/compact_tensor_address_builder.h> -#include <vespa/vespalib/tensor/tensor_address_element_iterator.h> - -using namespace vespalib::tensor; - -using TensorAddressMap = std::map<std::string, std::string>; -using TensorAddressElementVec = - std::vector<std::pair<std::string, std::string>>; - -namespace vespalib -{ - -std::ostream & -operator<<(std::ostream &out, const TensorAddressElementVec &vec) -{ - out << "{"; - bool first = true; - for (const auto &elem : vec) { - if (!first) { - out << ","; - } - out << "{\"" << elem.first << "\",\"" << elem.second << "\"}"; - first = false; - } - out << "}"; - return out; -}; - -} - - -class DummyAddressBuilder -{ - TensorAddressElementVec _elements; -public: - void add(vespalib::stringref dimension, vespalib::stringref label) - { - _elements.emplace_back(dimension, label); - } - - const TensorAddressElementVec &elements() const { return _elements; } -}; - - -template <class TensorAddressT> struct FixtureBase; - -template <> struct FixtureBase<TensorAddress> -{ - using AddressType = TensorAddress; - using AddressBuilderType = TensorAddressBuilder; - - static TensorAddress create(TensorAddressBuilder &builder) { - return builder.build(); - } -}; - - -template <> struct FixtureBase<CompactTensorAddress> -{ - using AddressType = CompactTensorAddress; - using AddressBuilderType = CompactTensorAddressBuilder; - - vespalib::Stash _stash; - - CompactTensorAddress - create(CompactTensorAddressBuilder &builder) - { - CompactTensorAddressRef oldRef = builder.getAddressRef(); - CompactTensorAddressRef newRef(oldRef, _stash); - CompactTensorAddress ret; - ret.deserializeFromSparseAddressRef(newRef); - return ret; - } -}; - -template 
<> struct FixtureBase<CompactTensorAddressRef> -{ - using AddressType = CompactTensorAddressRef; - using AddressBuilderType = CompactTensorAddressBuilder; - - vespalib::Stash _stash; - - CompactTensorAddressRef - create(CompactTensorAddressBuilder &builder) - { - CompactTensorAddressRef oldRef = builder.getAddressRef(); - CompactTensorAddressRef newRef(oldRef, _stash); - return newRef; - } -}; - -template <class TensorAddressT> struct Fixture - : public FixtureBase<TensorAddressT> -{ - using Parent = FixtureBase<TensorAddressT>; - using AddressType = typename Parent::AddressType; - using AddressBuilderType = typename Parent::AddressBuilderType; - using Parent::create; - - AddressType - create(const TensorAddressMap &address_in) { - AddressBuilderType builder; - for (auto &element : address_in) { - builder.add(element.first, element.second); - } - return create(builder); - } - - void - verifyPlainIterate(const TensorAddressMap &address_in) - { - AddressType address = create(address_in); - TensorAddressElementIterator<AddressType> itr(address); - for (auto &element : address_in) { - EXPECT_TRUE(itr.valid()); - EXPECT_EQUAL(element.first, itr.dimension()); - EXPECT_EQUAL(element.second, itr.label()); - itr.next(); - } - EXPECT_FALSE(itr.valid()); - } - - - void - verifyPlainIterate() - { - TEST_DO(verifyPlainIterate({})); - TEST_DO(verifyPlainIterate({{"a", "1"}})); - TEST_DO(verifyPlainIterate({{"a", "1"}, {"b", "2"}})); - } - - void - verifyBeforeDimension(const TensorAddressMap &lhsAddress_in, - const TensorAddressMap &rhsAddress_in, - bool exp) - { - AddressType lhsAddress = create(lhsAddress_in); - TensorAddressElementIterator<AddressType> lhsItr(lhsAddress); - AddressType rhsAddress = create(rhsAddress_in); - TensorAddressElementIterator<AddressType> rhsItr(rhsAddress); - EXPECT_EQUAL(exp, lhsItr.beforeDimension(rhsItr)); - } - - void - verifyBeforeDimension() { - TEST_DO(verifyBeforeDimension({}, {}, false)); - TEST_DO(verifyBeforeDimension({}, {{"x", "1"}}, 
false)); - TEST_DO(verifyBeforeDimension({{"x", "1"}}, {}, true)); - TEST_DO(verifyBeforeDimension({{"x", "1"}}, {{"x", "2"}}, false)); - TEST_DO(verifyBeforeDimension({{"x", "1"}}, {{"y", "2"}}, true)); - TEST_DO(verifyBeforeDimension({{"y", "1"}}, {{"x", "2"}}, false)); - } - - void - verifyAtDimension(const TensorAddressMap &address_in, - vespalib::stringref dimension, - bool exp) - { - AddressType address = create(address_in); - TensorAddressElementIterator<AddressType> itr(address); - EXPECT_EQUAL(exp, itr.atDimension(dimension)); - } - - void - verifyAtDimension() - { - TEST_DO(verifyAtDimension({}, "x", false)); - TEST_DO(verifyAtDimension({{"x", "1"}}, "x", true)); - TEST_DO(verifyAtDimension({{"x", "1"}}, "y", false)); - TEST_DO(verifyAtDimension({{"y", "1"}}, "x", false)); - TEST_DO(verifyAtDimension({{"y", "1"}}, "y", true)); - } - - void - verifyAddElements(const TensorAddressMap &lhsAddress_in, - const TensorAddressMap &rhsAddress_in, - const TensorAddressElementVec &exp) - { - AddressType lhsAddress = create(lhsAddress_in); - TensorAddressElementIterator<AddressType> lhsItr(lhsAddress); - AddressType rhsAddress = create(rhsAddress_in); - TensorAddressElementIterator<AddressType> rhsItr(rhsAddress); - DummyAddressBuilder builder; - lhsItr.addElements(builder, rhsItr); - EXPECT_EQUAL(exp, builder.elements()); - } - - void verifyAddElements(const TensorAddressMap &address_in, - const TensorAddressElementVec &exp) - { - AddressType address = create(address_in); - TensorAddressElementIterator<AddressType> itr(address); - DummyAddressBuilder builder; - itr.addElements(builder); - EXPECT_EQUAL(exp, builder.elements()); - } - - void verifyAddElements(const TensorAddressMap &address_in, - const DimensionsSet &dimensions, - bool exp, - const TensorAddressElementVec &expVec) - { - AddressType address = create(address_in); - TensorAddressElementIterator<AddressType> itr(address); - DummyAddressBuilder builder; - EXPECT_EQUAL(exp, itr.addElements(builder, 
dimensions)); - EXPECT_EQUAL(expVec, builder.elements()); - } - - void verifyAddElements(const TensorAddressMap &lhsAddress_in, - const TensorAddressMap &rhsAddress_in, - const DimensionsSet &dimensions, - bool exp, - const TensorAddressElementVec &expVec) - { - AddressType lhsAddress = create(lhsAddress_in); - TensorAddressElementIterator<AddressType> lhsItr(lhsAddress); - AddressType rhsAddress = create(rhsAddress_in); - TensorAddressElementIterator<AddressType> rhsItr(rhsAddress); - DummyAddressBuilder builder; - ASSERT_TRUE(lhsItr.beforeDimension(rhsItr)); - EXPECT_EQUAL(exp, lhsItr.addElements(builder, dimensions, rhsItr)); - EXPECT_EQUAL(expVec, builder.elements()); - } - - void - verifyAddElements() - { - // Stop according to rhs iterator - TEST_DO(verifyAddElements({}, {}, {})); - TEST_DO(verifyAddElements({{"x", "1"}}, {}, {{"x", "1"}})); - TEST_DO(verifyAddElements({{"x", "1"}}, {{"x", "1"}}, {})); - TEST_DO(verifyAddElements({{"x", "1"}}, {{"y", "1"}}, {{"x", "1"}})); - TEST_DO(verifyAddElements({{"y", "1"}}, {{"x", "1"}}, {})); - TEST_DO(verifyAddElements({{"x", "1"}, {"y", "2"}}, {{"z", "1"}}, - {{"x", "1"}, {"y", "2"}})); - // Pass through everything - TEST_DO(verifyAddElements({}, {})); - TEST_DO(verifyAddElements({{"x", "1"}}, {{"x", "1"}})); - TEST_DO(verifyAddElements({{"x", "1"}, {"y", "2"}}, - {{"x", "1"}, {"y", "2"}})); - // Filter on dimension set - TEST_DO(verifyAddElements({}, {}, true, {})); - TEST_DO(verifyAddElements({{"x", "1"}}, {}, true, {{"x", "1"}})); - TEST_DO(verifyAddElements({{"x", "1"}, {"y", "2"}}, {}, true, - {{"x", "1"}, {"y", "2"}})); - TEST_DO(verifyAddElements({{"x", "1"}, {"y", "2"}}, {"y"}, false, - {{"x", "1"}})); - // Filter on dimension set and stop according to rhs iterator - TEST_DO(verifyAddElements({{"x", "1"}}, {}, {}, true, {{"x", "1"}})); - TEST_DO(verifyAddElements({{"x", "1"}, {"y", "2"}}, {}, {}, true, - {{"x", "1"}, {"y", "2"}})); - TEST_DO(verifyAddElements({{"x", "1"}, {"y", "2"}}, {{"y", "2"}}, {}, - 
true, {{"x", "1"}})); - TEST_DO(verifyAddElements({{"x", "1"}, {"y", "2"}}, {{"y", "2"}}, {"y"}, - true, {{"x", "1"}})); - TEST_DO(verifyAddElements({{"x", "1"}, {"y", "2"}}, {{"y", "2"}}, {"x"}, - false, {})); - } -}; - - -TEST_F("Test that Tensor address can be iterated", Fixture<TensorAddress>) -{ - f.verifyPlainIterate(); -} - -TEST_F("Test that compact Tensor address can be iterated", - Fixture<CompactTensorAddress>) -{ - f.verifyPlainIterate(); -} - - -TEST_F("Test that compact Tensor address ref can be iterated", - Fixture<CompactTensorAddressRef>) -{ - f.verifyPlainIterate(); -} - -TEST_F("Test that Tensor address works with beforeDimension", - Fixture<TensorAddress>) -{ - f.verifyBeforeDimension(); -} - -TEST_F("Test that compact Tensor address works with beforeDimension", - Fixture<CompactTensorAddress>) -{ - f.verifyBeforeDimension(); -} - -TEST_F("Test that compat Tensor address ref works with beforeDimension", - Fixture<CompactTensorAddressRef>) -{ - f.verifyBeforeDimension(); -} - -TEST_F("Test that Tensor address works with atDimension", - Fixture<TensorAddress>) -{ - f.verifyAtDimension(); -} - -TEST_F("Test that compact Tensor address works with atDimension", - Fixture<CompactTensorAddress>) -{ - f.verifyAtDimension(); -} - -TEST_F("Test that compat Tensor address ref works with atDimension", - Fixture<CompactTensorAddressRef>) -{ - f.verifyAtDimension(); -} - -TEST_F("Test that Tensor address works with addElements", - Fixture<TensorAddress>) -{ - f.verifyAddElements(); -} - -TEST_F("Test that compact Tensor address works with addElements", - Fixture<CompactTensorAddress>) -{ - f.verifyAddElements(); -} - -TEST_F("Test that compat Tensor address ref works with addElements", - Fixture<CompactTensorAddressRef>) -{ - f.verifyAddElements(); -} - - -TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/vespalib/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp b/vespalib/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp index 
91dea9fb0ce..238d0604ee7 100644 --- a/vespalib/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp +++ b/vespalib/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp @@ -8,12 +8,12 @@ using vespalib::eval::SimpleTensorEngine; using vespalib::eval::test::TensorConformance; using vespalib::tensor::DefaultTensorEngine; -TEST("require that reference tensor implementation passes conformance test") { - TEST_DO(TensorConformance::run_tests(SimpleTensorEngine::ref())); +TEST("require that reference tensor implementation passes all conformance tests") { + TEST_DO(TensorConformance::run_tests(SimpleTensorEngine::ref(), true)); } -IGNORE_TEST("require that production tensor implementation passes conformance test") { - TEST_DO(TensorConformance::run_tests(DefaultTensorEngine::ref())); +IGNORE_TEST("require that production tensor implementation passes non-mixed conformance tests") { + TEST_DO(TensorConformance::run_tests(DefaultTensorEngine::ref(), false)); } TEST_MAIN() { TEST_RUN_ALL(); } diff --git a/vespalib/src/tests/tensor/tensor_mapper/tensor_mapper_test.cpp b/vespalib/src/tests/tensor/tensor_mapper/tensor_mapper_test.cpp index 612b5366d77..6977a857944 100644 --- a/vespalib/src/tests/tensor/tensor_mapper/tensor_mapper_test.cpp +++ b/vespalib/src/tests/tensor/tensor_mapper/tensor_mapper_test.cpp @@ -2,12 +2,8 @@ #include <vespa/vespalib/testkit/test_kit.h> #include <vespa/vespalib/util/stringfmt.h> -#include <vespa/vespalib/tensor/simple/simple_tensor.h> -#include <vespa/vespalib/tensor/simple/simple_tensor_builder.h> -#include <vespa/vespalib/tensor/compact/compact_tensor.h> -#include <vespa/vespalib/tensor/compact/compact_tensor_builder.h> -#include <vespa/vespalib/tensor/compact/compact_tensor_v2.h> -#include <vespa/vespalib/tensor/compact/compact_tensor_v2_builder.h> +#include <vespa/vespalib/tensor/sparse/sparse_tensor.h> +#include <vespa/vespalib/tensor/sparse/sparse_tensor_builder.h> #include <vespa/vespalib/tensor/dense/dense_tensor.h> 
#include <vespa/vespalib/tensor/dense/dense_tensor_builder.h> #include <vespa/vespalib/tensor/types.h> @@ -41,18 +37,8 @@ template <typename BuilderType> struct TensorTFromBuilder; template <> -struct TensorTFromBuilder<SimpleTensorBuilder> { - using TensorT = SimpleTensor; -}; - -template <> -struct TensorTFromBuilder<CompactTensorBuilder> { - using TensorT = CompactTensor; -}; - -template <> -struct TensorTFromBuilder<CompactTensorV2Builder> { - using TensorT = CompactTensorV2; +struct TensorTFromBuilder<SparseTensorBuilder> { + using TensorT = SparseTensor; }; template <typename BuilderType> @@ -130,9 +116,7 @@ struct Fixture : public FixtureBase } }; -using SimpleFixture = Fixture<SimpleTensorBuilder>; -using CompactFixture = Fixture<CompactTensorBuilder>; -using CompactV2Fixture = Fixture<CompactTensorV2Builder>; +using SparseFixture = Fixture<SparseTensorBuilder>; template <typename FixtureType> void @@ -208,17 +192,7 @@ testTensorMapper(FixtureType &f) { "x", "y" })); } -TEST_F("test tensor mapper for SimpleTensor", SimpleFixture) -{ - testTensorMapper(f); -} - -TEST_F("test tensor mapper for CompactTensor", CompactFixture) -{ - testTensorMapper(f); -} - -TEST_F("test tensor mapper for CompactTensorV2", CompactV2Fixture) +TEST_F("test tensor mapper for SparseTensor", SparseFixture) { testTensorMapper(f); } diff --git a/vespalib/src/tests/tensor/tensor_operations/tensor_operations_test.cpp b/vespalib/src/tests/tensor/tensor_operations/tensor_operations_test.cpp index 3948a6d68a6..5ad26e979c5 100644 --- a/vespalib/src/tests/tensor/tensor_operations/tensor_operations_test.cpp +++ b/vespalib/src/tests/tensor/tensor_operations/tensor_operations_test.cpp @@ -1,12 +1,8 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include <vespa/vespalib/testkit/test_kit.h> -#include <vespa/vespalib/tensor/simple/simple_tensor.h> -#include <vespa/vespalib/tensor/simple/simple_tensor_builder.h> -#include <vespa/vespalib/tensor/compact/compact_tensor.h> -#include <vespa/vespalib/tensor/compact/compact_tensor_builder.h> -#include <vespa/vespalib/tensor/compact/compact_tensor_v2.h> -#include <vespa/vespalib/tensor/compact/compact_tensor_v2_builder.h> +#include <vespa/vespalib/tensor/sparse/sparse_tensor.h> +#include <vespa/vespalib/tensor/sparse/sparse_tensor_builder.h> #include <vespa/vespalib/tensor/types.h> #include <vespa/vespalib/tensor/tensor_factory.h> #include <vespa/vespalib/tensor/tensor_function.h> @@ -124,6 +120,10 @@ struct Fixture void assertAdd(const TensorCells &exp, const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) { assertAddImpl(*createTensor(exp), *createTensor(lhs), *createTensor(rhs), check_types); } + void assertAdd(const TensorCells &exp, const TensorDimensions &expDimensions, + const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) { + assertAddImpl(*createTensor(exp, expDimensions), *createTensor(lhs), *createTensor(rhs), check_types); + } void assertSubtractImpl(const Tensor &exp, const Tensor &lhs, const Tensor &rhs, bool check_types) { MyInput input; function::Node_UP ir = function::subtract(function::input(lhs.getType(), input.add(lhs)), @@ -133,6 +133,9 @@ struct Fixture void assertSubtract(const TensorCells &exp, const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) { assertSubtractImpl(*createTensor(exp), *createTensor(lhs), *createTensor(rhs), check_types); } + void assertSubtract(const TensorCells &exp, const TensorDimensions &expDimensions, const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) { + assertSubtractImpl(*createTensor(exp, expDimensions), *createTensor(lhs), *createTensor(rhs), check_types); + } void assertMinImpl(const Tensor &exp, const Tensor &lhs, const Tensor &rhs, 
bool check_types) { MyInput input; function::Node_UP ir = function::min(function::input(lhs.getType(), input.add(lhs)), @@ -142,6 +145,9 @@ struct Fixture void assertMin(const TensorCells &exp, const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) { assertMinImpl(*createTensor(exp), *createTensor(lhs), *createTensor(rhs), check_types); } + void assertMin(const TensorCells &exp, const TensorDimensions &expDimensions, const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) { + assertMinImpl(*createTensor(exp, expDimensions), *createTensor(lhs), *createTensor(rhs), check_types); + } void assertMaxImpl(const Tensor &exp, const Tensor &lhs, const Tensor &rhs, bool check_types) { MyInput input; function::Node_UP ir = function::max(function::input(lhs.getType(), input.add(lhs)), @@ -151,6 +157,9 @@ struct Fixture void assertMax(const TensorCells &exp, const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) { assertMaxImpl(*createTensor(exp), *createTensor(lhs), *createTensor(rhs), check_types); } + void assertMax(const TensorCells &exp, const TensorDimensions &expDimensions, const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) { + assertMaxImpl(*createTensor(exp, expDimensions), *createTensor(lhs), *createTensor(rhs), check_types); + } void assertSumImpl(double exp, const Tensor &tensor) { MyInput input; function::Node_UP ir = function::sum(function::input(tensor.getType(), input.add(tensor))); @@ -221,9 +230,7 @@ struct Fixture } }; -using SimpleFixture = Fixture<SimpleTensorBuilder>; -using CompactFixture = Fixture<CompactTensorBuilder>; -using CompactV2Fixture = Fixture<CompactTensorV2Builder>; +using SparseFixture = Fixture<SparseTensorBuilder>; template <typename FixtureType> @@ -258,42 +265,42 @@ void testTensorAdd(FixtureType &f) { f.assertAdd({},{},{}, false); - f.assertAdd({ {{{"x","1"}}, 3}, {{{"x","2"}}, 5} }, - { {{{"x","1"}}, 3} }, - { {{{"x","2"}}, 5} }); - f.assertAdd({ {{{"x","1"}}, 
8} }, - { {{{"x","1"}}, 3} }, - { {{{"x","1"}}, 5} }); - f.assertAdd({ {{{"x","1"}}, -2} }, - { {{{"x","1"}}, 3} }, - { {{{"x","1"}}, -5} }); - f.assertAdd({ {{{"x","1"}}, 0} }, - { {{{"x","1"}}, 3} }, - { {{{"x","1"}}, -3} }); - f.assertAdd({ {{{"x","1"}}, 3}, {{{"y","2"}}, 12}, {{{"z","3"}}, 11} }, - { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }, - { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }); - f.assertAdd({ {{{"x","1"}}, 3}, {{{"y","2"}}, 12}, {{{"z","3"}}, 11} }, - { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }, - { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }); - f.assertAdd({ {{{"y","2"}}, 12}, {{{"z","3"}}, 11} }, - { {{{"y","2"}}, 5} }, - { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }); - f.assertAdd({ {{{"y","2"}}, 12}, {{{"z","3"}}, 11} }, - { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }, - { {{{"y","2"}}, 5} }); - f.assertAdd({ {{{"x","1"}}, 3}, {{{"y","2"}}, 12} }, - { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }, - { {{{"y","2"}}, 7} }); - f.assertAdd({ {{{"x","1"}}, 3}, {{{"y","2"}}, 12} }, - { {{{"y","2"}}, 7} }, - { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }); - f.assertAdd({ {{{"x","1"}}, 3}, {{{"z","3"}}, 11} }, - { {{{"x","1"}}, 3} }, - { {{{"z","3"}}, 11} }); - f.assertAdd({ {{{"x","1"}}, 3}, {{{"z","3"}}, 11} }, - { {{{"z","3"}}, 11} }, - { {{{"x","1"}}, 3} }); + TEST_DO(f.assertAdd({}, { "x" }, + { {{{"x","1"}}, 3} }, + { {{{"x","2"}}, 5} })); + TEST_DO(f.assertAdd({ {{{"x","1"}}, 8} }, + { {{{"x","1"}}, 3} }, + { {{{"x","1"}}, 5} })); + TEST_DO(f.assertAdd({ {{{"x","1"}}, -2} }, + { {{{"x","1"}}, 3} }, + { {{{"x","1"}}, -5} })); + TEST_DO(f.assertAdd({ {{{"x","1"}}, 0} }, + { {{{"x","1"}}, 3} }, + { {{{"x","1"}}, -3} })); + TEST_DO(f.assertAdd({ {{{"x","1"},{"z","3"}}, 14}, {{{"y","2"}}, 12} }, + { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }, + { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} })); + TEST_DO(f.assertAdd({ {{{"x","1"},{"z","3"}}, 14}, {{{"y","2"}}, 12} }, + { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }, + { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} })); + TEST_DO(f.assertAdd({ {{{"y","2"}}, 12} }, { "y", "z" 
}, + { {{{"y","2"}}, 5} }, + { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} })); + TEST_DO(f.assertAdd({ {{{"y","2"}}, 12} }, { "y", "z" }, + { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }, + { {{{"y","2"}}, 5} })); + TEST_DO(f.assertAdd({ {{{"y","2"}}, 12} }, { "x", "y" }, + { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }, + { {{{"y","2"}}, 7} })); + TEST_DO(f.assertAdd({ {{{"y","2"}}, 12} }, { "x", "y" }, + { {{{"y","2"}}, 7} }, + { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} })); + TEST_DO(f.assertAdd({ {{{"x","1"},{"z","3"}}, 14} }, + { {{{"x","1"}}, 3} }, + { {{{"z","3"}}, 11} })); + TEST_DO(f.assertAdd({ {{{"x","1"},{"z","3"}}, 14} }, + { {{{"z","3"}}, 11} }, + { {{{"x","1"}}, 3} })); } template <typename FixtureType> @@ -301,42 +308,42 @@ void testTensorSubtract(FixtureType &f) { f.assertSubtract({},{},{}, false); - f.assertSubtract({ {{{"x","1"}}, 3}, {{{"x","2"}}, -5} }, - { {{{"x","1"}}, 3} }, - { {{{"x","2"}}, 5} }); - f.assertSubtract({ {{{"x","1"}}, -2} }, - { {{{"x","1"}}, 3} }, - { {{{"x","1"}}, 5} }); - f.assertSubtract({ {{{"x","1"}}, 8} }, - { {{{"x","1"}}, 3} }, - { {{{"x","1"}}, -5} }); - f.assertSubtract({ {{{"x","1"}}, 0} }, - { {{{"x","1"}}, 3} }, - { {{{"x","1"}}, 3} }); - f.assertSubtract({ {{{"x","1"}}, 3}, {{{"y","2"}},-2}, {{{"z","3"}},-11} }, - { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }, - { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }); - f.assertSubtract({ {{{"x","1"}},-3}, {{{"y","2"}}, 2}, {{{"z","3"}}, 11} }, - { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }, - { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }); - f.assertSubtract({ {{{"y","2"}},-2}, {{{"z","3"}},-11} }, - { {{{"y","2"}}, 5} }, - { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }); - f.assertSubtract({ {{{"y","2"}}, 2}, {{{"z","3"}}, 11} }, - { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }, - { {{{"y","2"}}, 5} }); - f.assertSubtract({ {{{"x","1"}}, 3}, {{{"y","2"}},-2} }, - { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }, - { {{{"y","2"}}, 7} }); - f.assertSubtract({ {{{"x","1"}},-3}, {{{"y","2"}}, 2} }, - { {{{"y","2"}}, 7} }, - { {{{"x","1"}}, 3}, 
{{{"y","2"}}, 5} }); - f.assertSubtract({ {{{"x","1"}}, 3}, {{{"z","3"}},-11} }, - { {{{"x","1"}}, 3} }, - { {{{"z","3"}}, 11} }); - f.assertSubtract({ {{{"x","1"}},-3}, {{{"z","3"}}, 11} }, - { {{{"z","3"}}, 11} }, - { {{{"x","1"}}, 3} }); + TEST_DO(f.assertSubtract({}, { "x" }, + { {{{"x","1"}}, 3} }, + { {{{"x","2"}}, 5} })); + TEST_DO(f.assertSubtract({ {{{"x","1"}}, -2} }, + { {{{"x","1"}}, 3} }, + { {{{"x","1"}}, 5} })); + TEST_DO(f.assertSubtract({ {{{"x","1"}}, 8} }, + { {{{"x","1"}}, 3} }, + { {{{"x","1"}}, -5} })); + TEST_DO(f.assertSubtract({ {{{"x","1"}}, 0} }, + { {{{"x","1"}}, 3} }, + { {{{"x","1"}}, 3} })); + TEST_DO(f.assertSubtract({ {{{"x","1"},{"z","3"}}, -8}, {{{"y","2"}},-2} }, + { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }, + { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} })); + TEST_DO(f.assertSubtract({ {{{"x","1"},{"z","3"}}, 8}, {{{"y","2"}}, 2} }, + { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }, + { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} })); + TEST_DO(f.assertSubtract({ {{{"y","2"}},-2} }, { "y", "z" }, + { {{{"y","2"}}, 5} }, + { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} })); + TEST_DO(f.assertSubtract({ {{{"y","2"}}, 2} }, { "y", "z" }, + { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }, + { {{{"y","2"}}, 5} })); + TEST_DO(f.assertSubtract({ {{{"y","2"}},-2} }, { "x", "y" }, + { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }, + { {{{"y","2"}}, 7} })); + TEST_DO(f.assertSubtract({ {{{"y","2"}}, 2} }, { "x", "y" }, + { {{{"y","2"}}, 7} }, + { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} })); + TEST_DO(f.assertSubtract({ {{{"x","1"},{"z","3"}}, -8} }, + { {{{"x","1"}}, 3} }, + { {{{"z","3"}}, 11} })); + TEST_DO(f.assertSubtract({ {{{"x","1"},{"z","3"}}, 8} }, + { {{{"z","3"}}, 11} }, + { {{{"x","1"}}, 3} })); } template <typename FixtureType> @@ -344,42 +351,42 @@ void testTensorMin(FixtureType &f) { f.assertMin({},{},{}, false); - f.assertMin({ {{{"x","1"}}, 3}, {{{"x","2"}}, 5} }, - { {{{"x","1"}}, 3} }, - { {{{"x","2"}}, 5} }); - f.assertMin({ {{{"x","1"}}, 3} }, - { {{{"x","1"}}, 3} }, - { 
{{{"x","1"}}, 5} }); - f.assertMin({ {{{"x","1"}}, -5} }, - { {{{"x","1"}}, 3} }, - { {{{"x","1"}}, -5} }); - f.assertMin({ {{{"x","1"}}, 3}, {{{"x","2"}}, 0} }, - { {{{"x","1"}}, 3} }, - { {{{"x","2"}}, 0} }); - f.assertMin({ {{{"x","1"}}, 3}, {{{"y","2"}}, 5}, {{{"z","3"}}, 11} }, - { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }, - { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }); - f.assertMin({ {{{"x","1"}}, 3}, {{{"y","2"}}, 5}, {{{"z","3"}}, 11} }, - { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }, - { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }); - f.assertMin({ {{{"y","2"}}, 5}, {{{"z","3"}}, 11} }, - { {{{"y","2"}}, 5} }, - { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }); - f.assertMin({ {{{"y","2"}}, 5}, {{{"z","3"}}, 11} }, - { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }, - { {{{"y","2"}}, 5} }); - f.assertMin({ {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }, - { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }, - { {{{"y","2"}}, 7} }); - f.assertMin({ {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }, - { {{{"y","2"}}, 7} }, - { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }); - f.assertMin({ {{{"x","1"}}, 3}, {{{"z","3"}}, 11} }, - { {{{"x","1"}}, 3} }, - { {{{"z","3"}}, 11} }); - f.assertMin({ {{{"x","1"}}, 3}, {{{"z","3"}}, 11} }, - { {{{"z","3"}}, 11} }, - { {{{"x","1"}}, 3} }); + TEST_DO(f.assertMin({}, { "x" }, + { {{{"x","1"}}, 3} }, + { {{{"x","2"}}, 5} })); + TEST_DO(f.assertMin({ {{{"x","1"}}, 3} }, + { {{{"x","1"}}, 3} }, + { {{{"x","1"}}, 5} })); + TEST_DO(f.assertMin({ {{{"x","1"}}, -5} }, + { {{{"x","1"}}, 3} }, + { {{{"x","1"}}, -5} })); + TEST_DO(f.assertMin({}, { "x" }, + { {{{"x","1"}}, 3} }, + { {{{"x","2"}}, 0} })); + TEST_DO(f.assertMin({ {{{"x","1"},{"z","3"}}, 3}, {{{"y","2"}}, 5} }, + { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }, + { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} })); + TEST_DO(f.assertMin({ {{{"x","1"},{"z","3"}}, 3}, {{{"y","2"}}, 5} }, + { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }, + { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} })); + TEST_DO(f.assertMin({ {{{"y","2"}}, 5} }, { "y", "z" }, + { {{{"y","2"}}, 5} }, + { 
{{{"y","2"}}, 7}, {{{"z","3"}}, 11} })); + TEST_DO(f.assertMin({ {{{"y","2"}}, 5} }, { "y", "z" }, + { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }, + { {{{"y","2"}}, 5} })); + TEST_DO(f.assertMin({ {{{"y","2"}}, 5} }, { "x", "y" }, + { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }, + { {{{"y","2"}}, 7} })); + TEST_DO(f.assertMin({ {{{"y","2"}}, 5} }, { "x", "y" }, + { {{{"y","2"}}, 7} }, + { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} })); + TEST_DO(f.assertMin({ {{{"x","1"},{"z","3"}}, 3} }, + { {{{"x","1"}}, 3} }, + { {{{"z","3"}}, 11} })); + TEST_DO(f.assertMin({ {{{"x","1"},{"z","3"}}, 3} }, + { {{{"z","3"}}, 11} }, + { {{{"x","1"}}, 3} })); } template <typename FixtureType> @@ -387,45 +394,45 @@ void testTensorMax(FixtureType &f) { f.assertMax({},{},{}, false); - f.assertMax({ {{{"x","1"}}, 3}, {{{"x","2"}}, 5} }, - { {{{"x","1"}}, 3} }, - { {{{"x","2"}}, 5} }); - f.assertMax({ {{{"x","1"}}, 5} }, - { {{{"x","1"}}, 3} }, - { {{{"x","1"}}, 5} }); - f.assertMax({ {{{"x","1"}}, 3} }, - { {{{"x","1"}}, 3} }, - { {{{"x","1"}}, -5} }); - f.assertMax({ {{{"x","1"}}, 3}, {{{"x","2"}}, 0} }, - { {{{"x","1"}}, 3} }, - { {{{"x","2"}}, 0} }); - f.assertMax({ {{{"x","1"}}, 3}, {{{"x","2"}}, -5} }, - { {{{"x","1"}}, 3} }, - { {{{"x","2"}}, -5} }); - f.assertMax({ {{{"x","1"}}, 3}, {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }, - { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }, - { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }); - f.assertMax({ {{{"x","1"}}, 3}, {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }, - { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }, - { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }); - f.assertMax({ {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }, - { {{{"y","2"}}, 5} }, - { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }); - f.assertMax({ {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }, - { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }, - { {{{"y","2"}}, 5} }); - f.assertMax({ {{{"x","1"}}, 3}, {{{"y","2"}}, 7} }, - { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }, - { {{{"y","2"}}, 7} }); - f.assertMax({ {{{"x","1"}}, 3}, {{{"y","2"}}, 7} }, - { {{{"y","2"}}, 7} }, - 
{ {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }); - f.assertMax({ {{{"x","1"}}, 3}, {{{"z","3"}}, 11} }, - { {{{"x","1"}}, 3} }, - { {{{"z","3"}}, 11} }); - f.assertMax({ {{{"x","1"}}, 3}, {{{"z","3"}}, 11} }, - { {{{"z","3"}}, 11} }, - { {{{"x","1"}}, 3} }); + TEST_DO(f.assertMax({}, { "x" }, + { {{{"x","1"}}, 3} }, + { {{{"x","2"}}, 5} })); + TEST_DO(f.assertMax({ {{{"x","1"}}, 5} }, + { {{{"x","1"}}, 3} }, + { {{{"x","1"}}, 5} })); + TEST_DO(f.assertMax({ {{{"x","1"}}, 3} }, + { {{{"x","1"}}, 3} }, + { {{{"x","1"}}, -5} })); + TEST_DO(f.assertMax({}, { "x" }, + { {{{"x","1"}}, 3} }, + { {{{"x","2"}}, 0} })); + TEST_DO(f.assertMax({}, { "x" }, + { {{{"x","1"}}, 3} }, + { {{{"x","2"}}, -5} })); + TEST_DO(f.assertMax({ {{{"x","1"},{"z","3"}}, 11}, {{{"y","2"}}, 7} }, + { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }, + { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} })); + TEST_DO(f.assertMax({ {{{"x","1"},{"z","3"}}, 11}, {{{"y","2"}}, 7} }, + { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }, + { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} })); + TEST_DO(f.assertMax({ {{{"y","2"}}, 7} }, { "y", "z" }, + { {{{"y","2"}}, 5} }, + { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} })); + TEST_DO(f.assertMax({ {{{"y","2"}}, 7} }, { "y", "z" }, + { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }, + { {{{"y","2"}}, 5} })); + TEST_DO(f.assertMax({ {{{"y","2"}}, 7} }, { "x", "y" }, + { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }, + { {{{"y","2"}}, 7} })); + TEST_DO(f.assertMax({ {{{"y","2"}}, 7} }, { "x", "y" }, + { {{{"y","2"}}, 7} }, + { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} })); + TEST_DO(f.assertMax({ {{{"x","1"},{"z","3"}}, 11} }, + { {{{"x","1"}}, 3} }, + { {{{"z","3"}}, 11} })); + TEST_DO(f.assertMax({ {{{"x","1"},{"z","3"}}, 11} }, + { {{{"z","3"}}, 11} }, + { {{{"x","1"}}, 3} })); } template <typename FixtureType> @@ -616,17 +623,7 @@ testAllTensorOperations(FixtureType &f) TEST_DO(testTensorSumDimension(f)); } -TEST_F("test tensor operations for SimpleTensor", SimpleFixture) -{ - testAllTensorOperations(f); -} - -TEST_F("test tensor 
operations for CompactTensor", CompactFixture) -{ - testAllTensorOperations(f); -} - -TEST_F("test tensor operations for CompactTensorV2", CompactV2Fixture) +TEST_F("test tensor operations for SparseTensor", SparseFixture) { testAllTensorOperations(f); } diff --git a/vespalib/src/tests/tensor/tensor_performance/tensor_performance_test.cpp b/vespalib/src/tests/tensor/tensor_performance/tensor_performance_test.cpp index 2b9370bb606..814f18c8cae 100644 --- a/vespalib/src/tests/tensor/tensor_performance/tensor_performance_test.cpp +++ b/vespalib/src/tests/tensor/tensor_performance/tensor_performance_test.cpp @@ -3,12 +3,8 @@ #include <vespa/vespalib/eval/function.h> #include <vespa/vespalib/eval/interpreted_function.h> #include <vespa/vespalib/eval/tensor_nodes.h> -#include <vespa/vespalib/tensor/compact/compact_tensor.h> -#include <vespa/vespalib/tensor/compact/compact_tensor_builder.h> -#include <vespa/vespalib/tensor/compact/compact_tensor_v2.h> -#include <vespa/vespalib/tensor/compact/compact_tensor_v2_builder.h> -#include <vespa/vespalib/tensor/simple/simple_tensor.h> -#include <vespa/vespalib/tensor/simple/simple_tensor_builder.h> +#include <vespa/vespalib/tensor/sparse/sparse_tensor.h> +#include <vespa/vespalib/tensor/sparse/sparse_tensor_builder.h> #include <vespa/vespalib/tensor/dense/dense_tensor_builder.h> #include <vespa/vespalib/tensor/tensor.h> #include <vespa/vespalib/tensor/tensor_builder.h> @@ -57,9 +53,21 @@ void inject_params(const Function &function, const Params ¶ms, } } +std::vector<ValueType> extract_param_types(const Function &function, const Params ¶ms) { + std::vector<ValueType> result; + EXPECT_EQUAL(params.map.size(), function.num_params()); + for (size_t i = 0; i < function.num_params(); ++i) { + auto param = params.map.find(function.param_name(i)); + ASSERT_TRUE(param != params.map.end()); + result.push_back(param->second->type()); + } + return result; +} + double calculate_expression(const vespalib::string &expression, const Params ¶ms) { 
const Function function = Function::parse(expression); - const InterpretedFunction interpreted(tensor::DefaultTensorEngine::ref(), function); + const NodeTypes types(function, extract_param_types(function, params)); + const InterpretedFunction interpreted(tensor::DefaultTensorEngine::ref(), function, types); InterpretedFunction::Context context; inject_params(function, params, context); const Value &result = interpreted.eval(context); @@ -72,7 +80,8 @@ const Value &dummy_ranking(InterpretedFunction::Context &) { return dummy_result double benchmark_expression_us(const vespalib::string &expression, const Params ¶ms) { const Function function = Function::parse(expression); - const InterpretedFunction interpreted(tensor::DefaultTensorEngine::ref(), function); + const NodeTypes types(function, extract_param_types(function, params)); + const InterpretedFunction interpreted(tensor::DefaultTensorEngine::ref(), function, types); InterpretedFunction::Context context; inject_params(function, params, context); auto ranking = [&](){ interpreted.eval(context); }; @@ -86,7 +95,7 @@ tensor::Tensor::UP parse_tensor(const vespalib::string &tensor_str) { Function function = Function::parse(tensor_str); auto tensor = nodes::as<nodes::Tensor>(function.root()); ASSERT_TRUE(tensor); - SimpleTensorBuilder builder; + SparseTensorBuilder builder; for (const auto &cell: tensor->cells()) { for (const auto &dimension: cell.first) { builder.add_label(builder.define_dimension(dimension.first), dimension.second); @@ -117,8 +126,8 @@ TEST("SMOKETEST - require that model match benchmark expression produces expecte TEST("SMOKETEST - require that matrix product benchmark expression produces expected result") { Params params; - params.add("query", parse_tensor("{{x:0}:1.0}")); - params.add("document", parse_tensor("{{x:1}:2.0}")); + params.add("query", parse_tensor("{{x:0}:1.0,{x:1}:0.0}")); + params.add("document", parse_tensor("{{x:0}:0.0,{x:1}:2.0}")); params.add("model", 
parse_tensor("{{x:0,y:0}:1.0,{x:0,y:1}:2.0," " {x:1,y:0}:3.0,{x:1,y:1}:4.0}")); EXPECT_EQUAL(calculate_expression(matrix_product_expr, params), 17.0); @@ -226,22 +235,18 @@ tensor::Tensor::UP make_tensor_impl(const std::vector<DimensionSpec> &dimensions //----------------------------------------------------------------------------- -enum class BuilderType { DUMMY, SIMPLE, COMPACT, COMPACTV2, NUMBERDUMMY, +enum class BuilderType { DUMMY, SPARSE, NUMBERDUMMY, DENSE }; const BuilderType DUMMY = BuilderType::DUMMY; -const BuilderType SIMPLE = BuilderType::SIMPLE; -const BuilderType COMPACT = BuilderType::COMPACT; -const BuilderType COMPACTV2 = BuilderType::COMPACTV2; +const BuilderType SPARSE = BuilderType::SPARSE; const BuilderType NUMBERDUMMY = BuilderType::NUMBERDUMMY; const BuilderType DENSE = BuilderType::DENSE; const char *name(BuilderType type) { switch (type) { case BuilderType::DUMMY: return " dummy"; - case BuilderType::SIMPLE: return " simple"; - case BuilderType::COMPACT: return "compact"; - case BuilderType::COMPACTV2: return "compactv2"; + case BuilderType::SPARSE: return "sparse"; case BuilderType::NUMBERDUMMY: return "numberdummy"; case BuilderType::DENSE: return "dense"; } @@ -253,14 +258,8 @@ tensor::Tensor::UP make_tensor(BuilderType type, const std::vector<DimensionSpec case BuilderType::DUMMY: return make_tensor_impl<DummyBuilder, TensorBuilder, StringBinding> (dimensions); - case BuilderType::SIMPLE: - return make_tensor_impl<SimpleTensorBuilder, TensorBuilder, - StringBinding>(dimensions); - case BuilderType::COMPACT: - return make_tensor_impl<CompactTensorBuilder, TensorBuilder, - StringBinding>(dimensions); - case BuilderType::COMPACTV2: - return make_tensor_impl<CompactTensorV2Builder, TensorBuilder, + case BuilderType::SPARSE: + return make_tensor_impl<SparseTensorBuilder, TensorBuilder, StringBinding>(dimensions); case BuilderType::NUMBERDUMMY: return make_tensor_impl<DummyDenseTensorBuilder, @@ -289,7 +288,7 @@ double 
benchmark_build_us(BuilderType type, const std::vector<DimensionSpec> &sp TEST("benchmark create/destroy time for 1d tensors") { for (size_t size: {5, 10, 25, 50, 100, 250, 500}) { - for (auto type: {SIMPLE, COMPACT, COMPACTV2, DENSE}) { + for (auto type: {SPARSE, DENSE}) { double time_us = benchmark_build_us(type, {DimensionSpec("x", size)}); fprintf(stderr, "-- 1d tensor create/destroy (%s) with size %zu: %g us\n", name(type), size, time_us); } @@ -298,7 +297,7 @@ TEST("benchmark create/destroy time for 1d tensors") { TEST("benchmark create/destroy time for 2d tensors") { for (size_t size: {5, 10, 25, 50, 100}) { - for (auto type: {SIMPLE, COMPACT, COMPACTV2, DENSE}) { + for (auto type: {SPARSE, DENSE}) { double time_us = benchmark_build_us(type, {DimensionSpec("x", size), DimensionSpec("y", size)}); fprintf(stderr, "-- 2d tensor create/destroy (%s) with size %zux%zu: %g us\n", name(type), size, size, time_us); } @@ -309,7 +308,7 @@ TEST("benchmark create/destroy time for 2d tensors") { TEST("benchmark dot product using match") { for (size_t size: {10, 25, 50, 100, 250}) { - for (auto type: {SIMPLE, COMPACT, COMPACTV2, DENSE}) { + for (auto type: {SPARSE, DENSE}) { Params params; params.add("query", make_tensor(type, {DimensionSpec("x", size)})); params.add("document", make_tensor(type, {DimensionSpec("x", size)})); @@ -321,7 +320,7 @@ TEST("benchmark dot product using match") { TEST("benchmark dot product using multiply") { for (size_t size: {10, 25, 50, 100, 250}) { - for (auto type: {SIMPLE, COMPACT, COMPACTV2, DENSE}) { + for (auto type: {SPARSE, DENSE}) { Params params; params.add("query", make_tensor(type, {DimensionSpec("x", size)})); params.add("document", make_tensor(type, {DimensionSpec("x", size)})); @@ -335,7 +334,7 @@ TEST("benchmark model match") { for (size_t model_size: {25, 50, 100}) { for (size_t vector_size: {5, 10, 25, 50, 100}) { if (vector_size <= model_size) { - for (auto type: {SIMPLE, COMPACT, COMPACTV2}) { + for (auto type: {SPARSE}) { 
Params params; params.add("query", make_tensor(type, {DimensionSpec("x", vector_size)})); params.add("document", make_tensor(type, {DimensionSpec("y", vector_size)})); @@ -351,14 +350,10 @@ TEST("benchmark model match") { TEST("benchmark matrix product") { for (size_t vector_size: {5, 10, 25, 50}) { size_t matrix_size = vector_size * 2; - for (auto type: {SIMPLE, COMPACT, COMPACTV2, DENSE}) { + for (auto type: {SPARSE, DENSE}) { Params params; - size_t document_size = vector_size; - if (type == DENSE) { - document_size = matrix_size; - } - params.add("query", make_tensor(type, {DimensionSpec("x", vector_size, vector_size)})); - params.add("document", make_tensor(type, {DimensionSpec("x", document_size)})); + params.add("query", make_tensor(type, {DimensionSpec("x", matrix_size)})); + params.add("document", make_tensor(type, {DimensionSpec("x", matrix_size)})); params.add("model", make_tensor(type, {DimensionSpec("x", matrix_size), DimensionSpec("y", matrix_size)})); double time_us = benchmark_expression_us(matrix_product_expr, params); fprintf(stderr, "-- matrix product (%s) %zu + %zu vs %zux%zu: %g us\n", name(type), vector_size, vector_size, matrix_size, matrix_size, time_us); diff --git a/vespalib/src/tests/tensor/tensor_serialization/tensor_serialization_test.cpp b/vespalib/src/tests/tensor/tensor_serialization/tensor_serialization_test.cpp index 6a4fcefba9c..d1d713c2987 100644 --- a/vespalib/src/tests/tensor/tensor_serialization/tensor_serialization_test.cpp +++ b/vespalib/src/tests/tensor/tensor_serialization/tensor_serialization_test.cpp @@ -1,12 +1,8 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include <vespa/vespalib/testkit/test_kit.h> -#include <vespa/vespalib/tensor/simple/simple_tensor.h> -#include <vespa/vespalib/tensor/simple/simple_tensor_builder.h> -#include <vespa/vespalib/tensor/compact/compact_tensor.h> -#include <vespa/vespalib/tensor/compact/compact_tensor_builder.h> -#include <vespa/vespalib/tensor/compact/compact_tensor_v2.h> -#include <vespa/vespalib/tensor/compact/compact_tensor_v2_builder.h> +#include <vespa/vespalib/tensor/sparse/sparse_tensor.h> +#include <vespa/vespalib/tensor/sparse/sparse_tensor_builder.h> #include <vespa/vespalib/tensor/types.h> #include <vespa/vespalib/tensor/default_tensor.h> #include <vespa/vespalib/tensor/tensor_factory.h> @@ -101,9 +97,7 @@ struct Fixture } }; -using SimpleFixture = Fixture<SimpleTensorBuilder>; -using CompactFixture = Fixture<CompactTensorBuilder>; -using CompactV2Fixture = Fixture<CompactTensorV2Builder>; +using SparseFixture = Fixture<SparseTensorBuilder>; template <typename FixtureType> @@ -140,17 +134,7 @@ testTensorSerialization(FixtureType &f) { {{{"x","1"}}, 3} }, {"x", "y"})); } -TEST_F("test tensor serialization for SimpleTensor", SimpleFixture) -{ - testTensorSerialization(f); -} - -TEST_F("test tensor serialization for CompactTensor", CompactFixture) -{ - testTensorSerialization(f); -} - -TEST_F("test tensor serialization for CompactTensorV2", CompactV2Fixture) +TEST_F("test tensor serialization for SparseTensor", SparseFixture) { testTensorSerialization(f); } diff --git a/vespalib/src/tests/tensor/tensor_slime_serialization/tensor_slime_serialization_test.cpp b/vespalib/src/tests/tensor/tensor_slime_serialization/tensor_slime_serialization_test.cpp index f53b42c433e..f3005a21730 100644 --- a/vespalib/src/tests/tensor/tensor_slime_serialization/tensor_slime_serialization_test.cpp +++ b/vespalib/src/tests/tensor/tensor_slime_serialization/tensor_slime_serialization_test.cpp @@ -1,12 +1,8 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. 
See LICENSE in the project root. #include <vespa/vespalib/testkit/test_kit.h> -#include <vespa/vespalib/tensor/simple/simple_tensor.h> -#include <vespa/vespalib/tensor/simple/simple_tensor_builder.h> -#include <vespa/vespalib/tensor/compact/compact_tensor.h> -#include <vespa/vespalib/tensor/compact/compact_tensor_builder.h> -#include <vespa/vespalib/tensor/compact/compact_tensor_v2.h> -#include <vespa/vespalib/tensor/compact/compact_tensor_v2_builder.h> +#include <vespa/vespalib/tensor/sparse/sparse_tensor.h> +#include <vespa/vespalib/tensor/sparse/sparse_tensor_builder.h> #include <vespa/vespalib/tensor/types.h> #include <vespa/vespalib/tensor/default_tensor.h> #include <vespa/vespalib/tensor/tensor_factory.h> @@ -46,20 +42,10 @@ struct Fixture template <> uint32_t -Fixture<SimpleTensorBuilder>::getTensorTypeId() { return 0u; } +Fixture<SparseTensorBuilder>::getTensorTypeId() { return 2u; } -template <> -uint32_t -Fixture<CompactTensorBuilder>::getTensorTypeId() { return 1u; } -template <> -uint32_t -Fixture<CompactTensorV2Builder>::getTensorTypeId() { return 2u; } - - -using SimpleFixture = Fixture<SimpleTensorBuilder>; -using CompactFixture = Fixture<CompactTensorBuilder>; -using CompactV2Fixture = Fixture<CompactTensorV2Builder>; +using SparseFixture = Fixture<SparseTensorBuilder>; namespace { @@ -128,17 +114,7 @@ testTensorSlimeSerialization(FixtureType &f) {"x", "y"})); } -TEST_F("test tensor slime serialization for SimpleTensor", SimpleFixture) -{ - testTensorSlimeSerialization(f); -} - -TEST_F("test tensor slime serialization for CompactTensor", CompactFixture) -{ - testTensorSlimeSerialization(f); -} - -TEST_F("test tensor slime serialization for CompactTensorV2", CompactV2Fixture) +TEST_F("test tensor slime serialization for SparseTensor", SparseFixture) { testTensorSlimeSerialization(f); } diff --git a/vespalib/src/vespa/vespalib/data/memorydatastore.cpp b/vespalib/src/vespa/vespalib/data/memorydatastore.cpp index e6b22aa9e3f..791ea0cea50 100644 --- 
a/vespalib/src/vespa/vespalib/data/memorydatastore.cpp +++ b/vespalib/src/vespa/vespalib/data/memorydatastore.cpp @@ -3,13 +3,15 @@ namespace vespalib { -MemoryDataStore::MemoryDataStore(size_t initialSize, Lock * lock) : +using alloc::Alloc; + +MemoryDataStore::MemoryDataStore(Alloc && initialAlloc, Lock * lock) : _buffers(), _writePos(0), _lock(lock) { _buffers.reserve(24); - _buffers.emplace_back(initialSize); + _buffers.emplace_back(std::move(initialAlloc)); } MemoryDataStore::~MemoryDataStore() @@ -26,7 +28,7 @@ MemoryDataStore::push_back(const void * data, const size_t sz) const Alloc & b = _buffers.back(); if ((sz + _writePos) > b.size()) { size_t newSize(std::max(sz, _buffers.back().size()*2)); - _buffers.emplace_back(newSize); + _buffers.emplace_back(b.create(newSize)); _writePos = 0; } Alloc & buf = _buffers.back(); @@ -39,7 +41,7 @@ MemoryDataStore::push_back(const void * data, const size_t sz) VariableSizeVector::VariableSizeVector(size_t initialSize) : _vector(), - _store(initialSize) + _store(DefaultAlloc::create(initialSize)) { } diff --git a/vespalib/src/vespa/vespalib/data/memorydatastore.h b/vespalib/src/vespa/vespalib/data/memorydatastore.h index 9fa56d64a31..b61fa3665ad 100644 --- a/vespalib/src/vespa/vespalib/data/memorydatastore.h +++ b/vespalib/src/vespa/vespalib/data/memorydatastore.h @@ -23,7 +23,7 @@ public: private: void * _data; }; - MemoryDataStore(size_t initialSize=256, Lock * lock=nullptr); + MemoryDataStore(alloc::Alloc && initialAlloc=DefaultAlloc::create(256), Lock * lock=nullptr); MemoryDataStore(const MemoryDataStore &) = delete; MemoryDataStore & operator = (const MemoryDataStore &) = delete; ~MemoryDataStore(); @@ -38,12 +38,12 @@ public: _buffers.clear(); } private: - std::vector<DefaultAlloc> _buffers; + std::vector<alloc::Alloc> _buffers; size_t _writePos; Lock * _lock; }; -class VariableSizeVector : public noncopyable +class VariableSizeVector { public: class Reference { @@ -96,6 +96,8 @@ public: const 
vespalib::Array<Reference> * _vector; size_t _index; }; + VariableSizeVector(const VariableSizeVector &) = delete; + VariableSizeVector & operator = (const VariableSizeVector &) = delete; VariableSizeVector(size_t initialSize=256); ~VariableSizeVector(); iterator begin() { return iterator(_vector, 0); } diff --git a/vespalib/src/vespa/vespalib/eval/basic_nodes.cpp b/vespalib/src/vespa/vespalib/eval/basic_nodes.cpp index 241900ea156..6d1a18dff03 100644 --- a/vespalib/src/vespa/vespalib/eval/basic_nodes.cpp +++ b/vespalib/src/vespa/vespalib/eval/basic_nodes.cpp @@ -26,7 +26,7 @@ struct Frame { double Node::get_const_value() const { assert(is_const()); - InterpretedFunction function(SimpleTensorEngine::ref(), *this, 0); + InterpretedFunction function(SimpleTensorEngine::ref(), *this, 0, NodeTypes()); InterpretedFunction::Context ctx; return function.eval(ctx).as_double(); } diff --git a/vespalib/src/vespa/vespalib/eval/interpreted_function.cpp b/vespalib/src/vespa/vespalib/eval/interpreted_function.cpp index ae7e2aaae81..049d2ef81c4 100644 --- a/vespalib/src/vespa/vespalib/eval/interpreted_function.cpp +++ b/vespalib/src/vespa/vespalib/eval/interpreted_function.cpp @@ -128,9 +128,10 @@ struct ProgramBuilder : public NodeVisitor, public NodeTraverser { std::vector<Instruction> &program; Stash &stash; const TensorEngine &tensor_engine; + const NodeTypes &types; - ProgramBuilder(std::vector<Instruction> &program_in, Stash &stash_in, const TensorEngine &tensor_engine_in) - : program(program_in), stash(stash_in), tensor_engine(tensor_engine_in) {} + ProgramBuilder(std::vector<Instruction> &program_in, Stash &stash_in, const TensorEngine &tensor_engine_in, const NodeTypes &types_in) + : program(program_in), stash(stash_in), tensor_engine(tensor_engine_in), types(types_in) {} //------------------------------------------------------------------------- @@ -370,13 +371,13 @@ struct ProgramBuilder : public NodeVisitor, public NodeTraverser { } // namespace vespalib::<unnamed> 
-InterpretedFunction::InterpretedFunction(const TensorEngine &engine, const nodes::Node &root, size_t num_params_in) +InterpretedFunction::InterpretedFunction(const TensorEngine &engine, const nodes::Node &root, size_t num_params_in, const NodeTypes &types) : _program(), _stash(), _num_params(num_params_in), _tensor_engine(engine) { - ProgramBuilder program_builder(_program, _stash, _tensor_engine); + ProgramBuilder program_builder(_program, _stash, _tensor_engine, types); root.traverse(program_builder); } diff --git a/vespalib/src/vespa/vespalib/eval/interpreted_function.h b/vespalib/src/vespa/vespalib/eval/interpreted_function.h index a271ee90153..73d0f237131 100644 --- a/vespalib/src/vespa/vespalib/eval/interpreted_function.h +++ b/vespalib/src/vespa/vespalib/eval/interpreted_function.h @@ -5,6 +5,7 @@ #include "function.h" #include <vespa/vespalib/util/stash.h> #include "simple_tensor_engine.h" +#include "node_types.h" namespace vespalib { namespace eval { @@ -86,9 +87,9 @@ private: public: typedef std::unique_ptr<InterpretedFunction> UP; - InterpretedFunction(const TensorEngine &engine, const nodes::Node &root, size_t num_params_in); - InterpretedFunction(const TensorEngine &engine, const Function &function) - : InterpretedFunction(engine, function.root(), function.num_params()) {} + InterpretedFunction(const TensorEngine &engine, const nodes::Node &root, size_t num_params_in, const NodeTypes &types); + InterpretedFunction(const TensorEngine &engine, const Function &function, const NodeTypes &types) + : InterpretedFunction(engine, function.root(), function.num_params(), types) {} InterpretedFunction(InterpretedFunction &&rhs) = default; size_t num_params() const { return _num_params; } const Value &eval(Context &ctx) const; diff --git a/vespalib/src/vespa/vespalib/eval/node_types.cpp b/vespalib/src/vespa/vespalib/eval/node_types.cpp index ba9dbb2ecd1..acd8d7beca1 100644 --- a/vespalib/src/vespa/vespalib/eval/node_types.cpp +++ 
b/vespalib/src/vespa/vespalib/eval/node_types.cpp @@ -271,6 +271,12 @@ struct TypeResolver : public NodeVisitor, public NodeTraverser { } // namespace vespalib::eval::nodes::<unnamed> } // namespace vespalib::eval::nodes +NodeTypes::NodeTypes() + : _not_found(ValueType::any_type()), + _type_map() +{ +} + NodeTypes::NodeTypes(const Function &function, const std::vector<ValueType> &input_types) : _not_found(ValueType::error_type()), _type_map() @@ -299,7 +305,7 @@ NodeTypes::all_types_are_double() const return false; } } - return true; + return (_type_map.size() > 0); } } // namespace vespalib::eval diff --git a/vespalib/src/vespa/vespalib/eval/node_types.h b/vespalib/src/vespa/vespalib/eval/node_types.h index c137d6dc43b..1957accd959 100644 --- a/vespalib/src/vespa/vespalib/eval/node_types.h +++ b/vespalib/src/vespa/vespalib/eval/node_types.h @@ -15,7 +15,8 @@ class Function; * Class keeping track of the output type of all intermediate * calculations for a single function. The constructor performs type * resolution for each node in the AST based on the type of all - * function parameters. + * function parameters. The default constructor creates an empty type + * repo representing an unknown number of unknown values. 
**/ class NodeTypes { @@ -23,6 +24,7 @@ private: ValueType _not_found; std::map<const nodes::Node*,ValueType> _type_map; public: + NodeTypes(); NodeTypes(const Function &function, const std::vector<ValueType> &input_types); const ValueType &get_type(const nodes::Node &node) const; bool all_types_are_double() const; diff --git a/vespalib/src/vespa/vespalib/eval/simple_tensor_engine.cpp b/vespalib/src/vespa/vespalib/eval/simple_tensor_engine.cpp index 6e2e7778bc7..06e514e51ba 100644 --- a/vespalib/src/vespa/vespalib/eval/simple_tensor_engine.cpp +++ b/vespalib/src/vespa/vespalib/eval/simple_tensor_engine.cpp @@ -54,6 +54,30 @@ SimpleTensorEngine::to_string(const Tensor &tensor) const return out; } +TensorSpec +SimpleTensorEngine::to_spec(const Tensor &tensor) const +{ + assert(&tensor.engine() == this); + const SimpleTensor &simple_tensor = static_cast<const SimpleTensor&>(tensor); + ValueType type = simple_tensor.type(); + const auto &dimensions = type.dimensions(); + TensorSpec spec(type.to_spec()); + for (const auto &cell: simple_tensor.cells()) { + TensorSpec::Address addr; + assert(cell.address.size() == dimensions.size()); + for (size_t i = 0; i < cell.address.size(); ++i) { + const auto &label = cell.address[i]; + if (label.is_mapped()) { + addr.emplace(dimensions[i].name, TensorSpec::Label(label.name)); + } else { + addr.emplace(dimensions[i].name, TensorSpec::Label(label.index)); + } + } + spec.add(addr, cell.value); + } + return spec; +} + std::unique_ptr<eval::Tensor> SimpleTensorEngine::create(const TensorSpec &spec) const { diff --git a/vespalib/src/vespa/vespalib/eval/simple_tensor_engine.h b/vespalib/src/vespa/vespalib/eval/simple_tensor_engine.h index 4013aa9de5b..c3207c440fb 100644 --- a/vespalib/src/vespa/vespalib/eval/simple_tensor_engine.h +++ b/vespalib/src/vespa/vespalib/eval/simple_tensor_engine.h @@ -22,6 +22,7 @@ public: ValueType type_of(const Tensor &tensor) const override; bool equal(const Tensor &a, const Tensor &b) const override; 
vespalib::string to_string(const Tensor &tensor) const override; + TensorSpec to_spec(const Tensor &tensor) const override; std::unique_ptr<Tensor> create(const TensorSpec &spec) const override; const Value &reduce(const Tensor &tensor, const BinaryOperation &op, const std::vector<vespalib::string> &dimensions, Stash &stash) const override; diff --git a/vespalib/src/vespa/vespalib/eval/tensor_engine.h b/vespalib/src/vespa/vespalib/eval/tensor_engine.h index 637d549a55d..2458da7ff8b 100644 --- a/vespalib/src/vespa/vespalib/eval/tensor_engine.h +++ b/vespalib/src/vespa/vespalib/eval/tensor_engine.h @@ -41,6 +41,7 @@ struct TensorEngine virtual ValueType type_of(const Tensor &tensor) const = 0; virtual bool equal(const Tensor &a, const Tensor &b) const = 0; virtual vespalib::string to_string(const Tensor &tensor) const = 0; + virtual TensorSpec to_spec(const Tensor &tensor) const = 0; virtual TensorFunction::UP compile(tensor_function::Node_UP expr) const { return std::move(expr); } diff --git a/vespalib/src/vespa/vespalib/eval/tensor_spec.cpp b/vespalib/src/vespa/vespalib/eval/tensor_spec.cpp index 28cda1b2962..eec930b8da4 100644 --- a/vespalib/src/vespa/vespalib/eval/tensor_spec.cpp +++ b/vespalib/src/vespa/vespalib/eval/tensor_spec.cpp @@ -1,10 +1,49 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include <vespa/fastos/fastos.h> +#include <vespa/vespalib/util/stringfmt.h> #include "tensor_spec.h" +#include <iostream> namespace vespalib { namespace eval { +vespalib::string +TensorSpec::to_string() const +{ + vespalib::string out = vespalib::make_string("spec(%s) {\n", _type.c_str()); + for (const auto &cell: _cells) { + size_t n = 0; + out.append(" ["); + for (const auto &label: cell.first) { + if (n++) { + out.append(","); + } + if (label.second.is_mapped()) { + out.append(label.second.name); + } else { + out.append(vespalib::make_string("%zu", label.second.index)); + } + } + out.append(vespalib::make_string("]: %g\n", cell.second.value)); + } + out.append("}"); + return out; +} + +bool +operator==(const TensorSpec &lhs, const TensorSpec &rhs) +{ + return ((lhs.type() == rhs.type()) && + (lhs.cells() == rhs.cells())); +} + +std::ostream & +operator<<(std::ostream &out, const TensorSpec &spec) +{ + out << spec.to_string(); + return out; +} + } // namespace vespalib::eval } // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/eval/tensor_spec.h b/vespalib/src/vespa/vespalib/eval/tensor_spec.h index aff23a42832..41c1f8d4f3c 100644 --- a/vespalib/src/vespa/vespalib/eval/tensor_spec.h +++ b/vespalib/src/vespa/vespalib/eval/tensor_spec.h @@ -3,6 +3,7 @@ #pragma once #include <vespa/vespalib/stllike/string.h> +#include <vespa/vespalib/util/approx.h> #include <memory> #include <map> @@ -25,6 +26,10 @@ public: Label(const char *name_in) : index(npos), name(name_in) {} bool is_mapped() const { return (index == npos); } bool is_indexed() const { return (index != npos); } + bool operator==(const Label &rhs) const { + return ((index == rhs.index) && + (name == rhs.name)); + } bool operator<(const Label &rhs) const { if (index != rhs.index) { return (index < rhs.index); @@ -32,8 +37,14 @@ public: return (name < rhs.name); } }; + struct Value { + double value; + Value(double value_in) : value(value_in) {} + operator double() const { return value; } + bool 
operator==(const Value &rhs) const { return approx_equal(value, rhs.value); } + }; using Address = std::map<vespalib::string,Label>; - using Cells = std::map<Address,double>; + using Cells = std::map<Address,Value>; private: vespalib::string _type; Cells _cells; @@ -45,7 +56,11 @@ public: } const vespalib::string &type() const { return _type; } const Cells &cells() const { return _cells; } + vespalib::string to_string() const; }; +bool operator==(const TensorSpec &lhs, const TensorSpec &rhs); +std::ostream &operator<<(std::ostream &out, const TensorSpec &tensor); + } // namespace vespalib::eval } // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/eval/test/tensor_conformance.cpp b/vespalib/src/vespa/vespalib/eval/test/tensor_conformance.cpp index c844a59ca47..00aee32b99a 100644 --- a/vespalib/src/vespa/vespalib/eval/test/tensor_conformance.cpp +++ b/vespalib/src/vespa/vespalib/eval/test/tensor_conformance.cpp @@ -3,9 +3,11 @@ #include <vespa/fastos/fastos.h> #include <vespa/vespalib/testkit/test_kit.h> #include "tensor_conformance.h" +#include <vespa/vespalib/util/stringfmt.h> #include <vespa/vespalib/eval/simple_tensor_engine.h> #include <vespa/vespalib/eval/tensor_spec.h> #include <vespa/vespalib/eval/function.h> +#include <vespa/vespalib/eval/tensor_function.h> #include <vespa/vespalib/eval/interpreted_function.h> namespace vespalib { @@ -13,33 +15,29 @@ namespace eval { namespace test { namespace { -// virtual ValueType type_of(const Tensor &tensor) const = 0; -// virtual bool equal(const Tensor &a, const Tensor &b) const = 0; - -// virtual TensorFunction::UP compile(tensor_function::Node_UP expr) const { return std::move(expr); } - -// virtual std::unique_ptr<Tensor> create(const TensorSpec &spec) const = 0; - -// virtual const Value &reduce(const Tensor &tensor, const BinaryOperation &op, const std::vector<vespalib::string> &dimensions, Stash &stash) const = 0; -// virtual const Value &map(const UnaryOperation &op, const Tensor &a, Stash &stash) 
const = 0; -// virtual const Value &apply(const BinaryOperation &op, const Tensor &a, const Tensor &b, Stash &stash) const = 0; - // Random access sequence of numbers struct Sequence { - virtual double get(size_t i) const = 0; + virtual double operator[](size_t i) const = 0; virtual ~Sequence() {} }; // Sequence of natural numbers (starting at 1) struct N : Sequence { - double get(size_t i) const override { return (1.0 + i); } + double operator[](size_t i) const override { return (1.0 + i); } }; // Sequence of another sequence divided by 10 struct Div10 : Sequence { const Sequence &seq; Div10(const Sequence &seq_in) : seq(seq_in) {} - double get(size_t i) const override { return (seq.get(i) / 10.0); } + double operator[](size_t i) const override { return (seq[i] / 10.0); } +}; + +// Sequence of another sequence minus 2 +struct Sub2 : Sequence { + const Sequence &seq; + Sub2(const Sequence &seq_in) : seq(seq_in) {} + double operator[](size_t i) const override { return (seq[i] - 2.0); } }; // Sequence of a unary operator applied to a sequence @@ -47,221 +45,758 @@ struct OpSeq : Sequence { const Sequence &seq; const UnaryOperation &op; OpSeq(const Sequence &seq_in, const UnaryOperation &op_in) : seq(seq_in), op(op_in) {} - double get(size_t i) const override { return op.eval(seq.get(i)); } + double operator[](size_t i) const override { return op.eval(seq[i]); } +}; + +// Sequence of applying sigmoid to another sequence +struct Sigmoid : Sequence { + const Sequence &seq; + Sigmoid(const Sequence &seq_in) : seq(seq_in) {} + double operator[](size_t i) const override { return operation::Sigmoid().eval(seq[i]); } }; // pre-defined sequence of numbers struct Seq : Sequence { std::vector<double> seq; + Seq() : seq() {} Seq(const std::vector<double> &seq_in) : seq(seq_in) {} - double get(size_t i) const override { + double operator[](size_t i) const override { ASSERT_LESS(i, seq.size()); return seq[i]; } }; +// Random access bit mask +struct Mask { + virtual bool 
operator[](size_t i) const = 0; + virtual ~Mask() {} +}; + +// Mask with all bits set +struct All : Mask { + bool operator[](size_t) const override { return true; } +}; + +// Mask with no bits set +struct None : Mask { + bool operator[](size_t) const override { return false; } +}; + +// Mask with false for each Nth index +struct SkipNth : Mask { + size_t n; + SkipNth(size_t n_in) : n(n_in) {} + bool operator[](size_t i) const override { return (i % n) != 0; } +}; + +// pre-defined mask +struct Bits : Mask { + std::vector<bool> bits; + Bits(const std::vector<bool> &bits_in) : bits(bits_in) {} + bool operator[](size_t i) const override { + ASSERT_LESS(i, bits.size()); + return bits[i]; + } +}; + +// A mask converted to a sequence of two unique values (mapped from true and false) +struct Mask2Seq : Sequence { + const Mask &mask; + double true_value; + double false_value; + Mask2Seq(const Mask &mask_in, double true_value_in = 1.0, double false_value_in = 0.0) + : mask(mask_in), true_value(true_value_in), false_value(false_value_in) {} + double operator[](size_t i) const override { return mask[i] ? 
true_value : false_value; } +}; + // custom op1 struct MyOp : CustomUnaryOperation { - double eval(double b) const override { return ((b + 1) * 2); } + double eval(double a) const override { return ((a + 1) * 2); } }; // A collection of labels for a single dimension -struct Space { - vespalib::string name; +struct Domain { + vespalib::string dimension; size_t size; // indexed std::vector<vespalib::string> keys; // mapped - Space(const vespalib::string &name_in, size_t size_in) - : name(name_in), size(size_in), keys() {} - Space(const vespalib::string &name_in, const std::vector<vespalib::string> &keys_in) - : name(name_in), size(0), keys(keys_in) {} + Domain(const vespalib::string &dimension_in, size_t size_in) + : dimension(dimension_in), size(size_in), keys() {} + Domain(const vespalib::string &dimension_in, const std::vector<vespalib::string> &keys_in) + : dimension(dimension_in), size(0), keys(keys_in) {} }; +using Layout = std::vector<Domain>; + +Domain x() { return Domain("x", {}); } +Domain x(size_t size) { return Domain("x", size); } +Domain x(const std::vector<vespalib::string> &keys) { return Domain("x", keys); } + +Domain y() { return Domain("y", {}); } +Domain y(size_t size) { return Domain("y", size); } +Domain y(const std::vector<vespalib::string> &keys) { return Domain("y", keys); } + +Domain z(size_t size) { return Domain("z", size); } +Domain z(const std::vector<vespalib::string> &keys) { return Domain("z", keys); } // Infer the tensor type spanned by the given spaces -vespalib::string infer_type(const std::vector<Space> &spaces) { - if (spaces.empty()) { +vespalib::string infer_type(const Layout &layout) { + if (layout.empty()) { return "double"; } std::vector<ValueType::Dimension> dimensions; - for (const auto &space: spaces) { - if (space.size == 0) { - dimensions.emplace_back(space.name); // mapped + for (const auto &domain: layout) { + if (domain.size == 0) { + dimensions.emplace_back(domain.dimension); // mapped } else { - 
dimensions.emplace_back(space.name, space.size); // indexed + dimensions.emplace_back(domain.dimension, domain.size); // indexed } } return ValueType::tensor_type(dimensions).to_spec(); } -// Mix spaces with a number sequence to make a tensor spec +// Wrapper for the things needed to generate a tensor +struct Source { + using Address = TensorSpec::Address; + + const Layout &layout; + const Sequence &seq; + const Mask &mask; + Source(const Layout &layout_in, const Sequence &seq_in, const Mask &mask_in) + : layout(layout_in), seq(seq_in), mask(mask_in) {} +}; + +// Mix layout with a number sequence to make a tensor spec class TensorSpecBuilder { private: using Label = TensorSpec::Label; using Address = TensorSpec::Address; - const std::vector<Space> &_spaces; - const Sequence &_seq; - TensorSpec _spec; - Address _addr; - size_t _gen_idx; + Source _source; + TensorSpec _spec; + Address _addr; + size_t _idx; - void generate(size_t space_idx) { - if (space_idx == _spaces.size()) { - _spec.add(_addr, _seq.get(_gen_idx++)); + void generate(size_t layout_idx) { + if (layout_idx == _source.layout.size()) { + if (_source.mask[_idx]) { + _spec.add(_addr, _source.seq[_idx]); + } + ++_idx; } else { - const Space &space = _spaces[space_idx]; - if (space.size > 0) { // indexed - for (size_t i = 0; i < space.size; ++i) { - _addr.emplace(space.name, Label(i)).first->second = Label(i); - generate(space_idx + 1); + const Domain &domain = _source.layout[layout_idx]; + if (domain.size > 0) { // indexed + for (size_t i = 0; i < domain.size; ++i) { + _addr.emplace(domain.dimension, Label(i)).first->second = Label(i); + generate(layout_idx + 1); } } else { // mapped - for (const vespalib::string &key: space.keys) { - _addr.emplace(space.name, Label(key)).first->second = Label(key); - generate(space_idx + 1); + for (const vespalib::string &key: domain.keys) { + _addr.emplace(domain.dimension, Label(key)).first->second = Label(key); + generate(layout_idx + 1); } } } } public: - 
TensorSpecBuilder(const std::vector<Space> &spaces, const Sequence &seq) - : _spaces(spaces), _seq(seq), _spec(infer_type(spaces)), _addr(), _gen_idx(0) {} + TensorSpecBuilder(const Layout &layout, const Sequence &seq, const Mask &mask) + : _source(layout, seq, mask), _spec(infer_type(layout)), _addr(), _idx(0) {} TensorSpec build() { generate(0); return _spec; } }; +TensorSpec spec(const Layout &layout, const Sequence &seq, const Mask &mask) { + return TensorSpecBuilder(layout, seq, mask).build(); +} +TensorSpec spec(const Layout &layout, const Sequence &seq) { + return spec(layout, seq, All()); +} +TensorSpec spec(const Layout &layout) { + return spec(layout, Seq(), None()); +} +TensorSpec spec(const Domain &domain, const Sequence &seq, const Mask &mask) { + return spec(Layout({domain}), seq, mask); +} +TensorSpec spec(const Domain &domain, const Sequence &seq) { + return spec(Layout({domain}), seq); +} +TensorSpec spec(const Domain &domain) { + return spec(Layout({domain})); +} +TensorSpec spec(double value) { + return spec(Layout({}), Seq({value})); +} +TensorSpec spec() { + return spec(Layout({})); +} + +// abstract evaluation wrapper +struct Eval { + // typed result wrapper + class Result { + private: + enum class Type { ERROR, NUMBER, TENSOR }; + Type _type; + double _number; + TensorSpec _tensor; + public: + Result(const Value &value) : _type(Type::ERROR), _number(error_value), _tensor("error") { + if (value.is_double()) { + _type = Type::NUMBER; + _number = value.as_double(); + _tensor = TensorSpec("double").add({}, _number); + } else if (value.is_tensor()) { + _type = Type::TENSOR; + _tensor = value.as_tensor()->engine().to_spec(*value.as_tensor()); + if (_tensor.type() == "double") { + _number = _tensor.cells().empty() ? 
0.0 : _tensor.cells().begin()->second.value; + } + } + } + bool is_error() const { return (_type == Type::ERROR); } + bool is_number() const { return (_type == Type::NUMBER); } + bool is_tensor() const { return (_type == Type::TENSOR); } + double number() const { + EXPECT_TRUE(is_number()); + return _number; + } + const TensorSpec &tensor() const { + EXPECT_TRUE(is_tensor()); + return _tensor; + } + }; + virtual Result eval(const TensorEngine &) const { + TEST_ERROR("wrong signature"); + return Result(ErrorValue()); + } + virtual Result eval(const TensorEngine &, const TensorSpec &) const { + TEST_ERROR("wrong signature"); + return Result(ErrorValue()); + } + virtual Result eval(const TensorEngine &, const TensorSpec &, const TensorSpec &) const { + TEST_ERROR("wrong signature"); + return Result(ErrorValue()); + } + virtual ~Eval() {} +}; + +// catches exceptions trying to keep the test itself safe from eval side-effects +struct SafeEval : Eval { + const Eval &unsafe; + SafeEval(const Eval &unsafe_in) : unsafe(unsafe_in) {} + Result eval(const TensorEngine &engine) const override { + try { + return unsafe.eval(engine); + } catch (std::exception &e) { + TEST_ERROR(e.what()); + return Result(ErrorValue()); + } + } + Result eval(const TensorEngine &engine, const TensorSpec &a) const override { + try { + return unsafe.eval(engine, a); + } catch (std::exception &e) { + TEST_ERROR(e.what()); + return Result(ErrorValue()); + } + + } + Result eval(const TensorEngine &engine, const TensorSpec &a, const TensorSpec &b) const override { + try { + return unsafe.eval(engine, a, b); + } catch (std::exception &e) { + TEST_ERROR(e.what()); + return Result(ErrorValue()); + } + } +}; +SafeEval safe(const Eval &eval) { return SafeEval(eval); } + +// expression(void) +struct Expr_V : Eval { + const vespalib::string &expr; + Expr_V(const vespalib::string &expr_in) : expr(expr_in) {} + Result eval(const TensorEngine &engine) const override { + Function fun = Function::parse(expr); + 
NodeTypes types(fun, {}); + InterpretedFunction ifun(engine, fun, types); + InterpretedFunction::Context ctx; + return Result(ifun.eval(ctx)); + } +}; + +// expression(tensor) +struct Expr_T : Eval { + const vespalib::string &expr; + Expr_T(const vespalib::string &expr_in) : expr(expr_in) {} + Result eval(const TensorEngine &engine, const TensorSpec &a) const override { + Function fun = Function::parse(expr); + auto a_type = ValueType::from_spec(a.type()); + NodeTypes types(fun, {a_type}); + InterpretedFunction ifun(engine, fun, types); + InterpretedFunction::Context ctx; + TensorValue va(engine.create(a)); + ctx.add_param(va); + return Result(ifun.eval(ctx)); + } +}; + +// expression(tensor,tensor) +struct Expr_TT : Eval { + const vespalib::string &expr; + Expr_TT(const vespalib::string &expr_in) : expr(expr_in) {} + Result eval(const TensorEngine &engine, const TensorSpec &a, const TensorSpec &b) const override { + Function fun = Function::parse(expr); + auto a_type = ValueType::from_spec(a.type()); + auto b_type = ValueType::from_spec(b.type()); + NodeTypes types(fun, {a_type, b_type}); + InterpretedFunction ifun(engine, fun, types); + InterpretedFunction::Context ctx; + TensorValue va(engine.create(a)); + TensorValue vb(engine.create(b)); + ctx.add_param(va); + ctx.add_param(vb); + return Result(ifun.eval(ctx)); + } +}; + +// evaluate tensor reduce operation using tensor engine immediate api +struct ImmediateReduce : Eval { + const BinaryOperation &op; + std::vector<vespalib::string> dimensions; + ImmediateReduce(const BinaryOperation &op_in) : op(op_in), dimensions() {} + ImmediateReduce(const BinaryOperation &op_in, const vespalib::string &dimension) + : op(op_in), dimensions({dimension}) {} + Result eval(const TensorEngine &engine, const TensorSpec &a) const override { + Stash stash; + return Result(engine.reduce(*engine.create(a), op, dimensions, stash)); + } +}; + +// evaluate tensor map operation using tensor engine immediate api +struct ImmediateMap : 
Eval { + const UnaryOperation &op; + ImmediateMap(const UnaryOperation &op_in) : op(op_in) {} + Result eval(const TensorEngine &engine, const TensorSpec &a) const override { + Stash stash; + return Result(engine.map(op, *engine.create(a), stash)); + } +}; + +// evaluate tensor apply operation using tensor engine immediate api +struct ImmediateApply : Eval { + const BinaryOperation &op; + ImmediateApply(const BinaryOperation &op_in) : op(op_in) {} + Result eval(const TensorEngine &engine, const TensorSpec &a, const TensorSpec &b) const override { + Stash stash; + return Result(engine.apply(op, *engine.create(a), *engine.create(b), stash)); + } +}; + +const size_t tensor_id_a = 11; +const size_t tensor_id_b = 12; +const size_t map_operation_id = 22; + +// input used when evaluating in retained mode +struct Input : TensorFunction::Input { + std::vector<TensorValue> tensors; + const UnaryOperation *map_op; + Input(std::unique_ptr<Tensor> a) : tensors(), map_op(nullptr) { + tensors.emplace_back(std::move(a)); + } + Input(std::unique_ptr<Tensor> a, const UnaryOperation &op) : tensors(), map_op(&op) { + tensors.emplace_back(std::move(a)); + } + Input(std::unique_ptr<Tensor> a, std::unique_ptr<Tensor> b) : tensors(), map_op(nullptr) { + tensors.emplace_back(std::move(a)); + tensors.emplace_back(std::move(b)); + } + const Value &get_tensor(size_t id) const override { + size_t offset = (id - tensor_id_a); + ASSERT_GREATER(tensors.size(), offset); + return tensors[offset]; + } + const UnaryOperation &get_map_operation(size_t id) const { + ASSERT_TRUE(map_op != nullptr); + ASSERT_EQUAL(id, map_operation_id); + return *map_op; + } +}; + +// evaluate tensor reduce operation using tensor engine retained api +struct RetainedReduce : Eval { + const BinaryOperation &op; + std::vector<vespalib::string> dimensions; + RetainedReduce(const BinaryOperation &op_in) : op(op_in), dimensions() {} + RetainedReduce(const BinaryOperation &op_in, const vespalib::string &dimension) + : op(op_in), 
dimensions({dimension}) {} + Result eval(const TensorEngine &engine, const TensorSpec &a) const override { + auto a_type = ValueType::from_spec(a.type()); + auto ir = tensor_function::reduce(tensor_function::inject(a_type, tensor_id_a), op, dimensions); + auto fun = engine.compile(std::move(ir)); + Input input(engine.create(a)); + Stash stash; + return Result(fun->eval(input, stash)); + } +}; + +// evaluate tensor map operation using tensor engine retained api +struct RetainedMap : Eval { + const UnaryOperation &op; + RetainedMap(const UnaryOperation &op_in) : op(op_in) {} + Result eval(const TensorEngine &engine, const TensorSpec &a) const override { + auto a_type = ValueType::from_spec(a.type()); + auto ir = tensor_function::map(map_operation_id, tensor_function::inject(a_type, tensor_id_a)); + auto fun = engine.compile(std::move(ir)); + Input input(engine.create(a), op); + Stash stash; + return Result(fun->eval(input, stash)); + } +}; + +// evaluate tensor apply operation using tensor engine retained api +struct RetainedApply : Eval { + const BinaryOperation &op; + RetainedApply(const BinaryOperation &op_in) : op(op_in) {} + Result eval(const TensorEngine &engine, const TensorSpec &a, const TensorSpec &b) const override { + auto a_type = ValueType::from_spec(a.type()); + auto b_type = ValueType::from_spec(b.type()); + auto ir = tensor_function::apply(op, tensor_function::inject(a_type, tensor_id_a), + tensor_function::inject(a_type, tensor_id_b)); + auto fun = engine.compile(std::move(ir)); + Input input(engine.create(a), engine.create(b)); + Stash stash; + return Result(fun->eval(input, stash)); + } +}; + +// placeholder used for unused values in a sequence +const double X = error_value; + +// NaN value +const double my_nan = std::numeric_limits<double>::quiet_NaN(); + // Test wrapper to avoid passing global test parameters around struct TestContext { + + const TensorEngine &ref_engine; const TensorEngine &engine; - TestContext(const TensorEngine &engine_in) : 
engine(engine_in) {} + bool test_mixed_cases; + size_t skip_count; + + TestContext(const TensorEngine &engine_in, bool test_mixed_cases_in) + : ref_engine(SimpleTensorEngine::ref()), engine(engine_in), + test_mixed_cases(test_mixed_cases_in), skip_count(0) {} - std::unique_ptr<Tensor> tensor(const std::vector<Space> &spaces, const Sequence &seq) { - return engine.create(TensorSpecBuilder(spaces, seq).build()); + std::unique_ptr<Tensor> tensor(const TensorSpec &spec) { + auto result = engine.create(spec); + EXPECT_EQUAL(spec.type(), engine.type_of(*result).to_spec()); + return result; + } + + bool mixed(size_t n) { + if (!test_mixed_cases) { + skip_count += n; + } + return test_mixed_cases; } + //------------------------------------------------------------------------- + void verify_create_type(const vespalib::string &type_spec) { auto tensor = engine.create(TensorSpec(type_spec)); EXPECT_TRUE(&engine == &tensor->engine()); EXPECT_EQUAL(type_spec, engine.type_of(*tensor).to_spec()); } - void verify_not_equal(const Tensor &a, const Tensor &b) { - EXPECT_FALSE(a == b); - EXPECT_FALSE(b == a); - } - - void verify_verbatim_tensor(const vespalib::string &tensor_expr, const Tensor &expect) { - InterpretedFunction::Context ctx; - InterpretedFunction ifun(engine, Function::parse(tensor_expr)); - const Value &result = ifun.eval(ctx); - if (EXPECT_TRUE(result.is_tensor())) { - const Tensor *actual = result.as_tensor(); - EXPECT_EQUAL(*actual, expect); - } - } - void test_tensor_create_type() { TEST_DO(verify_create_type("double")); TEST_DO(verify_create_type("tensor(x{})")); TEST_DO(verify_create_type("tensor(x{},y{})")); TEST_DO(verify_create_type("tensor(x[5])")); TEST_DO(verify_create_type("tensor(x[5],y[10])")); - TEST_DO(verify_create_type("tensor(x{},y[10])")); - TEST_DO(verify_create_type("tensor(x[5],y{})")); + if (mixed(2)) { + TEST_DO(verify_create_type("tensor(x{},y[10])")); + TEST_DO(verify_create_type("tensor(x[5],y{})")); + } + } + + 
//------------------------------------------------------------------------- + + void verify_equal(const TensorSpec &a, const TensorSpec &b) { + auto ta = tensor(a); + auto tb = tensor(b); + EXPECT_EQUAL(a, b); + EXPECT_EQUAL(*ta, *tb); + TensorSpec spec = engine.to_spec(*ta); + TensorSpec ref_spec = ref_engine.to_spec(*ref_engine.create(a)); + EXPECT_EQUAL(spec, ref_spec); + } + + void test_tensor_equality() { + TEST_DO(verify_equal(spec(), spec())); + TEST_DO(verify_equal(spec(10.0), spec(10.0))); + TEST_DO(verify_equal(spec(x()), spec(x()))); + TEST_DO(verify_equal(spec(x({"a"}), Seq({1})), spec(x({"a"}), Seq({1})))); + TEST_DO(verify_equal(spec({x({"a"}),y({"a"})}, Seq({1})), spec({y({"a"}),x({"a"})}, Seq({1})))); + TEST_DO(verify_equal(spec(x(3)), spec(x(3)))); + TEST_DO(verify_equal(spec({x(1),y(1)}, Seq({1})), spec({y(1),x(1)}, Seq({1})))); + if (mixed(2)) { + TEST_DO(verify_equal(spec({x({"a"}),y(1)}, Seq({1})), spec({y(1),x({"a"})}, Seq({1})))); + TEST_DO(verify_equal(spec({y({"a"}),x(1)}, Seq({1})), spec({x(1),y({"a"})}, Seq({1})))); + } + } + + //------------------------------------------------------------------------- + + void verify_not_equal(const TensorSpec &a, const TensorSpec &b) { + auto ta = tensor(a); + auto tb = tensor(b); + EXPECT_NOT_EQUAL(a, b); + EXPECT_NOT_EQUAL(b, a); + EXPECT_NOT_EQUAL(*ta, *tb); + EXPECT_NOT_EQUAL(*tb, *ta); } void test_tensor_inequality() { - TEST_DO(verify_not_equal(*engine.create(TensorSpec("double")), - *engine.create(TensorSpec("tensor(x{})")))); - TEST_DO(verify_not_equal(*engine.create(TensorSpec("double")), - *engine.create(TensorSpec("tensor(x[1])")))); - TEST_DO(verify_not_equal(*engine.create(TensorSpec("tensor(x{})")), - *engine.create(TensorSpec("tensor(y{})")))); - TEST_DO(verify_not_equal(*engine.create(TensorSpec("tensor(x[1])")), - *engine.create(TensorSpec("tensor(x[2])")))); - TEST_DO(verify_not_equal(*engine.create(TensorSpec("tensor(x[1])")), - *engine.create(TensorSpec("tensor(y[1])")))); - 
TEST_DO(verify_not_equal(*engine.create(TensorSpec("tensor(x{})")), - *engine.create(TensorSpec("tensor(x[1])")))); - TEST_DO(verify_not_equal(*engine.create(TensorSpec("double").add({}, 1)), - *engine.create(TensorSpec("double").add({}, 2)))); - TEST_DO(verify_not_equal(*engine.create(TensorSpec("tensor(x{})").add({{"x", "a"}}, 1)), - *engine.create(TensorSpec("tensor(x{})").add({{"x", "a"}}, 2)))); - TEST_DO(verify_not_equal(*engine.create(TensorSpec("tensor(x{})").add({{"x", "a"}}, 1)), - *engine.create(TensorSpec("tensor(x{})").add({{"x", "b"}}, 1)))); - TEST_DO(verify_not_equal(*engine.create(TensorSpec("tensor(x{})").add({{"x", "a"}}, 1)), - *engine.create(TensorSpec("tensor(x{},y{})").add({{"x", "a"},{"y", "a"}}, 1)))); - TEST_DO(verify_not_equal(*engine.create(TensorSpec("tensor(x[1])").add({{"x", 0}}, 1)), - *engine.create(TensorSpec("tensor(x[1])").add({{"x", 0}}, 2)))); - TEST_DO(verify_not_equal(*engine.create(TensorSpec("tensor(x[1])").add({{"x", 0}}, 1)), - *engine.create(TensorSpec("tensor(x[2])").add({{"x", 0}}, 1)))); - TEST_DO(verify_not_equal(*engine.create(TensorSpec("tensor(x[2])").add({{"x", 0}}, 1)), - *engine.create(TensorSpec("tensor(x[2])").add({{"x", 1}}, 1)))); - TEST_DO(verify_not_equal(*engine.create(TensorSpec("tensor(x[1])").add({{"x", 0}}, 1)), - *engine.create(TensorSpec("tensor(x[1],y[1])").add({{"x", 0},{"y", 0}}, 1)))); - TEST_DO(verify_not_equal(*engine.create(TensorSpec("tensor(x{},y[1])").add({{"x", "a"},{"y", 0}}, 1)), - *engine.create(TensorSpec("tensor(x{},y[1])").add({{"x", "a"},{"y", 0}}, 2)))); - TEST_DO(verify_not_equal(*engine.create(TensorSpec("tensor(x{},y[1])").add({{"x", "a"},{"y", 0}}, 1)), - *engine.create(TensorSpec("tensor(x{},y[1])").add({{"x", "b"},{"y", 0}}, 1)))); - TEST_DO(verify_not_equal(*engine.create(TensorSpec("tensor(x[2],y{})").add({{"x", 0},{"y", "a"}}, 1)), - *engine.create(TensorSpec("tensor(x[2],y{})").add({{"x", 1},{"y", "a"}}, 1)))); + TEST_DO(verify_not_equal(spec(1.0), spec(2.0))); + 
TEST_DO(verify_not_equal(spec(), spec(x()))); + TEST_DO(verify_not_equal(spec(), spec(x(1)))); + TEST_DO(verify_not_equal(spec(x()), spec(x(1)))); + TEST_DO(verify_not_equal(spec(x()), spec(y()))); + TEST_DO(verify_not_equal(spec(x(1)), spec(x(2)))); + TEST_DO(verify_not_equal(spec(x(1)), spec(y(1)))); + TEST_DO(verify_not_equal(spec(x({"a"}), Seq({1})), spec(x({"a"}), Seq({2})))); + TEST_DO(verify_not_equal(spec(x({"a"}), Seq({1})), spec(x({"b"}), Seq({1})))); + TEST_DO(verify_not_equal(spec(x({"a"}), Seq({1})), spec({x({"a"}),y({"a"})}, Seq({1})))); + TEST_DO(verify_not_equal(spec(x(1), Seq({1})), spec(x(1), Seq({2})))); + TEST_DO(verify_not_equal(spec(x(1), Seq({1})), spec(x(2), Seq({1}), Bits({1,0})))); + TEST_DO(verify_not_equal(spec(x(2), Seq({1,1}), Bits({1,0})), + spec(x(2), Seq({1,1}), Bits({0,1})))); + TEST_DO(verify_not_equal(spec(x(1), Seq({1})), spec({x(1),y(1)}, Seq({1})))); + if (mixed(3)) { + TEST_DO(verify_not_equal(spec({x({"a"}),y(1)}, Seq({1})), spec({x({"a"}),y(1)}, Seq({2})))); + TEST_DO(verify_not_equal(spec({x({"a"}),y(1)}, Seq({1})), spec({x({"b"}),y(1)}, Seq({1})))); + TEST_DO(verify_not_equal(spec({x(2),y({"a"})}, Seq({1}), Bits({1,0})), + spec({x(2),y({"a"})}, Seq({X,1}), Bits({0,1})))); + } + } + + //------------------------------------------------------------------------- + + void verify_verbatim_tensor(const vespalib::string &tensor_expr, const TensorSpec &expect) { + EXPECT_EQUAL(Expr_V(tensor_expr).eval(engine).tensor(), expect); } void test_verbatim_tensors() { - TEST_DO(verify_verbatim_tensor("{}", *engine.create(TensorSpec("double")))); - TEST_DO(verify_verbatim_tensor("{{}:5}", *engine.create(TensorSpec("double").add({}, 5)))); - TEST_DO(verify_verbatim_tensor("{{x:foo}:1,{x:bar}:2,{x:baz}:3}", *engine.create(TensorSpec("tensor(x{})") - .add({{"x", "foo"}}, 1) - .add({{"x", "bar"}}, 2) - .add({{"x", "baz"}}, 3)))); - TEST_DO(verify_verbatim_tensor("{{x:foo,y:a}:1,{y:b,x:bar}:2}", *engine.create(TensorSpec("tensor(x{},y{})") - 
.add({{"x", "foo"}, {"y", "a"}}, 1) - .add({{"x", "bar"}, {"y", "b"}}, 2)))); - } - - void verify_map_op(const UnaryOperation &op, const Tensor &input, const Tensor &expect) { - Stash stash; - const Value &result = engine.map(op, input, stash); - if (EXPECT_TRUE(result.is_tensor())) { - const Tensor &actual = *result.as_tensor(); - EXPECT_EQUAL(actual, expect); + TEST_DO(verify_verbatim_tensor("{}", spec(0.0))); + TEST_DO(verify_verbatim_tensor("{{}:5}", spec(5.0))); + TEST_DO(verify_verbatim_tensor("{{x:foo}:1,{x:bar}:2,{x:baz}:3}", spec(x({"foo","bar","baz"}), Seq({1,2,3})))); + TEST_DO(verify_verbatim_tensor("{{x:foo,y:a}:1,{y:b,x:bar}:2}", + spec({x({"foo","bar"}),y({"a","b"})}, Seq({1,X,X,2}), Bits({1,0,0,1})))); + } + + //------------------------------------------------------------------------- + + void verify_reduce_result(const Eval &eval, const TensorSpec &a, const Eval::Result &expect) { + if (expect.is_tensor()) { + EXPECT_EQUAL(eval.eval(engine, a).tensor(), expect.tensor()); + } else if (expect.is_number()) { + EXPECT_EQUAL(eval.eval(engine, a).number(), expect.number()); + } else { + TEST_FATAL("expected result should be valid"); + } + } + + void test_reduce_op(const vespalib::string &name, const BinaryOperation &op, const Sequence &seq) { + std::vector<Layout> layouts = { + {x(3)}, + {x(3),y(5)}, + {x(3),y(5),z(7)}, + {x({"a","b","c"})}, + {x({"a","b","c"}),y({"foo","bar"})}, + {x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})} + }; + if (mixed(2 * 4)) { + layouts.push_back({x(3),y({"foo", "bar"}),z(7)}); + layouts.push_back({x({"a","b","c"}),y(5),z({"i","j","k","l"})}); + } + for (const Layout &layout: layouts) { + TensorSpec input = spec(layout, seq); + for (const Domain &domain: layout) { + Eval::Result expect = ImmediateReduce(op, domain.dimension).eval(ref_engine, input); + TEST_STATE(make_string("shape: %s, reduce dimension: %s", + infer_type(layout).c_str(), domain.dimension.c_str()).c_str()); + if (!name.empty()) { + vespalib::string 
expr = make_string("%s(a,%s)", name.c_str(), domain.dimension.c_str()); + TEST_DO(verify_reduce_result(Expr_T(expr), input, expect)); + } + TEST_DO(verify_reduce_result(ImmediateReduce(op, domain.dimension), input, expect)); + TEST_DO(verify_reduce_result(RetainedReduce(op, domain.dimension), input, expect)); + } + { + Eval::Result expect = ImmediateReduce(op).eval(ref_engine, input); + TEST_STATE(make_string("shape: %s, reduce all dimensions", + infer_type(layout).c_str()).c_str()); + if (!name.empty()) { + vespalib::string expr = make_string("%s(a)", name.c_str()); + TEST_DO(verify_reduce_result(Expr_T(expr), input, expect)); + } + TEST_DO(verify_reduce_result(ImmediateReduce(op), input, expect)); + TEST_DO(verify_reduce_result(RetainedReduce(op), input, expect)); + } + } + } + + void test_tensor_reduce() { + TEST_DO(test_reduce_op("sum", operation::Add(), N())); + TEST_DO(test_reduce_op("", operation::Mul(), Sigmoid(N()))); + TEST_DO(test_reduce_op("", operation::Min(), N())); + TEST_DO(test_reduce_op("", operation::Max(), N())); + } + + //------------------------------------------------------------------------- + + void test_map_op(const Eval &eval, const UnaryOperation &ref_op, const Sequence &seq) { + std::vector<Layout> layouts = { + {}, + {x(3)}, + {x(3),y(5)}, + {x(3),y(5),z(7)}, + {x({"a","b","c"})}, + {x({"a","b","c"}),y({"foo","bar"})}, + {x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})} + }; + if (mixed(2)) { + layouts.push_back({x(3),y({"foo", "bar"}),z(7)}); + layouts.push_back({x({"a","b","c"}),y(5),z({"i","j","k","l"})}); + } + for (const Layout &layout: layouts) { + EXPECT_EQUAL(eval.eval(engine, spec(layout, seq)).tensor(), spec(layout, OpSeq(seq, ref_op))); } } - void test_map_op(const UnaryOperation &op, const Sequence &seq) { - TEST_DO(verify_map_op(op, - *tensor({Space("x", 10)}, seq), - *tensor({Space("x", 10)}, OpSeq(seq, op)))); - TEST_DO(verify_map_op(op, - *tensor({Space("x", {"a", "b", "c"})}, seq), - *tensor({Space("x", {"a", 
"b", "c"})}, OpSeq(seq, op)))); + void test_map_op(const vespalib::string &expr, const UnaryOperation &op, const Sequence &seq) { + TEST_DO(test_map_op(ImmediateMap(op), op, seq)); + TEST_DO(test_map_op(RetainedMap(op), op, seq)); + TEST_DO(test_map_op(Expr_T(expr), op, seq)); } void test_tensor_map() { - TEST_DO(test_map_op(operation::Floor(), Div10(N()))); - TEST_DO(test_map_op(operation::Ceil(), Div10(N()))); - TEST_DO(test_map_op(operation::Sqrt(), Div10(N()))); - TEST_DO(test_map_op(MyOp(), Div10(N()))); + TEST_DO(test_map_op("-a", operation::Neg(), Sub2(Div10(N())))); + TEST_DO(test_map_op("!a", operation::Not(), Mask2Seq(SkipNth(3)))); + TEST_DO(test_map_op("cos(a)", operation::Cos(), Div10(N()))); + TEST_DO(test_map_op("sin(a)", operation::Sin(), Div10(N()))); + TEST_DO(test_map_op("tan(a)", operation::Tan(), Div10(N()))); + TEST_DO(test_map_op("cosh(a)", operation::Cosh(), Div10(N()))); + TEST_DO(test_map_op("sinh(a)", operation::Sinh(), Div10(N()))); + TEST_DO(test_map_op("tanh(a)", operation::Tanh(), Div10(N()))); + TEST_DO(test_map_op("acos(a)", operation::Acos(), Sigmoid(Div10(N())))); + TEST_DO(test_map_op("asin(a)", operation::Asin(), Sigmoid(Div10(N())))); + TEST_DO(test_map_op("atan(a)", operation::Atan(), Div10(N()))); + TEST_DO(test_map_op("exp(a)", operation::Exp(), Div10(N()))); + TEST_DO(test_map_op("log10(a)", operation::Log10(), Div10(N()))); + TEST_DO(test_map_op("log(a)", operation::Log(), Div10(N()))); + TEST_DO(test_map_op("sqrt(a)", operation::Sqrt(), Div10(N()))); + TEST_DO(test_map_op("ceil(a)", operation::Ceil(), Div10(N()))); + TEST_DO(test_map_op("fabs(a)", operation::Fabs(), Div10(N()))); + TEST_DO(test_map_op("floor(a)", operation::Floor(), Div10(N()))); + TEST_DO(test_map_op("isNan(a)", operation::IsNan(), Mask2Seq(SkipNth(3), 1.0, my_nan))); + TEST_DO(test_map_op("relu(a)", operation::Relu(), Sub2(Div10(N())))); + TEST_DO(test_map_op("sigmoid(a)", operation::Sigmoid(), Sub2(Div10(N())))); + TEST_DO(test_map_op("(a+1)*2", 
MyOp(), Div10(N()))); } - void run_all_tests() { + //------------------------------------------------------------------------- + + void test_apply_op(const Eval &eval, const BinaryOperation &op, const Sequence &seq) { + std::vector<Layout> layouts = { + {}, {}, + {x(5)}, {x(5)}, + {x(5)}, {x(3)}, + {x(5)}, {y(5)}, + {x(5)}, {x(5),y(5)}, + {x(3),y(5)}, {x(4),y(4)}, + {x(3),y(5)}, {y(5),z(7)}, + {x({"a","b","c"})}, {x({"a","b","c"})}, + {x({"a","b","c"})}, {x({"a","b"})}, + {x({"a","b","c"})}, {y({"foo","bar","baz"})}, + {x({"a","b","c"})}, {x({"a","b","c"}),y({"foo","bar","baz"})}, + {x({"a","b"}),y({"foo","bar","baz"})}, {x({"a","b","c"}),y({"foo","bar"})}, + {x({"a","b"}),y({"foo","bar","baz"})}, {y({"foo","bar"}),z({"i","j","k","l"})} + }; + if (mixed(2)) { + layouts.push_back({x(3),y({"foo", "bar"})}); + layouts.push_back({y({"foo", "bar"}),z(7)}); + layouts.push_back({x({"a","b","c"}),y(5)}); + layouts.push_back({y(5),z({"i","j","k","l"})}); + } + ASSERT_TRUE((layouts.size() % 2) == 0); + for (size_t i = 0; i < layouts.size(); i += 2) { + TensorSpec lhs_input = spec(layouts[i], seq); + TensorSpec rhs_input = spec(layouts[i + 1], seq); + TEST_STATE(make_string("lhs shape: %s, rhs shape: %s", + lhs_input.type().c_str(), + rhs_input.type().c_str()).c_str()); + TensorSpec expect = ImmediateApply(op).eval(ref_engine, lhs_input, rhs_input).tensor(); + EXPECT_EQUAL(safe(eval).eval(engine, lhs_input, rhs_input).tensor(), expect); + } + } + + void test_apply_op(const vespalib::string &expr, const BinaryOperation &op, const Sequence &seq) { + TEST_DO(test_apply_op(ImmediateApply(op), op, seq)); + TEST_DO(test_apply_op(RetainedApply(op), op, seq)); + TEST_DO(test_apply_op(Expr_TT(expr), op, seq)); + } + + void test_tensor_apply() { + TEST_DO(test_apply_op("a+b", operation::Add(), Div10(N()))); + TEST_DO(test_apply_op("a-b", operation::Sub(), Div10(N()))); + TEST_DO(test_apply_op("a*b", operation::Mul(), Div10(N()))); + TEST_DO(test_apply_op("a/b", operation::Div(), 
Div10(N()))); + TEST_DO(test_apply_op("a^b", operation::Pow(), Div10(N()))); + TEST_DO(test_apply_op("pow(a,b)", operation::Pow(), Div10(N()))); + TEST_DO(test_apply_op("a==b", operation::Equal(), Div10(N()))); + TEST_DO(test_apply_op("a!=b", operation::NotEqual(), Div10(N()))); + TEST_DO(test_apply_op("a~=b", operation::Approx(), Div10(N()))); + TEST_DO(test_apply_op("a<b", operation::Less(), Div10(N()))); + TEST_DO(test_apply_op("a<=b", operation::LessEqual(), Div10(N()))); + TEST_DO(test_apply_op("a>b", operation::Greater(), Div10(N()))); + TEST_DO(test_apply_op("a>=b", operation::GreaterEqual(), Div10(N()))); + TEST_DO(test_apply_op("a&&b", operation::And(), Mask2Seq(SkipNth(3)))); + TEST_DO(test_apply_op("a||b", operation::Or(), Mask2Seq(SkipNth(3)))); + TEST_DO(test_apply_op("atan2(a,b)", operation::Atan2(), Div10(N()))); + TEST_DO(test_apply_op("ldexp(a,b)", operation::Ldexp(), Div10(N()))); + TEST_DO(test_apply_op("fmod(a,b)", operation::Fmod(), Div10(N()))); + TEST_DO(test_apply_op("min(a,b)", operation::Min(), Div10(N()))); + TEST_DO(test_apply_op("max(a,b)", operation::Max(), Div10(N()))); + } + + //------------------------------------------------------------------------- + + void run_tests() { TEST_DO(test_tensor_create_type()); + TEST_DO(test_tensor_equality()); TEST_DO(test_tensor_inequality()); TEST_DO(test_verbatim_tensors()); + TEST_DO(test_tensor_reduce()); TEST_DO(test_tensor_map()); + TEST_DO(test_tensor_apply()); } }; } // namespace vespalib::eval::test::<unnamed> void -TensorConformance::run_tests(const TensorEngine &engine) +TensorConformance::run_tests(const TensorEngine &engine, bool test_mixed_cases) { - TestContext ctx(engine); - ctx.run_all_tests(); + TestContext ctx(engine, test_mixed_cases); + ctx.run_tests(); + if (ctx.skip_count > 0) { + fprintf(stderr, "WARNING: skipped %zu mixed test cases\n", ctx.skip_count); + } } } // namespace vespalib::eval::test diff --git a/vespalib/src/vespa/vespalib/eval/test/tensor_conformance.h 
b/vespalib/src/vespa/vespalib/eval/test/tensor_conformance.h index 8d90f548b33..ed1ff618f49 100644 --- a/vespalib/src/vespa/vespalib/eval/test/tensor_conformance.h +++ b/vespalib/src/vespa/vespalib/eval/test/tensor_conformance.h @@ -13,7 +13,7 @@ namespace test { * implementations of the TensorEngine interface. **/ struct TensorConformance { - static void run_tests(const TensorEngine &engine); + static void run_tests(const TensorEngine &engine, bool test_mixed_cases); }; } // namespace vespalib::eval::test diff --git a/vespalib/src/vespa/vespalib/eval/value.cpp b/vespalib/src/vespa/vespalib/eval/value.cpp index ff72ac4c85c..90104693716 100644 --- a/vespalib/src/vespa/vespalib/eval/value.cpp +++ b/vespalib/src/vespa/vespalib/eval/value.cpp @@ -42,5 +42,11 @@ TensorValue::apply(const BinaryOperation &op, const Value &rhs, Stash &stash) co return _value->engine().apply(op, *_value, *other, stash); } +ValueType +TensorValue::type() const +{ + return _value->engine().type_of(*_value); +} + } // namespace vespalib::eval } // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/eval/value.h b/vespalib/src/vespa/vespalib/eval/value.h index 22e90b9327f..51e97d7f498 100644 --- a/vespalib/src/vespa/vespalib/eval/value.h +++ b/vespalib/src/vespa/vespalib/eval/value.h @@ -6,6 +6,7 @@ #include <memory> #include <vespa/vespalib/util/stash.h> #include "tensor.h" +#include "value_type.h" namespace vespalib { namespace eval { @@ -33,6 +34,7 @@ struct Value { virtual bool equal(const Value &rhs) const = 0; virtual const Value &apply(const UnaryOperation &op, Stash &stash) const; virtual const Value &apply(const BinaryOperation &op, const Value &rhs, Stash &stash) const; + virtual ValueType type() const = 0; virtual ~Value() {} }; @@ -40,6 +42,7 @@ struct ErrorValue : public Value { virtual bool is_error() const override { return true; } virtual double as_double() const { return error_value; } virtual bool equal(const Value &) const override { return false; } + ValueType type() 
const override { return ValueType::error_type(); } }; class DoubleValue : public Value @@ -54,6 +57,7 @@ public: bool equal(const Value &rhs) const override { return (rhs.is_double() && (_value == rhs.as_double())); } + ValueType type() const override { return ValueType::double_type(); } }; class TensorValue : public Value @@ -67,6 +71,7 @@ public: bool equal(const Value &rhs) const override; const Value &apply(const UnaryOperation &op, Stash &stash) const override; const Value &apply(const BinaryOperation &op, const Value &rhs, Stash &stash) const override; + ValueType type() const override; }; } // namespace vespalib::eval diff --git a/vespalib/src/vespa/vespalib/objects/nbostream.cpp b/vespalib/src/vespa/vespalib/objects/nbostream.cpp index 12755dcd865..ab6abecc373 100644 --- a/vespalib/src/vespa/vespalib/objects/nbostream.cpp +++ b/vespalib/src/vespa/vespalib/objects/nbostream.cpp @@ -9,9 +9,21 @@ namespace vespalib { -std::ostream & operator << (std::ostream & os, const HexDump & hd) +namespace { + + const char * hexChar = "0123456789ABCDEF"; + +} + +string +HexDump::toString() const { + asciistream os; + os << *this; + return os.str(); +} + +asciistream & operator << (asciistream & os, const HexDump & hd) { - static const char * hexChar = "0123456789ABCDEF"; os << hd._sz << ' '; const uint8_t *c = static_cast<const uint8_t *>(hd._buf); for (size_t i(0); i < hd._sz; i++) { @@ -20,6 +32,11 @@ std::ostream & operator << (std::ostream & os, const HexDump & hd) return os; } +std::ostream & operator << (std::ostream & os, const HexDump & hd) +{ + return os << hd.toString(); +} + void nbostream::fail(State s) { _state = static_cast<State>(_state | s); diff --git a/vespalib/src/vespa/vespalib/objects/nbostream.h b/vespalib/src/vespa/vespalib/objects/nbostream.h index b68a5d3cacb..cb15db018fc 100644 --- a/vespalib/src/vespa/vespalib/objects/nbostream.h +++ b/vespalib/src/vespa/vespalib/objects/nbostream.h @@ -5,6 +5,7 @@ #include <vector> #include <string> #include 
<vespa/vespalib/stllike/string.h> +#include <vespa/vespalib/stllike/asciistream.h> #include <vespa/vespalib/util/array.h> #include <vespa/vespalib/util/buffer.h> @@ -17,7 +18,9 @@ class HexDump { public: HexDump(const void * buf, size_t sz) : _buf(buf), _sz(sz) { } + vespalib::string toString() const; friend std::ostream & operator << (std::ostream & os, const HexDump & hd); + friend asciistream & operator << (asciistream & os, const HexDump & hd); private: const void * _buf; size_t _sz; @@ -33,7 +36,8 @@ private: class nbostream { public: - typedef Array<char, DefaultAlloc> Buffer; + using Buffer = Array<char>; + using Alloc = alloc::Alloc; enum State { ok=0, eof=0x01}; nbostream(size_t initialSize=1024) : _wbuf(), @@ -56,7 +60,7 @@ class nbostream { } - nbostream(DefaultAlloc && buf, size_t sz) : + nbostream(Alloc && buf, size_t sz) : _wbuf(std::move(buf), sz), _rbuf(&_wbuf[0], sz), _rp(0), diff --git a/vespalib/src/vespa/vespalib/stllike/hashtable.h b/vespalib/src/vespa/vespalib/stllike/hashtable.h index 6515f30bf57..a1eeb289c31 100644 --- a/vespalib/src/vespa/vespalib/stllike/hashtable.h +++ b/vespalib/src/vespa/vespalib/stllike/hashtable.h @@ -121,10 +121,6 @@ public: void terminate() { _next = npos; } bool valid() const { return _next != invalid; } bool hasNext() const { return valid() && (_next != npos); } - void swap(hash_node & rhs) { - std::swap(_next, rhs._next); - std::swap(_node, rhs._node); - } private: next_t _next; V _node; @@ -136,7 +132,7 @@ class hashtable : public hashtable_base private: using Node=hash_node<Value>; protected: - typedef vespalib::Array<Node, vespalib::DefaultAlloc > NodeStore; + typedef vespalib::Array<Node> NodeStore; virtual void move(NodeStore && oldStore); public: class const_iterator; @@ -304,7 +300,7 @@ private: next_t hash(const Key & key) const { return modulator(_hasher(key)); } template <typename MoveHandler> void move(MoveHandler & moveHandler, next_t from, next_t to) { - _nodes[to].swap(_nodes[from]); + _nodes[to] = 
std::move(_nodes[from]); moveHandler.move(from, to); } template <typename MoveHandler> diff --git a/vespalib/src/vespa/vespalib/tensor/CMakeLists.txt b/vespalib/src/vespa/vespalib/tensor/CMakeLists.txt index bb5896d0875..8bd25747b35 100644 --- a/vespalib/src/vespa/vespalib/tensor/CMakeLists.txt +++ b/vespalib/src/vespa/vespalib/tensor/CMakeLists.txt @@ -8,10 +8,9 @@ vespa_add_library(vespalib_vespalib_tensor tensor_factory.cpp tensor_function.cpp tensor_mapper.cpp - $<TARGET_OBJECTS:vespalib_vespalib_tensor_compact> + $<TARGET_OBJECTS:vespalib_vespalib_tensor_sparse> $<TARGET_OBJECTS:vespalib_vespalib_tensor_dense> $<TARGET_OBJECTS:vespalib_vespalib_tensor_serialization> - $<TARGET_OBJECTS:vespalib_vespalib_tensor_simple> INSTALL lib64 DEPENDS vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/compact/CMakeLists.txt b/vespalib/src/vespa/vespalib/tensor/compact/CMakeLists.txt deleted file mode 100644 index c3322946593..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/compact/CMakeLists.txt +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -vespa_add_library(vespalib_vespalib_tensor_compact OBJECT - SOURCES - compact_tensor.cpp - compact_tensor_dimension_sum.cpp - compact_tensor_product.cpp - compact_tensor_v2.cpp - compact_tensor_v2_dimension_sum.cpp - compact_tensor_v2_match.cpp - compact_tensor_v2_product.cpp - compact_tensor_address.cpp - compact_tensor_address_builder.cpp - compact_tensor_builder.cpp - compact_tensor_v2_builder.cpp - compact_tensor_unsorted_address_builder.cpp - DEPENDS -) diff --git a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor.cpp b/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor.cpp deleted file mode 100644 index 20dd57b233e..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor.cpp +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. 
See LICENSE in the project root. - -#include <vespa/fastos/fastos.h> -#include "compact_tensor.h" -#include "compact_tensor_address_builder.h" -#include "compact_tensor_dimension_sum.h" -#include "compact_tensor_product.h" -#include <vespa/vespalib/tensor/join_tensors.h> -#include <vespa/vespalib/tensor/tensor_apply.h> -#include <vespa/vespalib/tensor/tensor_visitor.h> -#include <sstream> - -namespace vespalib { -namespace tensor { - -namespace { - -void -copyCells(CompactTensor::Cells &cells, - const CompactTensor::Cells &cells_in, - Stash &stash) -{ - for (const auto &cell : cells_in) { - CompactTensorAddressRef oldRef = cell.first; - CompactTensorAddressRef newRef(oldRef, stash); - cells[newRef] = cell.second; - } -} - -} - -CompactTensor::CompactTensor(const Dimensions &dimensions_in, - const Cells &cells_in) - : _cells(), - _dimensions(dimensions_in), - _stash(STASH_CHUNK_SIZE) -{ - copyCells(_cells, cells_in, _stash); -} - - -CompactTensor::CompactTensor(Dimensions &&dimensions_in, - Cells &&cells_in, Stash &&stash_in) - : _cells(std::move(cells_in)), - _dimensions(std::move(dimensions_in)), - _stash(std::move(stash_in)) -{ -} - - -bool -CompactTensor::operator==(const CompactTensor &rhs) const -{ - return _dimensions == rhs._dimensions && _cells == rhs._cells; -} - - -CompactTensor::Dimensions -CompactTensor::combineDimensionsWith(const CompactTensor &rhs) const -{ - Dimensions result; - std::set_union(_dimensions.cbegin(), _dimensions.cend(), - rhs._dimensions.cbegin(), rhs._dimensions.cend(), - std::back_inserter(result)); - return result; -} - -eval::ValueType -CompactTensor::getType() const -{ - if (_dimensions.empty()) { - return eval::ValueType::double_type(); - } - std::vector<eval::ValueType::Dimension> dimensions; - std::copy(_dimensions.begin(), _dimensions.end(), std::back_inserter(dimensions)); - return eval::ValueType::tensor_type(dimensions); -} - -double -CompactTensor::sum() const -{ - double result = 0.0; - for (const auto &cell : _cells) { 
- result += cell.second; - } - return result; -} - -Tensor::UP -CompactTensor::add(const Tensor &arg) const -{ - const CompactTensor *rhs = dynamic_cast<const CompactTensor *>(&arg); - if (!rhs) { - return Tensor::UP(); - } - return joinTensors(*this, *rhs, - [](double lhsValue, double rhsValue) { return lhsValue + rhsValue; }); -} - -Tensor::UP -CompactTensor::subtract(const Tensor &arg) const -{ - const CompactTensor *rhs = dynamic_cast<const CompactTensor *>(&arg); - if (!rhs) { - return Tensor::UP(); - } - return joinTensorsNegated(*this, *rhs, - [](double lhsValue, double rhsValue) { return lhsValue + rhsValue; }); - // Note that -rhsCell.second is passed to the lambda function, that is why we do addition. -} - -Tensor::UP -CompactTensor::multiply(const Tensor &arg) const -{ - const CompactTensor *rhs = dynamic_cast<const CompactTensor *>(&arg); - if (!rhs) { - return Tensor::UP(); - } - return CompactTensorProduct(*this, *rhs).result(); -} - -Tensor::UP -CompactTensor::min(const Tensor &arg) const -{ - const CompactTensor *rhs = dynamic_cast<const CompactTensor *>(&arg); - if (!rhs) { - return Tensor::UP(); - } - return joinTensors(*this, *rhs, - [](double lhsValue, double rhsValue) { return std::min(lhsValue, rhsValue); }); -} - -Tensor::UP -CompactTensor::max(const Tensor &arg) const -{ - const CompactTensor *rhs = dynamic_cast<const CompactTensor *>(&arg); - if (!rhs) { - return Tensor::UP(); - } - return joinTensors(*this, *rhs, - [](double lhsValue, double rhsValue) { return std::max(lhsValue, rhsValue); }); -} - -Tensor::UP -CompactTensor::match(const Tensor &arg) const -{ - const CompactTensor *rhs = dynamic_cast<const CompactTensor *>(&arg); - if (!rhs) { - return Tensor::UP(); - } - DirectTensorBuilder<CompactTensor> builder(combineDimensionsWith(*rhs)); - for (const auto &lhsCell : cells()) { - auto rhsItr = rhs->cells().find(lhsCell.first); - if (rhsItr != rhs->cells().end()) { - builder.insertCell(lhsCell.first, lhsCell.second * rhsItr->second); - 
} - } - return builder.build(); -} - -Tensor::UP -CompactTensor::apply(const CellFunction &func) const -{ - return TensorApply<CompactTensor>(*this, func).result(); -} - -Tensor::UP -CompactTensor::sum(const vespalib::string &dimension) const -{ - return CompactTensorDimensionSum(*this, dimension).result(); -} - -bool -CompactTensor::equals(const Tensor &arg) const -{ - const CompactTensor *rhs = dynamic_cast<const CompactTensor *>(&arg); - if (!rhs) { - return false; - } - return *this == *rhs; -} - -vespalib::string -CompactTensor::toString() const -{ - std::ostringstream stream; - stream << *this; - return stream.str(); -} - -Tensor::UP -CompactTensor::clone() const -{ - return std::make_unique<CompactTensor>(_dimensions, _cells); -} - -void -CompactTensor::print(std::ostream &out) const -{ - out << "{ "; - bool first = true; - CompactTensorAddress addr; - for (const auto &cell : cells()) { - if (!first) { - out << ", "; - } - addr.deserializeFromSparseAddressRef(cell.first); - out << addr << ":" << cell.second; - first = false; - } - out << " }"; -} - -void -CompactTensor::accept(TensorVisitor &visitor) const -{ - CompactTensorAddress caddr; - TensorAddressBuilder addrBuilder; - TensorAddress addr; - for (const auto &cell : _cells) { - caddr.deserializeFromSparseAddressRef(cell.first); - addrBuilder.clear(); - for (const auto &element : caddr.elements()) { - addrBuilder.add(element.dimension(), element.label()); - } - addr = addrBuilder.build(); - visitor.visit(addr, cell.second); - } -} - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor.h b/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor.h deleted file mode 100644 index 71431d36461..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor.h +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
- -#pragma once - -#include <vespa/vespalib/tensor/cell_function.h> -#include <vespa/vespalib/tensor/tensor.h> -#include <vespa/vespalib/tensor/tensor_address.h> -#include "compact_tensor_address.h" -#include <vespa/vespalib/tensor/types.h> -#include <vespa/vespalib/stllike/hash_map.h> -#include <vespa/vespalib/stllike/string.h> -#include <vespa/vespalib/util/stash.h> - -namespace vespalib { -namespace tensor { - -/** - * A tensor implementation using serialized tensor addresses to - * improve CPU cache and TLB hit ratio, relative to SimpleTensor - * implementation. - */ -class CompactTensor : public Tensor -{ -public: - typedef vespalib::hash_map<CompactTensorAddressRef, double> Cells; - typedef TensorDimensions Dimensions; - - static constexpr size_t STASH_CHUNK_SIZE = 16384u; - -private: - Cells _cells; - Dimensions _dimensions; - Stash _stash; - -public: - explicit CompactTensor(const Dimensions &dimensions_in, - const Cells &cells_in); - CompactTensor(Dimensions &&dimensions_in, - Cells &&cells_in, Stash &&stash_in); - const Cells &cells() const { return _cells; } - const Dimensions &dimensions() const { return _dimensions; } - bool operator==(const CompactTensor &rhs) const; - Dimensions combineDimensionsWith(const CompactTensor &rhs) const; - - virtual eval::ValueType getType() const override; - virtual double sum() const override; - virtual Tensor::UP add(const Tensor &arg) const override; - virtual Tensor::UP subtract(const Tensor &arg) const override; - virtual Tensor::UP multiply(const Tensor &arg) const override; - virtual Tensor::UP min(const Tensor &arg) const override; - virtual Tensor::UP max(const Tensor &arg) const override; - virtual Tensor::UP match(const Tensor &arg) const override; - virtual Tensor::UP apply(const CellFunction &func) const override; - virtual Tensor::UP sum(const vespalib::string &dimension) const override; - virtual bool equals(const Tensor &arg) const override; - virtual void print(std::ostream &out) const override; - 
virtual vespalib::string toString() const override; - virtual Tensor::UP clone() const override; - virtual void accept(TensorVisitor &visitor) const override; -}; - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_address.cpp b/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_address.cpp deleted file mode 100644 index e3cada25285..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_address.cpp +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#include <vespa/fastos/fastos.h> -#include "compact_tensor_address.h" -#include "compact_tensor_v2_address_decoder.h" -#include <algorithm> - -namespace vespalib { -namespace tensor { - -namespace -{ - -void -setupElements(CompactTensorAddress::Elements &elements, - CompactTensorAddressRef ref) -{ - const char *cur = static_cast<const char *>(ref.start()); - const char *end = cur + ref.size(); - while (cur != end) { - const char *dim = cur; - while (*cur) { - ++cur; - } - ++cur; - const char *label = cur; - while (*cur) { - ++cur; - } - ++cur; - elements.emplace_back(vespalib::stringref(dim, label - 1 - dim), - vespalib::stringref(label, cur - 1 - label)); - } -} - - -} - - - -CompactTensorAddress::CompactTensorAddress() - : _elements() -{ -} - -CompactTensorAddress::CompactTensorAddress(const Elements &elements_in) - : _elements(elements_in) -{ -} - -bool -CompactTensorAddress::hasDimension(const vespalib::string &dimension) const -{ - for (const auto &elem : _elements) { - if (elem.dimension() == dimension) { - return true; - } - } - return false; -} - -bool -CompactTensorAddress::operator<(const CompactTensorAddress &rhs) const -{ - size_t minSize = std::min(_elements.size(), rhs._elements.size()); - for (size_t i = 0; i < minSize; ++i) { - if (_elements[i] != rhs._elements[i]) { - return _elements[i] < 
rhs._elements[i]; - } - } - return _elements.size() < rhs._elements.size(); -} - -bool -CompactTensorAddress::operator==(const CompactTensorAddress &rhs) const -{ - return _elements == rhs._elements; -} - - -void -CompactTensorAddress::deserializeFromSparseAddressRef(CompactTensorAddressRef - ref) -{ - _elements.clear(); - setupElements(_elements, ref); -} - - -void -CompactTensorAddress::deserializeFromAddressRefV2(CompactTensorAddressRef ref, - const TensorDimensions & - dimensions) -{ - _elements.clear(); - CompactTensorV2AddressDecoder addr(ref); - for (auto &dim : dimensions) { - auto label = addr.decodeLabel(); - if (label.size() != 0u) { - _elements.emplace_back(dim, label); - } - } - assert(!addr.valid()); -} - - - -std::ostream & -operator<<(std::ostream &out, const CompactTensorAddress::Elements &elements) -{ - out << "{"; - bool first = true; - for (const auto &elem : elements) { - if (!first) { - out << ","; - } - out << elem.dimension() << ":" << elem.label(); - first = false; - } - out << "}"; - return out; -} - -std::ostream & -operator<<(std::ostream &out, const CompactTensorAddress &value) -{ - out << value.elements(); - return out; -} - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_address.h b/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_address.h deleted file mode 100644 index 509c267323c..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_address.h +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include <vespa/vespalib/stllike/string.h> -#include <iostream> -#include <vector> -#include "compact_tensor_address_ref.h" -#include <vespa/vespalib/tensor/types.h> - -namespace vespalib { -namespace tensor { - -/** - * A compact sparse immutable address to a tensor cell. 
- * - * Only dimensions which have a different label than "undefined" are - * explicitly included. - * - * Tensor addresses are ordered by the natural order of the elements - * in sorted order. - */ -class CompactTensorAddress -{ -public: - class Element - { - private: - vespalib::stringref _dimension; - vespalib::stringref _label; - - public: - Element(vespalib::stringref dimension_in, - vespalib::stringref label_in) - : _dimension(dimension_in), _label(label_in) - {} - vespalib::stringref dimension() const { return _dimension; } - vespalib::stringref label() const { return _label; } - bool operator<(const Element &rhs) const { - if (_dimension == rhs._dimension) { - // Define sort order when dimension is the same to be able - // to do set operations over element vectors. - return _label < rhs._label; - } - return _dimension < rhs._dimension; - } - bool operator==(const Element &rhs) const { - return (_dimension == rhs._dimension) && (_label == rhs._label); - } - bool operator!=(const Element &rhs) const { - return !(*this == rhs); - } - }; - - typedef std::vector<Element> Elements; - -private: - Elements _elements; - -public: - CompactTensorAddress(); - explicit CompactTensorAddress(const Elements &elements_in); - const Elements &elements() const { return _elements; } - bool hasDimension(const vespalib::string &dimension) const; - bool operator<(const CompactTensorAddress &rhs) const; - bool operator==(const CompactTensorAddress &rhs) const; - void deserializeFromSparseAddressRef(CompactTensorAddressRef ref); - void deserializeFromAddressRefV2(CompactTensorAddressRef ref, - const TensorDimensions &dimensions); -}; - -std::ostream &operator<<(std::ostream &out, const CompactTensorAddress::Elements &elements); -std::ostream &operator<<(std::ostream &out, const CompactTensorAddress &value); - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_address_builder.cpp 
b/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_address_builder.cpp deleted file mode 100644 index 03f2ec0fd15..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_address_builder.cpp +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#include <vespa/fastos/fastos.h> -#include "compact_tensor_address_builder.h" -#include <algorithm> - -namespace vespalib { -namespace tensor { - -namespace -{ - -void -append(std::vector<char> &address, vespalib::stringref str) -{ - const char *cstr = str.c_str(); - address.insert(address.end(), cstr, cstr + str.size() + 1); -} - -} - -CompactTensorAddressBuilder::CompactTensorAddressBuilder() - : _address() -{ -} - - -void -CompactTensorAddressBuilder::add(vespalib::stringref dimension, - vespalib::stringref label) -{ - append(_address, dimension); - append(_address, label); -} - - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_address_builder.h b/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_address_builder.h deleted file mode 100644 index 2981352eef5..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_address_builder.h +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include <vespa/vespalib/stllike/string.h> -#include <vector> -#include "compact_tensor_address_ref.h" - -namespace vespalib { -namespace tensor { - - -class CompactTensorAddress; - -/** - * A writer to serialize tensor addresses into a compact representation. 
- * - * Format: (dimStr NUL labelStr NUL)* - */ -class CompactTensorAddressBuilder -{ -private: - std::vector<char> _address; -public: - CompactTensorAddressBuilder(); - void add(vespalib::stringref dimension, vespalib::stringref label); - void clear() { _address.clear(); } - CompactTensorAddressRef getAddressRef() const { - return CompactTensorAddressRef(&_address[0], _address.size()); - } - bool empty() const { return _address.empty(); } -}; - - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_builder.cpp b/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_builder.cpp deleted file mode 100644 index e7a677253bd..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_builder.cpp +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#include "compact_tensor_builder.h" -#include <vespa/vespalib/tensor/tensor.h> - -namespace vespalib { -namespace tensor { - -CompactTensorBuilder::CompactTensorBuilder() - : TensorBuilder(), - _addressBuilder(), - _normalizedAddressBuilder(), - _cells(), - _stash(CompactTensor::STASH_CHUNK_SIZE), - _dimensionsEnum(), - _dimensions() -{ -} - -CompactTensorBuilder::~CompactTensorBuilder() -{ -} - - -TensorBuilder::Dimension -CompactTensorBuilder::define_dimension(const vespalib::string &dimension) -{ - auto it = _dimensionsEnum.find(dimension); - if (it != _dimensionsEnum.end()) { - return it->second; - } - Dimension res = _dimensionsEnum.size(); - auto insres = _dimensionsEnum.insert(std::make_pair(dimension, res)); - assert(insres.second); - assert(insres.first->second == res); - assert(_dimensions.size() == res); - _dimensions.push_back(dimension); - return res; -} - -TensorBuilder & -CompactTensorBuilder::add_label(Dimension dimension, - const vespalib::string &label) -{ - assert(dimension <= _dimensions.size()); - 
_addressBuilder.add(_dimensions[dimension], label); - return *this; -} - -TensorBuilder & -CompactTensorBuilder::add_cell(double value) -{ - _addressBuilder.buildTo(_normalizedAddressBuilder); - CompactTensorAddressRef taddress(_normalizedAddressBuilder.getAddressRef()); - // Make a persistent copy of compact tensor address owned by _stash - CompactTensorAddressRef address(taddress, _stash); - _cells[address] = value; - _addressBuilder.clear(); - _normalizedAddressBuilder.clear(); - return *this; -} - - -Tensor::UP -CompactTensorBuilder::build() -{ - assert(_addressBuilder.empty()); - CompactTensor::Dimensions dimensions(_dimensions.begin(), - _dimensions.end()); - std::sort(dimensions.begin(), dimensions.end()); - Tensor::UP ret = std::make_unique<CompactTensor>(std::move(dimensions), - std::move(_cells), - std::move(_stash)); - CompactTensor::Cells().swap(_cells); - _dimensionsEnum.clear(); - _dimensions.clear(); - return ret; -} - - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_builder.h b/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_builder.h deleted file mode 100644 index d40235c8e92..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_builder.h +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "compact_tensor.h" -#include "compact_tensor_address_builder.h" -#include "compact_tensor_unsorted_address_builder.h" -#include <vespa/vespalib/tensor/tensor_builder.h> -#include <vespa/vespalib/tensor/tensor_address.h> -#include <vespa/vespalib/stllike/hash_map.h> -#include <vespa/vespalib/util/stash.h> - -namespace vespalib { -namespace tensor { - -/** - * A builder of compact tensors. 
- */ -class CompactTensorBuilder : public TensorBuilder -{ - CompactTensorUnsortedAddressBuilder _addressBuilder; // unsorted dimensions - CompactTensorAddressBuilder _normalizedAddressBuilder; // sorted dimensions - CompactTensor::Cells _cells; - Stash _stash; - vespalib::hash_map<vespalib::string, uint32_t> _dimensionsEnum; - std::vector<vespalib::string> _dimensions; -public: - CompactTensorBuilder(); - virtual ~CompactTensorBuilder(); - - virtual Dimension - define_dimension(const vespalib::string &dimension) override; - virtual TensorBuilder & - add_label(Dimension dimension, - const vespalib::string &label) override; - virtual TensorBuilder &add_cell(double value) override; - - virtual Tensor::UP build() override; -}; - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_dimension_sum.cpp b/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_dimension_sum.cpp deleted file mode 100644 index 18fdb83ed96..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_dimension_sum.cpp +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
- -#include <vespa/fastos/fastos.h> -#include "compact_tensor_dimension_sum.h" -#include <vespa/vespalib/tensor/tensor_address_element_iterator.h> -#include <vespa/vespalib/tensor/decoded_tensor_address_store.h> - -namespace vespalib { -namespace tensor { - -namespace { - -template <class AddressBuilder, class Address> -void -removeDimension(AddressBuilder &addressBuilder, - const Address &address, - const vespalib::stringref dimension) -{ - addressBuilder.clear(); - for (const auto &elem : address.elements()) { - if (elem.dimension() != dimension) { - addressBuilder.add(elem.dimension(), elem.label()); - } - } -} - -TensorDimensions -removeDimension(const TensorDimensions &dimensions, - const vespalib::string &dimension) -{ - TensorDimensions result = dimensions; - auto itr = std::lower_bound(result.begin(), result.end(), dimension); - if (itr != result.end() && *itr == dimension) { - result.erase(itr); - } - return result; -} - -} - -CompactTensorDimensionSum::CompactTensorDimensionSum(const TensorImplType & - tensor, - const vespalib::string & - dimension) - : Parent(removeDimension(tensor.dimensions(), dimension)) -{ - AddressBuilderType reducedAddress; - DecodedTensorAddressStore<AddressType> cellAddr; - for (const auto &cell : tensor.cells()) { - cellAddr.set(cell.first); - removeDimension<AddressBuilderType, AddressType> - (reducedAddress, cellAddr.get(cell.first), dimension); - _builder.insertCell(reducedAddress, cell.second, - [](double cellValue, double rhsValue) { return cellValue + rhsValue; }); - } -} - - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_dimension_sum.h b/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_dimension_sum.h deleted file mode 100644 index 7a5df115020..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_dimension_sum.h +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2016 Yahoo Inc. 
Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include <vespa/vespalib/tensor/tensor_operation.h> - -namespace vespalib { -namespace tensor { - -/** - * Returns a tensor with the given dimension removed and the cell values in that dimension summed. - */ -class CompactTensorDimensionSum : public TensorOperation<CompactTensor> -{ -public: - using TensorImplType = CompactTensor; - using Parent = TensorOperation<CompactTensor>; - using AddressBuilderType = typename Parent::AddressBuilderType; - using AddressType = typename Parent::AddressType; - using Parent::_builder; - CompactTensorDimensionSum(const TensorImplType &tensor, - const vespalib::string &dimension); -}; - - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_product.cpp b/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_product.cpp deleted file mode 100644 index d66ae2d1fec..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_product.cpp +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
- -#include <vespa/fastos/fastos.h> -#include "compact_tensor_product.h" -#include <vespa/vespalib/tensor/tensor_address_element_iterator.h> -#include <vespa/vespalib/tensor/dimensions_vector_iterator.h> -#include <vespa/vespalib/tensor/decoded_tensor_address_store.h> -#include <vespa/vespalib/tensor/join_tensor_addresses.h> -#include <type_traits> - -constexpr bool onthefly_tensor_address_decoding = true; - -namespace vespalib { -namespace tensor { - -namespace { - -template <class Dimensions> -void -calcIntersectDimensions(DimensionsVector &res, - const Dimensions &lhs, const Dimensions &rhs) -{ - std::set_intersection(lhs.cbegin(), lhs.cend(), rhs.cbegin(), rhs.cend(), - std::back_inserter(res)); -} - - -template <class Dimensions> -void -calcIntersectDimensions(DimensionsSet &res, - const Dimensions &lhs, const Dimensions &rhs) -{ - for (const auto &dimension : lhs) { - if (std::binary_search(rhs.begin(), rhs.end(), dimension)) { - res.insert(vespalib::stringref(dimension.c_str(), - dimension.size())); - } - } -} - - -} - - -template <class DimensionsCollection> -void -CompactTensorProduct::template bruteForceProduct(const TensorImplType &lhs, - const TensorImplType &rhs) -{ - DimensionsCollection iDims; - calcIntersectDimensions<Dimensions>(iDims, - lhs.dimensions(), rhs.dimensions()); - using RhsAddressType = - typename std::conditional<onthefly_tensor_address_decoding, - AddressRefType, AddressType>::type; - DecodedTensorAddressStore<AddressType> lhsAddr; - DecodedTensorAddressStore<RhsAddressType> rhsAddr; - AddressBuilderType combinedAddress; - for (const auto &lhsCell : lhs.cells()) { - lhsAddr.set(lhsCell.first); - for (const auto &rhsCell : rhs.cells()) { - rhsAddr.set(rhsCell.first); - bool combineSuccess = joinTensorAddresses<AddressBuilderType, - AddressType, RhsAddressType> - (combinedAddress, iDims, - lhsAddr.get(lhsCell.first), - rhsAddr.get(rhsCell.first)); - if (combineSuccess) { - _builder.insertCell(combinedAddress, lhsCell.second * 
rhsCell.second); - } - } - } -} - - -void -CompactTensorProduct::fastProduct(const TensorImplType &lhs, - const TensorImplType &rhs) -{ - const typename TensorImplType::Cells &rhsCells = rhs.cells(); - for (const auto &lhsCell : lhs.cells()) { - auto itr = rhsCells.find(lhsCell.first); - if (itr != rhsCells.end()) { - _builder.insertCell(lhsCell.first, lhsCell.second * itr->second); - } - } -} - - -CompactTensorProduct::CompactTensorProduct(const TensorImplType &lhs, - const TensorImplType &rhs) - : Parent(lhs.combineDimensionsWith(rhs)) -{ -#if 0 - /* Commented ut for now since we want to see brute force performance. */ - // All dimensions are common - if (lhs.dimensions().size() == rhs.dimensions().size() && - lhs.dimensions().size() == _builder.dimensions().size()) { - fastProduct(lhs, rhs); - return; - } - // TODO: Handle zero cells or zero dimensions cases - // No dimensions are common - if (lhs.dimensions().size() + rhs.dimensions().size() == - _builder.dimensions().size()) { - bruteForceNoCommonDimensionProduct(lhs, rhs); - return; - } - // lhs dimensions equals common dimensions - if (rhs.dimensions().size() == _builder.dimensions().size()) { - } - // rhs dimensions equals common dimensions - if (lhs.dimensions().size() == _builder.dimensions().size()) { - } -#endif -#if 1 - // few common dimensions - bruteForceProduct<DimensionsVector>(lhs, rhs); -#else - // many common dimensions, too expensive to iterate through all of - // them if each cell has relatively few dimensions. - bruteForceProduct<DimensionsSet>(lhs, rhs); -#endif -} - - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_product.h b/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_product.h deleted file mode 100644 index 0c2db688730..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_product.h +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2016 Yahoo Inc. 
Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include <vespa/vespalib/tensor/tensor_operation.h> - -namespace vespalib { -namespace tensor { - -/** - * Returns the sparse tensor product of the two given tensors. - * This is all combinations of all cells in the first tensor with all cells of - * the second tensor, except the combinations which would have multiple labels - * for the same dimension due to shared dimensions between the two tensors. - * - * If there are no overlapping dimensions this is the regular tensor product. - * If the two tensors have exactly the same dimensions this is the Hadamard product. - * - * The sparse tensor is associative and commutative. Its dimensions are the - * set of the dimensions of the two input tensors. - */ -class CompactTensorProduct : public TensorOperation<CompactTensor> -{ -public: - using TensorImplType = CompactTensor; - using Parent = TensorOperation<CompactTensor>; - using Dimensions = typename Parent::Dimensions; - using AddressBuilderType = typename Parent::AddressBuilderType; - using AddressRefType = typename Parent::AddressRefType; - using AddressType = typename Parent::AddressType; - using Parent::_builder; - -private: - template <class DimensionsCollection> - void - bruteForceProduct(const TensorImplType &lhs, const TensorImplType &rhs); - - void - fastProduct(const TensorImplType &lhs, const TensorImplType &rhs); - -public: - CompactTensorProduct(const TensorImplType &lhs, const TensorImplType &rhs); -}; - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2.cpp b/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2.cpp deleted file mode 100644 index a9b90196bc6..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2.cpp +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. 
See LICENSE in the project root. - -#include <vespa/fastos/fastos.h> -#include "compact_tensor_v2.h" -#include "compact_tensor_v2_address_builder.h" -#include "compact_tensor_v2_dimension_sum.h" -#include "compact_tensor_v2_match.h" -#include "compact_tensor_v2_product.h" -#include "join_compact_tensors_v2.h" -#include <vespa/vespalib/tensor/tensor_apply.h> -#include <vespa/vespalib/tensor/tensor_visitor.h> -#include <sstream> - - -namespace vespalib { -namespace tensor { - -namespace { - -using Cells = CompactTensorV2::Cells; - -void -copyCells(Cells &cells, const Cells &cells_in, Stash &stash) -{ - for (const auto &cell : cells_in) { - CompactTensorAddressRef oldRef = cell.first; - CompactTensorAddressRef newRef(oldRef, stash); - cells[newRef] = cell.second; - } -} - -} - -CompactTensorV2::CompactTensorV2(const Dimensions &dimensions_in, - const Cells &cells_in) - : _cells(), - _dimensions(dimensions_in), - _stash(STASH_CHUNK_SIZE) -{ - copyCells(_cells, cells_in, _stash); -} - - -CompactTensorV2::CompactTensorV2(Dimensions &&dimensions_in, - Cells &&cells_in, Stash &&stash_in) - : _cells(std::move(cells_in)), - _dimensions(std::move(dimensions_in)), - _stash(std::move(stash_in)) -{ -} - - -bool -CompactTensorV2::operator==(const CompactTensorV2 &rhs) const -{ - return _dimensions == rhs._dimensions && _cells == rhs._cells; -} - - -CompactTensorV2::Dimensions -CompactTensorV2::combineDimensionsWith(const CompactTensorV2 &rhs) const -{ - Dimensions result; - std::set_union(_dimensions.cbegin(), _dimensions.cend(), - rhs._dimensions.cbegin(), rhs._dimensions.cend(), - std::back_inserter(result)); - return result; -} - -eval::ValueType -CompactTensorV2::getType() const -{ - if (_dimensions.empty()) { - return eval::ValueType::double_type(); - } - std::vector<eval::ValueType::Dimension> dimensions; - std::copy(_dimensions.begin(), _dimensions.end(), std::back_inserter(dimensions)); - return eval::ValueType::tensor_type(dimensions); -} - -double 
-CompactTensorV2::sum() const -{ - double result = 0.0; - for (const auto &cell : _cells) { - result += cell.second; - } - return result; -} - -Tensor::UP -CompactTensorV2::add(const Tensor &arg) const -{ - const CompactTensorV2 *rhs = dynamic_cast<const CompactTensorV2 *>(&arg); - if (!rhs) { - return Tensor::UP(); - } - return joinCompactTensorsV2(*this, *rhs, - [](double lhsValue, double rhsValue) { return lhsValue + rhsValue; }); -} - -Tensor::UP -CompactTensorV2::subtract(const Tensor &arg) const -{ - const CompactTensorV2 *rhs = dynamic_cast<const CompactTensorV2 *>(&arg); - if (!rhs) { - return Tensor::UP(); - } - // Note that -rhsCell.second is passed to the lambda function, that is why we do addition. - return joinCompactTensorsV2Negated(*this, *rhs, - [](double lhsValue, double rhsValue) { return lhsValue + rhsValue; }); -} - -Tensor::UP -CompactTensorV2::multiply(const Tensor &arg) const -{ - const CompactTensorV2 *rhs = dynamic_cast<const CompactTensorV2 *>(&arg); - if (!rhs) { - return Tensor::UP(); - } - return CompactTensorV2Product(*this, *rhs).result(); -} - -Tensor::UP -CompactTensorV2::min(const Tensor &arg) const -{ - const CompactTensorV2 *rhs = dynamic_cast<const CompactTensorV2 *>(&arg); - if (!rhs) { - return Tensor::UP(); - } - return joinCompactTensorsV2(*this, *rhs, - [](double lhsValue, double rhsValue) { return std::min(lhsValue, rhsValue); }); -} - -Tensor::UP -CompactTensorV2::max(const Tensor &arg) const -{ - const CompactTensorV2 *rhs = dynamic_cast<const CompactTensorV2 *>(&arg); - if (!rhs) { - return Tensor::UP(); - } - return joinCompactTensorsV2(*this, *rhs, - [](double lhsValue, double rhsValue) { return std::max(lhsValue, rhsValue); }); -} - -Tensor::UP -CompactTensorV2::match(const Tensor &arg) const -{ - const CompactTensorV2 *rhs = dynamic_cast<const CompactTensorV2 *>(&arg); - if (!rhs) { - return Tensor::UP(); - } - return CompactTensorV2Match(*this, *rhs).result(); -} - -Tensor::UP -CompactTensorV2::apply(const 
CellFunction &func) const -{ - return TensorApply<CompactTensorV2>(*this, func).result(); -} - -Tensor::UP -CompactTensorV2::sum(const vespalib::string &dimension) const -{ - return CompactTensorV2DimensionSum(*this, dimension).result(); -} - -bool -CompactTensorV2::equals(const Tensor &arg) const -{ - const CompactTensorV2 *rhs = dynamic_cast<const CompactTensorV2 *>(&arg); - if (!rhs) { - return false; - } - return *this == *rhs; -} - -vespalib::string -CompactTensorV2::toString() const -{ - std::ostringstream stream; - stream << *this; - return stream.str(); -} - -Tensor::UP -CompactTensorV2::clone() const -{ - return std::make_unique<CompactTensorV2>(_dimensions, _cells); -} - -void -CompactTensorV2::print(std::ostream &out) const -{ - out << "{ "; - bool first = true; - CompactTensorAddress addr; - for (const auto &cell : cells()) { - if (!first) { - out << ", "; - } - addr.deserializeFromAddressRefV2(cell.first, _dimensions); - out << addr << ":" << cell.second; - first = false; - } - out << " }"; -} - -void -CompactTensorV2::accept(TensorVisitor &visitor) const -{ - TensorAddressBuilder addrBuilder; - TensorAddress addr; - for (const auto &cell : _cells) { - CompactTensorV2AddressDecoder decoder(cell.first); - addrBuilder.clear(); - for (const auto &dimension : _dimensions) { - auto label = decoder.decodeLabel(); - if (label.size() != 0u) { - addrBuilder.add(dimension, label); - } - } - assert(!decoder.valid()); - addr = addrBuilder.build(); - visitor.visit(addr, cell.second); - } -} - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2_dimension_sum.cpp b/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2_dimension_sum.cpp deleted file mode 100644 index d76078ba52e..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2_dimension_sum.cpp +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2016 Yahoo Inc. 
Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#include <vespa/fastos/fastos.h> -#include "compact_tensor_v2_dimension_sum.h" -#include "compact_tensor_v2_address_decoder.h" - -namespace vespalib { -namespace tensor { - -namespace { - -enum class AddressOp -{ - REMOVE, - COPY -}; - -using ReduceOps = std::vector<AddressOp>; - - -ReduceOps -buildReduceOps(const TensorDimensions &dims, - const vespalib::stringref &dimension) -{ - ReduceOps ops; - for (auto &dim : dims) { - if (dim == dimension) { - ops.push_back(AddressOp::REMOVE); - } else { - ops.push_back(AddressOp::COPY); - } - } - return ops; -} - - -void -reduceAddress(CompactTensorV2AddressBuilder &builder, - CompactTensorAddressRef ref, - const ReduceOps &ops) -{ - builder.clear(); - CompactTensorV2AddressDecoder addr(ref); - for (auto op : ops) { - switch (op) { - case AddressOp::REMOVE: - addr.skipLabel(); - break; - case AddressOp::COPY: - builder.add(addr.decodeLabel()); - break; - } - } - assert(!addr.valid()); -} - -TensorDimensions -removeDimension(const TensorDimensions &dimensions, - const vespalib::string &dimension) -{ - TensorDimensions result = dimensions; - auto itr = std::lower_bound(result.begin(), result.end(), dimension); - if (itr != result.end() && *itr == dimension) { - result.erase(itr); - } - return result; -} - -} - -CompactTensorV2DimensionSum::CompactTensorV2DimensionSum(const TensorImplType & - tensor, - const - vespalib::string & - dimension) - : Parent(removeDimension(tensor.dimensions(), dimension)) -{ - ReduceOps ops(buildReduceOps(tensor.dimensions(), dimension)); - AddressBuilderType reducedAddress; - for (const auto &cell : tensor.cells()) { - reduceAddress(reducedAddress, cell.first, ops); - _builder.insertCell(reducedAddress, cell.second, - [](double cellValue, double rhsValue) { return cellValue + rhsValue; }); - } -} - - -} // namespace vespalib::tensor -} // namespace vespalib diff --git 
a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2_dimension_sum.h b/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2_dimension_sum.h deleted file mode 100644 index 959135ce91c..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2_dimension_sum.h +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include <vespa/vespalib/tensor/tensor_operation.h> - -namespace vespalib { -namespace tensor { - -/** - * Returns a tensor with the given dimension removed and the cell values in that dimension summed. - */ -class CompactTensorV2DimensionSum : public TensorOperation<CompactTensorV2> -{ -public: - using TensorImplType = CompactTensorV2; - using Parent = TensorOperation<CompactTensorV2>; - using AddressBuilderType = typename Parent::AddressBuilderType; - using AddressType = typename Parent::AddressType; - using Parent::_builder; - CompactTensorV2DimensionSum(const TensorImplType &tensor, - const vespalib::string &dimension); -}; - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2_product.cpp b/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2_product.cpp deleted file mode 100644 index 3eeb68b3098..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2_product.cpp +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
- -#include <vespa/fastos/fastos.h> -#include "compact_tensor_v2_product.h" -#include "compact_tensor_v2_address_decoder.h" -#include <type_traits> - -namespace vespalib { -namespace tensor { - -namespace { - -enum class AddressOp -{ - LHS, - RHS, - BOTH -}; - -using CombineOps = std::vector<AddressOp>; - -CombineOps -buildCombineOps(const TensorDimensions &lhs, - const TensorDimensions &rhs) -{ - CombineOps ops; - auto rhsItr = rhs.cbegin(); - auto rhsItrEnd = rhs.cend(); - for (auto &lhsDim : lhs) { - while (rhsItr != rhsItrEnd && *rhsItr < lhsDim) { - ops.push_back(AddressOp::RHS); - ++rhsItr; - } - if (rhsItr != rhsItrEnd && *rhsItr == lhsDim) { - ops.push_back(AddressOp::BOTH); - ++rhsItr; - } else { - ops.push_back(AddressOp::LHS); - } - } - while (rhsItr != rhsItrEnd) { - ops.push_back(AddressOp::RHS); - ++rhsItr; - } - return ops; -} - - -bool -combineAddresses(CompactTensorV2AddressBuilder &builder, - CompactTensorAddressRef lhsRef, - CompactTensorAddressRef rhsRef, - const CombineOps &ops) -{ - builder.clear(); - CompactTensorV2AddressDecoder lhs(lhsRef); - CompactTensorV2AddressDecoder rhs(rhsRef); - for (auto op : ops) { - switch (op) { - case AddressOp::LHS: - builder.add(lhs.decodeLabel()); - break; - case AddressOp::RHS: - builder.add(rhs.decodeLabel()); - break; - case AddressOp::BOTH: - auto lhsLabel(lhs.decodeLabel()); - auto rhsLabel(rhs.decodeLabel()); - if (lhsLabel != rhsLabel) { - return false; - } - builder.add(lhsLabel); - } - } - assert(!lhs.valid()); - assert(!rhs.valid()); - return true; -} - -} - - -void -CompactTensorV2Product::bruteForceProduct(const TensorImplType &lhs, - const TensorImplType &rhs) -{ - CombineOps ops(buildCombineOps(lhs.dimensions(), rhs.dimensions())); - CompactTensorV2AddressBuilder addressBuilder; - for (const auto &lhsCell : lhs.cells()) { - for (const auto &rhsCell : rhs.cells()) { - bool combineSuccess = combineAddresses(addressBuilder, - lhsCell.first, rhsCell.first, - ops); - if (combineSuccess) { - 
_builder.insertCell(addressBuilder.getAddressRef(), - lhsCell.second * rhsCell.second); - } - } - } -} - - -void -CompactTensorV2Product::fastProduct(const TensorImplType &lhs, - const TensorImplType &rhs) -{ - const typename TensorImplType::Cells &rhsCells = rhs.cells(); - for (const auto &lhsCell : lhs.cells()) { - auto itr = rhsCells.find(lhsCell.first); - if (itr != rhsCells.end()) { - _builder.insertCell(lhsCell.first, - lhsCell.second * itr->second); - } - } -} - - -CompactTensorV2Product::CompactTensorV2Product(const TensorImplType &lhs, - const TensorImplType &rhs) - : Parent(lhs.combineDimensionsWith(rhs)) -{ -#if 0 - /* Commented ut for now since we want to see brute force performance. */ - // All dimensions are common - if (lhs.dimensions().size() == rhs.dimensions().size() && - lhs.dimensions().size() == _builder.dimensions().size()) { - fastProduct(lhs, rhs); - return; - } - // TODO: Handle zero cells or zero dimensions cases - // No dimensions are common - if (lhs.dimensions().size() + rhs.dimensions().size() == - _builder.dimensions().size()) { - bruteForceNoCommonDimensionProduct(lhs, rhs); - return; - } - // lhs dimensions equals common dimensions - if (rhs.dimensions().size() == _builder.dimensions().size()) { - } - // rhs dimensions equals common dimensions - if (lhs.dimensions().size() == _builder.dimensions().size()) { - } -#endif - bruteForceProduct(lhs, rhs); -} - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2_product.h b/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2_product.h deleted file mode 100644 index c0e2f8f4088..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2_product.h +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
- -#pragma once - -#include <vespa/vespalib/tensor/tensor_operation.h> - -namespace vespalib { -namespace tensor { - -/** - * Returns the sparse tensor product of the two given tensors. - * This is all combinations of all cells in the first tensor with all cells of - * the second tensor, except the combinations which would have multiple labels - * for the same dimension due to shared dimensions between the two tensors. - * - * If there are no overlapping dimensions this is the regular tensor product. - * If the two tensors have exactly the same dimensions this is the Hadamard product. - * - * The sparse tensor is associative and commutative. Its dimensions are the - * set of the dimensions of the two input tensors. - */ -class CompactTensorV2Product : public TensorOperation<CompactTensorV2> -{ -public: - using TensorImplType = CompactTensorV2; - using Parent = TensorOperation<CompactTensorV2>; - using Dimensions = typename Parent::Dimensions; - using AddressBuilderType = typename Parent::AddressBuilderType; - using AddressRefType = typename Parent::AddressRefType; - using AddressType = typename Parent::AddressType; - using Parent::_builder; - -private: - void - bruteForceProduct(const TensorImplType &lhs, const TensorImplType &rhs); - - void - fastProduct(const TensorImplType &lhs, const TensorImplType &rhs); - -public: - CompactTensorV2Product(const TensorImplType &lhs, - const TensorImplType &rhs); -}; - - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/compact/direct_compact_tensor_builder.h b/vespalib/src/vespa/vespalib/tensor/compact/direct_compact_tensor_builder.h deleted file mode 100644 index ffd0133f4f0..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/compact/direct_compact_tensor_builder.h +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
- -#pragma once - -#include <vespa/vespalib/tensor/direct_tensor_builder.h> -#include "compact_tensor.h" -#include "compact_tensor_address_builder.h" - -namespace vespalib { -namespace tensor { - -/** - * Utility class to build tensors of type CompactTensor, to be used by - * tensor operations. - */ -template <> class DirectTensorBuilder<CompactTensor> -{ -public: - using TensorImplType = CompactTensor; - using Dimensions = typename TensorImplType::Dimensions; - using Cells = typename TensorImplType::Cells; - using AddressBuilderType = CompactTensorAddressBuilder; - using AddressRefType = CompactTensorAddressRef; - using AddressType = CompactTensorAddress; - -private: - Stash _stash; - Dimensions _dimensions; - Cells _cells; - -public: - void - copyCells(const Cells &cells_in) - { - for (const auto &cell : cells_in) { - CompactTensorAddressRef oldRef = cell.first; - CompactTensorAddressRef newRef(oldRef, _stash); - _cells[newRef] = cell.second; - } - } - - DirectTensorBuilder() - : _stash(TensorImplType::STASH_CHUNK_SIZE), - _dimensions(), - _cells() - { - } - - DirectTensorBuilder(const Dimensions &dimensions_in) - : _stash(TensorImplType::STASH_CHUNK_SIZE), - _dimensions(dimensions_in), - _cells() - { - } - - DirectTensorBuilder(const Dimensions &dimensions_in, - const Cells &cells_in) - : _stash(TensorImplType::STASH_CHUNK_SIZE), - _dimensions(dimensions_in), - _cells() - { - copyCells(cells_in); - } - - Tensor::UP build() { - return std::make_unique<CompactTensor>(std::move(_dimensions), - std::move(_cells), - std::move(_stash)); - } - - template <class Function> - void insertCell(CompactTensorAddressRef address, double value, - Function &&func) - { - CompactTensorAddressRef oldRef(address); - auto res = _cells.insert(std::make_pair(oldRef, value)); - if (res.second) { - // Replace key with own copy - res.first->first = CompactTensorAddressRef(oldRef, _stash); - } else { - res.first->second = func(res.first->second, value); - } - } - - void 
insertCell(CompactTensorAddressRef address, double value) { - // This address should not already exist and a new cell should be inserted. - insertCell(address, value, [](double, double) -> double { abort(); }); - } - - template <class Function> - void insertCell(CompactTensorAddressBuilder &address, double value, - Function &&func) - { - insertCell(address.getAddressRef(), value, func); - } - - void insertCell(CompactTensorAddressBuilder &address, double value) { - // This address should not already exist and a new cell should be inserted. - insertCell(address.getAddressRef(), value, - [](double, double) -> double { abort(); }); - } - - Dimensions &dimensions() { return _dimensions; } - Cells &cells() { return _cells; } -}; - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/compact/join_compact_tensors_v2.h b/vespalib/src/vespa/vespalib/tensor/compact/join_compact_tensors_v2.h deleted file mode 100644 index 0fd89c4ec7b..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/compact/join_compact_tensors_v2.h +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -namespace vespalib { -namespace tensor { - -/* - * Join the cells of two tensors. - * The given function is used to calculate the resulting cell value for overlapping cells. 
- */ -template <typename Function> -Tensor::UP -joinCompactTensorsV2(const CompactTensorV2 &lhs, const CompactTensorV2 &rhs, - Function &&func) -{ - DirectTensorBuilder<CompactTensorV2> builder(lhs.combineDimensionsWith(rhs), - lhs.cells(), lhs.dimensions()); - if (builder.dimensions().size() == rhs.dimensions().size()) { - for (const auto &rhsCell : rhs.cells()) { - builder.insertCell(rhsCell.first, rhsCell.second, func); - } - } else { - CompactTensorV2AddressPadder addressPadder(builder.dimensions(), - rhs.dimensions()); - for (const auto &rhsCell : rhs.cells()) { - addressPadder.padAddress(rhsCell.first); - builder.insertCell(addressPadder, rhsCell.second, func); - } - } - return builder.build(); -} - -/* - * Join the cells of two tensors, where the rhs values are treated as negated values. - * The given function is used to calculate the resulting cell value for overlapping cells. - */ -template <typename Function> -Tensor::UP -joinCompactTensorsV2Negated(const CompactTensorV2 &lhs, - const CompactTensorV2 &rhs, - Function &&func) -{ - DirectTensorBuilder<CompactTensorV2> builder(lhs.combineDimensionsWith(rhs), - lhs.cells(), lhs.dimensions()); - if (builder.dimensions().size() == rhs.dimensions().size()) { - for (const auto &rhsCell : rhs.cells()) { - builder.insertCell(rhsCell.first, -rhsCell.second, func); - } - } else { - CompactTensorV2AddressPadder addressPadder(builder.dimensions(), - rhs.dimensions()); - for (const auto &rhsCell : rhs.cells()) { - addressPadder.padAddress(rhsCell.first); - builder.insertCell(addressPadder, -rhsCell.second, func); - } - } - return builder.build(); -} - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/decoded_tensor_address_store.h b/vespalib/src/vespa/vespalib/tensor/decoded_tensor_address_store.h deleted file mode 100644 index b2d8d1b07ce..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/decoded_tensor_address_store.h +++ /dev/null @@ -1,57 +0,0 @@ -// 
Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -namespace vespalib { -namespace tensor { - -/** - * A utility class to store decoded tensor address based on data stored - * in tensors. - */ -template <class AddressT> class DecodedTensorAddressStore; - -/** - * A utility class to store decoded tensor address. TensorAddress - * doesn't need any decoding, just pass through the argument - * (e.g. tensor address in tensor hash table). - */ -template <> class DecodedTensorAddressStore<TensorAddress> -{ -public: - void set(const TensorAddress &) { } - static const TensorAddress &get(const TensorAddress &rhs) { return rhs; } -}; - -/** - * A utility class to store decoded tensor address. - * CompactTensorAddress needs decoding. - */ -template <> class DecodedTensorAddressStore<CompactTensorAddress> -{ -private: - CompactTensorAddress _address; -public: - void set(const CompactTensorAddressRef rhs) - { _address.deserializeFromSparseAddressRef(rhs); } - const CompactTensorAddress &get(const CompactTensorAddressRef &) - { return _address; } -}; - -/** - * A utility class to store decoded tensor address. Just pass through - * the argument (e.g. tensor address ref in tensor hash table). - * CompactTensorAddressRef is encoded, decoding is performed on the - * fly while iterating. 
- */ -template <> class DecodedTensorAddressStore<CompactTensorAddressRef> -{ -public: - void set(const CompactTensorAddressRef &) { } - static CompactTensorAddressRef get(const CompactTensorAddressRef rhs) - { return rhs; } -}; - - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/default_tensor.h b/vespalib/src/vespa/vespalib/tensor/default_tensor.h index 61c564e4943..2423e677eff 100644 --- a/vespalib/src/vespa/vespalib/tensor/default_tensor.h +++ b/vespalib/src/vespa/vespalib/tensor/default_tensor.h @@ -2,15 +2,15 @@ #pragma once -#include "compact/compact_tensor_v2.h" -#include "compact/compact_tensor_v2_builder.h" +#include "sparse/sparse_tensor.h" +#include "sparse/sparse_tensor_builder.h" namespace vespalib { namespace tensor { struct DefaultTensor { - using type = CompactTensorV2; - using builder = CompactTensorV2Builder; + using type = SparseTensor; + using builder = SparseTensorBuilder; }; } // namespace vespalib::tensor diff --git a/vespalib/src/vespa/vespalib/tensor/default_tensor_engine.cpp b/vespalib/src/vespa/vespalib/tensor/default_tensor_engine.cpp index c34cfb78bbb..a2bc118c00b 100644 --- a/vespalib/src/vespa/vespalib/tensor/default_tensor_engine.cpp +++ b/vespalib/src/vespa/vespalib/tensor/default_tensor_engine.cpp @@ -47,6 +47,14 @@ DefaultTensorEngine::to_string(const Tensor &tensor) const return my_tensor.toString(); } +eval::TensorSpec +DefaultTensorEngine::to_spec(const Tensor &tensor) const +{ + assert(&tensor.engine() == this); + const tensor::Tensor &my_tensor = static_cast<const tensor::Tensor &>(tensor); + return my_tensor.toSpec(); +} + struct IsAddOperation : public eval::DefaultOperationVisitor { bool result = false; void visitDefault(const eval::Operation &) override {} @@ -107,11 +115,11 @@ DefaultTensorEngine::reduce(const Tensor &tensor, const BinaryOperation &op, con const tensor::Tensor &my_tensor = static_cast<const tensor::Tensor &>(tensor); IsAddOperation check; 
op.accept(check); + tensor::Tensor::UP result; if (check.result) { if (dimensions.empty()) { // sum return stash.create<eval::DoubleValue>(my_tensor.sum()); } else { // dimension sum - tensor::Tensor::UP result; for (const auto &dimension: dimensions) { if (result) { result = result->sum(dimension); @@ -119,8 +127,18 @@ DefaultTensorEngine::reduce(const Tensor &tensor, const BinaryOperation &op, con result = my_tensor.sum(dimension); } } + } + } else { + result = my_tensor.reduce(op, dimensions); + } + if (result) { + eval::ValueType result_type(result->getType()); + if (result_type.is_tensor()) { return stash.create<TensorValue>(std::move(result)); } + if (result_type.is_double()) { + return stash.create<eval::DoubleValue>(result->sum()); + } } return stash.create<ErrorValue>(); } @@ -147,8 +165,13 @@ struct TensorOperationOverride : eval::DefaultOperationVisitor { TensorOperationOverride(const tensor::Tensor &lhs_in, const tensor::Tensor &rhs_in) : lhs(lhs_in), rhs(rhs_in), result() {} - virtual void visitDefault(const eval::Operation &) override { + virtual void visitDefault(const eval::Operation &op) override { // empty result indicates error + const eval::BinaryOperation *binaryOp = + dynamic_cast<const eval::BinaryOperation *>(&op); + if (binaryOp) { + result = lhs.apply(*binaryOp, rhs); + } } virtual void visit(const eval::operation::Add &) override { result = lhs.add(rhs); diff --git a/vespalib/src/vespa/vespalib/tensor/default_tensor_engine.h b/vespalib/src/vespa/vespalib/tensor/default_tensor_engine.h index aba3665d98a..7e1bd903626 100644 --- a/vespalib/src/vespa/vespalib/tensor/default_tensor_engine.h +++ b/vespalib/src/vespa/vespalib/tensor/default_tensor_engine.h @@ -22,6 +22,7 @@ public: ValueType type_of(const Tensor &tensor) const override; bool equal(const Tensor &a, const Tensor &b) const override; vespalib::string to_string(const Tensor &tensor) const override; + TensorSpec to_spec(const Tensor &tensor) const override; std::unique_ptr<Tensor> 
create(const TensorSpec &spec) const override; const Value &reduce(const Tensor &tensor, const BinaryOperation &op, const std::vector<vespalib::string> &dimensions, Stash &stash) const override; diff --git a/vespalib/src/vespa/vespalib/tensor/dense/CMakeLists.txt b/vespalib/src/vespa/vespalib/tensor/dense/CMakeLists.txt index e80083056ca..c965eb6609c 100644 --- a/vespalib/src/vespa/vespalib/tensor/dense/CMakeLists.txt +++ b/vespalib/src/vespa/vespalib/tensor/dense/CMakeLists.txt @@ -1,9 +1,9 @@ # Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. vespa_add_library(vespalib_vespalib_tensor_dense OBJECT SOURCES + direct_dense_tensor_builder.cpp dense_tensor.cpp + dense_tensor_address_combiner.cpp dense_tensor_builder.cpp - dense_tensor_dimension_sum.cpp - dense_tensor_product.cpp DEPENDS ) diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.cpp b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.cpp index 108db088124..5e81e9cb05d 100644 --- a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.cpp +++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.cpp @@ -2,14 +2,17 @@ #include <vespa/fastos/fastos.h> #include "dense_tensor.h" -#include "dense_tensor_dimension_sum.h" -#include "dense_tensor_product.h" +#include "dense_tensor_apply.hpp" +#include "dense_tensor_reduce.hpp" #include <vespa/vespalib/util/stringfmt.h> #include <vespa/vespalib/util/exceptions.h> #include <vespa/vespalib/stllike/asciistream.h> +#include <vespa/vespalib/tensor/tensor_address_builder.h> #include <vespa/vespalib/tensor/tensor_visitor.h> +#include <vespa/vespalib/eval/operation.h> #include <sstream> +using vespalib::eval::TensorSpec; namespace vespalib { namespace tensor { @@ -120,6 +123,15 @@ joinDenseTensorsNegated(const DenseTensor &lhs, std::move(cells)); } +std::vector<vespalib::string> +getDimensions(const DenseTensor &tensor) +{ + std::vector<vespalib::string> dimensions; + for (const auto &dimMeta 
: tensor.dimensionsMeta()) { + dimensions.emplace_back(dimMeta.dimension()); + } + return dimensions; +} } @@ -209,10 +221,9 @@ DenseTensor::add(const Tensor &arg) const if (!rhs) { return Tensor::UP(); } - checkDimensions(*this, *rhs, "add"); - return joinDenseTensors(*this, *rhs, - [](double lhsValue, double rhsValue) - { return lhsValue + rhsValue; }); + return dense::apply(*this, *rhs, + [](double lhsValue, double rhsValue) + { return lhsValue + rhsValue; }); } Tensor::UP @@ -222,11 +233,9 @@ DenseTensor::subtract(const Tensor &arg) const if (!rhs) { return Tensor::UP(); } - // Note that - *rhsCellItr is passed to the lambda function, that is why we do addition. - checkDimensions(*this, *rhs, "subtract"); - return joinDenseTensorsNegated(*this, *rhs, - [](double lhsValue, double rhsValue) - { return lhsValue + rhsValue; }); + return dense::apply(*this, *rhs, + [](double lhsValue, double rhsValue) + { return lhsValue - rhsValue; }); } Tensor::UP @@ -236,7 +245,8 @@ DenseTensor::multiply(const Tensor &arg) const if (!rhs) { return Tensor::UP(); } - return DenseTensorProduct(*this, *rhs).result(); + return dense::apply(*this, *rhs, [](double lhsValue, double rhsValue) + { return lhsValue * rhsValue; }); } Tensor::UP @@ -246,10 +256,9 @@ DenseTensor::min(const Tensor &arg) const if (!rhs) { return Tensor::UP(); } - checkDimensions(*this, *rhs, "min"); - return joinDenseTensors(*this, *rhs, - [](double lhsValue, double rhsValue) - { return std::min(lhsValue, rhsValue); }); + return dense::apply(*this, *rhs, + [](double lhsValue, double rhsValue) + { return std::min(lhsValue, rhsValue); }); } Tensor::UP @@ -259,10 +268,9 @@ DenseTensor::max(const Tensor &arg) const if (!rhs) { return Tensor::UP(); } - checkDimensions(*this, *rhs, "max"); - return joinDenseTensors(*this, *rhs, - [](double lhsValue, double rhsValue) - { return std::max(lhsValue,rhsValue); }); + return dense::apply(*this, *rhs, + [](double lhsValue, double rhsValue) + { return std::max(lhsValue, 
rhsValue); }); } Tensor::UP @@ -295,7 +303,9 @@ DenseTensor::apply(const CellFunction &func) const Tensor::UP DenseTensor::sum(const vespalib::string &dimension) const { - return DenseTensorDimensionSum(*this, dimension).result(); + return dense::reduce(*this, { dimension }, + [](double lhsValue, double rhsValue) + { return lhsValue + rhsValue; }); } bool @@ -322,6 +332,33 @@ DenseTensor::clone() const return std::make_unique<DenseTensor>(_dimensionsMeta, _cells); } +namespace { + +void +buildAddress(const DenseTensor::CellsIterator &itr, TensorSpec::Address &address) +{ + auto addressItr = itr.address().begin(); + for (const auto &dim : itr.dimensions()) { + address.emplace(std::make_pair(dim.dimension(), TensorSpec::Label(*addressItr++))); + } + assert(addressItr == itr.address().end()); +} + +} + +TensorSpec +DenseTensor::toSpec() const +{ + TensorSpec result(getType().to_spec()); + TensorSpec::Address address; + for (CellsIterator itr(_dimensionsMeta, _cells); itr.valid(); itr.next()) { + buildAddress(itr, address); + result.add(address, itr.cell()); + address.clear(); + } + return result; +} + void DenseTensor::print(std::ostream &out) const { @@ -375,5 +412,27 @@ operator<<(std::ostream &out, const DenseTensor::DimensionMeta &value) return out; } +Tensor::UP +DenseTensor::apply(const eval::BinaryOperation &op, const Tensor &arg) const +{ + const DenseTensor *rhs = dynamic_cast<const DenseTensor *>(&arg); + if (!rhs) { + return Tensor::UP(); + } + return dense::apply(*this, *rhs, + [&op](double lhsValue, double rhsValue) + { return op.eval(lhsValue, rhsValue); }); +} + +Tensor::UP +DenseTensor::reduce(const eval::BinaryOperation &op, + const std::vector<vespalib::string> &dimensions) const +{ + return dense::reduce(*this, + (dimensions.empty() ? 
getDimensions(*this) : dimensions), + [&op](double lhsValue, double rhsValue) + { return op.eval(lhsValue, rhsValue); }); +} + } // namespace vespalib::tensor } // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.h b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.h index 73d9c26c408..104fddeee7e 100644 --- a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.h +++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.h @@ -9,7 +9,8 @@ namespace vespalib { namespace tensor { /** - * TODO + * A dense tensor where all dimensions are indexed. + * Tensor cells are stored in an underlying array according to the order of the dimensions. */ class DenseTensor : public Tensor { @@ -69,6 +70,7 @@ public: void next(); double cell() const { return _cells[_cellIdx]; } const std::vector<size_t> &address() const { return _address; } + const DimensionsMeta &dimensions() const { return _dimensionsMeta; } }; @@ -99,10 +101,16 @@ public: virtual Tensor::UP match(const Tensor &arg) const override; virtual Tensor::UP apply(const CellFunction &func) const override; virtual Tensor::UP sum(const vespalib::string &dimension) const override; + virtual Tensor::UP apply(const eval::BinaryOperation &op, + const Tensor &arg) const override; + virtual Tensor::UP reduce(const eval::BinaryOperation &op, + const std::vector<vespalib::string> &dimensions) + const override; virtual bool equals(const Tensor &arg) const override; virtual void print(std::ostream &out) const override; virtual vespalib::string toString() const override; virtual Tensor::UP clone() const override; + virtual eval::TensorSpec toSpec() const override; virtual void accept(TensorVisitor &visitor) const override; }; diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.cpp b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.cpp new file mode 100644 index 00000000000..88fe86ca9e6 --- /dev/null +++ 
b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.cpp @@ -0,0 +1,120 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#include <vespa/fastos/fastos.h> +#include "dense_tensor_address_combiner.h" +#include <vespa/vespalib/util/exceptions.h> +#include <vespa/vespalib/util/stringfmt.h> + +namespace vespalib { +namespace tensor { + +using Address = DenseTensorAddressCombiner::Address; +using DimensionMeta = DenseTensor::DimensionMeta; +using DimensionsMeta = DenseTensorAddressCombiner::DimensionsMeta; + +namespace { + +class AddressReader +{ +private: + const Address &_address; + size_t _idx; + +public: + AddressReader(const Address &address) + : _address(address), + _idx(0) + {} + size_t nextLabel() { + return _address[_idx++]; + } + bool valid() { + return _idx < _address.size(); + } +}; + +} + +DenseTensorAddressCombiner::DenseTensorAddressCombiner(const DimensionsMeta &lhs, + const DimensionsMeta &rhs) + : _ops(), + _combinedAddress() +{ + auto rhsItr = rhs.cbegin(); + auto rhsItrEnd = rhs.cend(); + for (const auto &lhsDim : lhs) { + while ((rhsItr != rhsItrEnd) && (rhsItr->dimension() < lhsDim.dimension())) { + _ops.push_back(AddressOp::RHS); + ++rhsItr; + } + if ((rhsItr != rhsItrEnd) && (rhsItr->dimension() == lhsDim.dimension())) { + _ops.push_back(AddressOp::BOTH); + ++rhsItr; + } else { + _ops.push_back(AddressOp::LHS); + } + } + while (rhsItr != rhsItrEnd) { + _ops.push_back(AddressOp::RHS); + ++rhsItr; + } +} + +bool +DenseTensorAddressCombiner::combine(const CellsIterator &lhsItr, + const CellsIterator &rhsItr) +{ + _combinedAddress.clear(); + AddressReader lhsReader(lhsItr.address()); + AddressReader rhsReader(rhsItr.address()); + for (const auto &op : _ops) { + switch (op) { + case AddressOp::LHS: + _combinedAddress.emplace_back(lhsReader.nextLabel()); + break; + case AddressOp::RHS: + _combinedAddress.emplace_back(rhsReader.nextLabel()); + break; + case 
AddressOp::BOTH: + size_t lhsLabel = lhsReader.nextLabel(); + size_t rhsLabel = rhsReader.nextLabel(); + if (lhsLabel != rhsLabel) { + return false; + } + _combinedAddress.emplace_back(lhsLabel); + } + } + assert(!lhsReader.valid()); + assert(!rhsReader.valid()); + return true; +} + +DimensionsMeta +DenseTensorAddressCombiner::combineDimensions(const DimensionsMeta &lhs, const DimensionsMeta &rhs) +{ + // NOTE: both lhs and rhs are sorted according to dimension names. + DimensionsMeta result; + auto lhsItr = lhs.cbegin(); + auto rhsItr = rhs.cbegin(); + while (lhsItr != lhs.end() && rhsItr != rhs.end()) { + if (lhsItr->dimension() == rhsItr->dimension()) { + result.emplace_back(DimensionMeta(lhsItr->dimension(), std::min(lhsItr->size(), rhsItr->size()))); + ++lhsItr; + ++rhsItr; + } else if (lhsItr->dimension() < rhsItr->dimension()) { + result.emplace_back(*lhsItr++); + } else { + result.emplace_back(*rhsItr++); + } + } + while (lhsItr != lhs.end()) { + result.emplace_back(*lhsItr++); + } + while (rhsItr != rhs.end()) { + result.emplace_back(*rhsItr++); + } + return result; +} + +} // namespace vespalib::tensor +} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.h b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.h new file mode 100644 index 00000000000..2c7f9e61223 --- /dev/null +++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.h @@ -0,0 +1,46 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +#include <vespa/vespalib/tensor/dense/dense_tensor.h> + +namespace vespalib { +namespace tensor { + +/** + * Combines two dense tensor addresses to a new tensor address. + * The resulting dimensions is the union of the input dimensions and + * common dimensions must have matching labels. 
+ */ +class DenseTensorAddressCombiner +{ +public: + using Address = std::vector<size_t>; + using DimensionsMeta = DenseTensor::DimensionsMeta; + +private: + enum class AddressOp { + LHS, + RHS, + BOTH + }; + + using CellsIterator = DenseTensor::CellsIterator; + + std::vector<AddressOp> _ops; + Address _combinedAddress; + +public: + DenseTensorAddressCombiner(const DimensionsMeta &lhs, + const DimensionsMeta &rhs); + + bool combine(const CellsIterator &lhsItr, + const CellsIterator &rhsItr); + const Address &address() const { return _combinedAddress; } + + static DimensionsMeta combineDimensions(const DimensionsMeta &lhs, const DimensionsMeta &rhs); + +}; + +} // namespace vespalib::tensor +} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_apply.h b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_apply.h new file mode 100644 index 00000000000..307e1db43d3 --- /dev/null +++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_apply.h @@ -0,0 +1,25 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +namespace vespalib { +namespace tensor { + +class Tensor; +class DenseTensor; + +namespace dense { + +/** + * Creates a new tensor using all combinations of input tensor cells with matching + * labels for common dimensions, using func to calculate new cell value + * based on the cell values in the input tensors. 
+ */ +template <typename Function> +std::unique_ptr<Tensor> +apply(const DenseTensor &lhs, const DenseTensor &rhs, Function &&func); + +} // namespace vespalib::tensor::dense +} // namespace vespalib::tensor +} // namespace vespalib + diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_apply.hpp b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_apply.hpp new file mode 100644 index 00000000000..3168089b941 --- /dev/null +++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_apply.hpp @@ -0,0 +1,32 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +#include "dense_tensor_apply.h" +#include "dense_tensor_address_combiner.h" +#include "direct_dense_tensor_builder.h" + +namespace vespalib { +namespace tensor { +namespace dense { + +template <typename Function> +std::unique_ptr<Tensor> +apply(const DenseTensor &lhs, const DenseTensor &rhs, Function &&func) +{ + DenseTensorAddressCombiner combiner(lhs.dimensionsMeta(), rhs.dimensionsMeta()); + DirectDenseTensorBuilder builder(DenseTensorAddressCombiner::combineDimensions(lhs.dimensionsMeta(), rhs.dimensionsMeta())); + for (DenseTensor::CellsIterator lhsItr = lhs.cellsIterator(); lhsItr.valid(); lhsItr.next()) { + for (DenseTensor::CellsIterator rhsItr = rhs.cellsIterator(); rhsItr.valid(); rhsItr.next()) { + bool combineSuccess = combiner.combine(lhsItr, rhsItr); + if (combineSuccess) { + builder.insertCell(combiner.address(), func(lhsItr.cell(), rhsItr.cell())); + } + } + } + return builder.build(); +} + +} // namespace vespalib::tensor::dense +} // namespace vespalib::tensor +} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_dimension_sum.cpp b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_dimension_sum.cpp deleted file mode 100644 index f94c9137798..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_dimension_sum.cpp +++ /dev/null @@ -1,106 
+0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#include <vespa/fastos/fastos.h> -#include "dense_tensor_dimension_sum.h" - -namespace vespalib { -namespace tensor { - -using DimensionsMeta = DenseTensor::DimensionsMeta; - -namespace { - -DimensionsMeta -removeDimension(const DimensionsMeta &dimensionsMeta, - const string &dimension) -{ - DimensionsMeta result = dimensionsMeta; - auto itr = std::lower_bound(result.begin(), result.end(), dimension, - [](const auto &dimMeta, const auto &dimension_in) - { return dimMeta.dimension() < dimension_in; }); - if ((itr != result.end()) && (itr->dimension() == dimension)) { - result.erase(itr); - } - return result; -} - -size_t -calcCellsSize(const DimensionsMeta &dimensionsMeta) -{ - size_t cellsSize = 1; - for (const auto &dimMeta : dimensionsMeta) { - cellsSize *= dimMeta.size(); - } - return cellsSize; -} - -struct DimensionSummer -{ - size_t _innerDimSize; - size_t _sumDimSize; - size_t _outerDimSize; - using Cells = DenseTensor::Cells; - - DimensionSummer(const DimensionsMeta &dimensionsMeta, - const string &dimension) - : _innerDimSize(1), - _sumDimSize(1), - _outerDimSize(1) - { - auto itr = std::lower_bound(dimensionsMeta.cbegin(), dimensionsMeta.cend(), dimension, - [](const auto &dimMeta, const auto &dimension_in) - { return dimMeta.dimension() < dimension_in; }); - if ((itr != dimensionsMeta.end()) && (itr->dimension() == dimension)) { - for (auto outerItr = dimensionsMeta.cbegin(); outerItr != itr; ++outerItr) { - _outerDimSize *= outerItr->size(); - } - _sumDimSize = itr->size(); - for (++itr; itr != dimensionsMeta.cend(); ++itr) { - _innerDimSize *= itr->size(); - } - } else { - _outerDimSize = calcCellsSize(dimensionsMeta); - } - } - - void - sumCells(Cells &cells, const Cells &cells_in) const - { - auto itr_in = cells_in.cbegin(); - auto itr = cells.begin(); - for (size_t outerDim = 0; outerDim < _outerDimSize; - ++outerDim) { - 
auto saved_itr = itr; - for (size_t sumDim = 0; sumDim < _sumDimSize; ++sumDim) { - itr = saved_itr; - for (size_t innerDim = 0; innerDim < _innerDimSize; - ++innerDim) { - *itr += *itr_in; - ++itr; - ++itr_in; - } - } - } - assert(itr == cells.end()); - assert(itr_in == cells_in.cend()); - } -}; - - -} - - -DenseTensorDimensionSum::DenseTensorDimensionSum(const TensorImplType &tensor, - const string &dimension) - : _dimensionsMeta(removeDimension(tensor.dimensionsMeta(), - dimension)), - _cells(calcCellsSize(_dimensionsMeta)) -{ - DimensionSummer dimensionSummer(tensor.dimensionsMeta(), - dimension); - dimensionSummer.sumCells(_cells, tensor.cells()); -} - - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_dimension_sum.h b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_dimension_sum.h deleted file mode 100644 index c61e07d5c3a..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_dimension_sum.h +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "dense_tensor.h" - -namespace vespalib { -namespace tensor { - -/** - * Returns a tensor with the given dimension removed and the cell values in that dimension summed. 
- */ -class DenseTensorDimensionSum -{ -public: - using TensorImplType = DenseTensor; -private: - using DimensionMeta = DenseTensor::DimensionMeta; - using DimensionsMeta = DenseTensor::DimensionsMeta; - using Cells = DenseTensor::Cells; - - DimensionsMeta _dimensionsMeta; - Cells _cells; - -public: - DenseTensorDimensionSum(const TensorImplType &tensor, - const vespalib::string &dimension); - - Tensor::UP result() { - return std::make_unique<DenseTensor>(std::move(_dimensionsMeta), - std::move(_cells)); - } -}; - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_product.cpp b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_product.cpp deleted file mode 100644 index fff5f21d3d1..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_product.cpp +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#include <vespa/fastos/fastos.h> -#include "dense_tensor_product.h" -#include <vespa/vespalib/util/exceptions.h> - -namespace vespalib { -namespace tensor { - -using DimensionsMeta = DenseTensor::DimensionsMeta; -using CellsIterator = DenseTensor::CellsIterator; -using Address = std::vector<size_t>; - -using vespalib::IllegalArgumentException; -using vespalib::make_string; - -namespace { - -enum class AddressCombineOp -{ - LHS, - RHS, - BOTH -}; - -using AddressCombineOps = std::vector<AddressCombineOp>; - -class AddressReader -{ -private: - const Address &_address; - size_t _idx; - -public: - AddressReader(const Address &address) - : _address(address), - _idx(0) - {} - size_t nextLabel() { - return _address[_idx++]; - } - bool valid() { - return _idx < _address.size(); - } -}; - -class CellsInserter -{ -private: - const DimensionsMeta &_dimensionsMeta; - DenseTensor::Cells &_cells; - - size_t calculateCellAddress(const Address &address) { - assert(address.size() == 
_dimensionsMeta.size()); - size_t result = 0; - for (size_t i = 0; i < address.size(); ++i) { - result *= _dimensionsMeta[i].size(); - result += address[i]; - } - return result; - } - -public: - CellsInserter(const DimensionsMeta &dimensionsMeta, - DenseTensor::Cells &cells) - : _dimensionsMeta(dimensionsMeta), - _cells(cells) - {} - void insertCell(const Address &address, double cellValue) { - size_t cellAddress = calculateCellAddress(address); - assert(cellAddress < _cells.size()); - _cells[cellAddress] = cellValue; - } -}; - -void -validateDimensionsMeta(const DimensionsMeta &dimensionsMeta) -{ - for (size_t i = 1; i < dimensionsMeta.size(); ++i) { - const auto &prevDimMeta = dimensionsMeta[i-1]; - const auto &currDimMeta = dimensionsMeta[i]; - if ((prevDimMeta.dimension() == currDimMeta.dimension()) && - (prevDimMeta.size() != currDimMeta.size())) { - throw IllegalArgumentException(make_string( - "Shared dimension '%s' in dense tensor product has mis-matching label ranges: " - "[0, %zu> vs [0, %zu>. 
This is not supported.", - prevDimMeta.dimension().c_str(), prevDimMeta.size(), currDimMeta.size())); - } - } -} - -DimensionsMeta -combineDimensions(const DimensionsMeta &lhs, const DimensionsMeta &rhs) -{ - DimensionsMeta result; - std::set_union(lhs.cbegin(), lhs.cend(), - rhs.cbegin(), rhs.cend(), - std::back_inserter(result)); - validateDimensionsMeta(result); - return result; -} - -size_t -calculateCellsSize(const DimensionsMeta &dimensionsMeta) -{ - size_t cellsSize = 1; - for (const auto &dimMeta : dimensionsMeta) { - cellsSize *= dimMeta.size(); - } - return cellsSize; -} - -AddressCombineOps -buildCombineOps(const DimensionsMeta &lhs, - const DimensionsMeta &rhs) -{ - AddressCombineOps ops; - auto rhsItr = rhs.cbegin(); - auto rhsItrEnd = rhs.cend(); - for (const auto &lhsDim : lhs) { - while ((rhsItr != rhsItrEnd) && (rhsItr->dimension() < lhsDim.dimension())) { - ops.push_back(AddressCombineOp::RHS); - ++rhsItr; - } - if ((rhsItr != rhsItrEnd) && (rhsItr->dimension() == lhsDim.dimension())) { - ops.push_back(AddressCombineOp::BOTH); - ++rhsItr; - } else { - ops.push_back(AddressCombineOp::LHS); - } - } - while (rhsItr != rhsItrEnd) { - ops.push_back(AddressCombineOp::RHS); - ++rhsItr; - } - return ops; -} - -bool -combineAddress(Address &combinedAddress, - const CellsIterator &lhsItr, - const CellsIterator &rhsItr, - const AddressCombineOps &ops) -{ - combinedAddress.clear(); - AddressReader lhsReader(lhsItr.address()); - AddressReader rhsReader(rhsItr.address()); - for (const auto &op : ops) { - switch (op) { - case AddressCombineOp::LHS: - combinedAddress.emplace_back(lhsReader.nextLabel()); - break; - case AddressCombineOp::RHS: - combinedAddress.emplace_back(rhsReader.nextLabel()); - break; - case AddressCombineOp::BOTH: - size_t lhsLabel = lhsReader.nextLabel(); - size_t rhsLabel = rhsReader.nextLabel(); - if (lhsLabel != rhsLabel) { - return false; - } - combinedAddress.emplace_back(lhsLabel); - } - } - assert(!lhsReader.valid()); - 
assert(!rhsReader.valid()); - return true; -} - -} - -void -DenseTensorProduct::bruteForceProduct(const DenseTensor &lhs, - const DenseTensor &rhs) -{ - AddressCombineOps ops = buildCombineOps(lhs.dimensionsMeta(), rhs.dimensionsMeta()); - Address combinedAddress; - CellsInserter cellsInserter(_dimensionsMeta, _cells); - for (CellsIterator lhsItr = lhs.cellsIterator(); lhsItr.valid(); lhsItr.next()) { - for (CellsIterator rhsItr = rhs.cellsIterator(); rhsItr.valid(); rhsItr.next()) { - bool combineSuccess = combineAddress(combinedAddress, lhsItr, rhsItr, ops); - if (combineSuccess) { - cellsInserter.insertCell(combinedAddress, lhsItr.cell() * rhsItr.cell()); - } - } - } -} - -DenseTensorProduct::DenseTensorProduct(const DenseTensor &lhs, - const DenseTensor &rhs) - : _dimensionsMeta(combineDimensions(lhs.dimensionsMeta(), rhs.dimensionsMeta())), - _cells(calculateCellsSize(_dimensionsMeta)) -{ - bruteForceProduct(lhs, rhs); -} - -Tensor::UP -DenseTensorProduct::result() -{ - return std::make_unique<DenseTensor>(std::move(_dimensionsMeta), std::move(_cells)); -} - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_product.h b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_product.h deleted file mode 100644 index 5615067119b..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_product.h +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "dense_tensor.h" -#include <vespa/vespalib/tensor/tensor_operation.h> - -namespace vespalib { -namespace tensor { - -/** - * Returns the tensor product of the two given dense tensors. - * This is all combinations of all cells in the first tensor with all cells of - * the second tensor. - * - * Shared dimensions must have the same label range from [0, dimSize>. 
- */ -class DenseTensorProduct -{ -private: - DenseTensor::DimensionsMeta _dimensionsMeta; - DenseTensor::Cells _cells; - - void bruteForceProduct(const DenseTensor &lhs, const DenseTensor &rhs); - -public: - DenseTensorProduct(const DenseTensor &lhs, const DenseTensor &rhs); - Tensor::UP result(); -}; - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_reduce.h b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_reduce.h new file mode 100644 index 00000000000..ce3bf308fd3 --- /dev/null +++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_reduce.h @@ -0,0 +1,21 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +#include "dense_tensor.h" + +namespace vespalib { +namespace tensor { +namespace dense { + +/** + * Returns a tensor with the given dimension(s) removed and the cell values in that dimension(s) + * combined using the given func. + */ +template<typename Function> +std::unique_ptr<Tensor> +reduce(const DenseTensor &tensor, const std::vector<vespalib::string> &dimensions, Function &&func); + +} // namespace dense +} // namespace tensor +} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_reduce.hpp b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_reduce.hpp new file mode 100644 index 00000000000..e2af832f068 --- /dev/null +++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_reduce.hpp @@ -0,0 +1,138 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#include <vespa/fastos/fastos.h> +#include "dense_tensor_reduce.h" + +namespace vespalib { +namespace tensor { +namespace dense { + +using Cells = DenseTensor::Cells; +using DimensionsMeta = DenseTensor::DimensionsMeta; + +namespace { + +DimensionsMeta +removeDimension(const DimensionsMeta &dimensionsMeta, + const string &dimensionToRemove) +{ + DimensionsMeta result = dimensionsMeta; + auto itr = std::lower_bound(result.begin(), result.end(), dimensionToRemove, + [](const auto &dimMeta, const auto &dimension_in) { + return dimMeta.dimension() < dimension_in; + }); + if ((itr != result.end()) && (itr->dimension() == dimensionToRemove)) { + result.erase(itr); + } + return result; +} + +size_t +calcCellsSize(const DimensionsMeta &dimensionsMeta) +{ + size_t cellsSize = 1; + for (const auto &dimMeta : dimensionsMeta) { + cellsSize *= dimMeta.size(); + } + return cellsSize; +} + + +class DimensionReducer +{ +private: + DimensionsMeta _dimensionsResult; + Cells _cellsResult; + size_t _innerDimSize; + size_t _sumDimSize; + size_t _outerDimSize; + + void setup(const DimensionsMeta &dimensions, + const vespalib::string &dimensionToRemove) { + auto itr = std::lower_bound(dimensions.cbegin(), dimensions.cend(), dimensionToRemove, + [](const auto &dimMeta, const auto &dimension) { + return dimMeta.dimension() < dimension; + }); + if ((itr != dimensions.end()) && (itr->dimension() == dimensionToRemove)) { + for (auto outerItr = dimensions.cbegin(); outerItr != itr; ++outerItr) { + _outerDimSize *= outerItr->size(); + } + _sumDimSize = itr->size(); + for (++itr; itr != dimensions.cend(); ++itr) { + _innerDimSize *= itr->size(); + } + } else { + _outerDimSize = calcCellsSize(dimensions); + } + } + +public: + DimensionReducer(const DimensionsMeta &dimensions, + const string &dimensionToRemove) + : _dimensionsResult(removeDimension(dimensions, dimensionToRemove)), + _cellsResult(calcCellsSize(_dimensionsResult)), + _innerDimSize(1), + _sumDimSize(1), + _outerDimSize(1) + { + 
setup(dimensions, dimensionToRemove); + } + + template <typename Function> + DenseTensor::UP + reduceCells(const Cells &cellsIn, Function &&func) { + auto itr_in = cellsIn.cbegin(); + auto itr_out = _cellsResult.begin(); + for (size_t outerDim = 0; outerDim < _outerDimSize; ++outerDim) { + auto saved_itr = itr_out; + for (size_t innerDim = 0; innerDim < _innerDimSize; ++innerDim) { + *itr_out = *itr_in; + ++itr_out; + ++itr_in; + } + for (size_t sumDim = 1; sumDim < _sumDimSize; ++sumDim) { + itr_out = saved_itr; + for (size_t innerDim = 0; innerDim < _innerDimSize; ++innerDim) { + *itr_out = func(*itr_out, *itr_in); + ++itr_out; + ++itr_in; + } + } + } + assert(itr_out == _cellsResult.end()); + assert(itr_in == cellsIn.cend()); + return std::make_unique<DenseTensor>(std::move(_dimensionsResult), std::move(_cellsResult)); + } +}; + +template <typename Function> +DenseTensor::UP +reduce(const DenseTensor &tensor, const vespalib::string &dimensionToRemove, Function &&func) +{ + DimensionReducer reducer(tensor.dimensionsMeta(), dimensionToRemove); + return reducer.reduceCells(tensor.cells(), func); +} + +} + +template <typename Function> +std::unique_ptr<Tensor> +reduce(const DenseTensor &tensor, const std::vector<vespalib::string> &dimensions, Function &&func) +{ + if (dimensions.size() == 1) { + return reduce(tensor, dimensions[0], func); + } else if (dimensions.size() > 0) { + DenseTensor::UP result = reduce(tensor, dimensions[0], func); + for (size_t i = 1; i < dimensions.size(); ++i) { + DenseTensor::UP tmpResult = reduce(*result, dimensions[i], func); + result = std::move(tmpResult); + } + return result; + } else { + return std::unique_ptr<Tensor>(); + } +} + +} // namespace dense +} // namespace tensor +} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.cpp b/vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.cpp new file mode 100644 index 00000000000..dd1682fb451 --- /dev/null +++ 
b/vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.cpp @@ -0,0 +1,59 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#include <vespa/fastos/fastos.h> +#include "direct_dense_tensor_builder.h" + +namespace vespalib { +namespace tensor { + +using Address = DirectDenseTensorBuilder::Address; +using DimensionsMeta = DirectDenseTensorBuilder::DimensionsMeta; + +namespace { + +size_t +calculateCellsSize(const DimensionsMeta &dimensionsMeta) +{ + size_t cellsSize = 1; + for (const auto &dimMeta : dimensionsMeta) { + cellsSize *= dimMeta.size(); + } + return cellsSize; +} + +size_t +calculateCellAddress(const Address &address, const DimensionsMeta &dimensionsMeta) +{ + assert(address.size() == dimensionsMeta.size()); + size_t result = 0; + for (size_t i = 0; i < address.size(); ++i) { + result *= dimensionsMeta[i].size(); + result += address[i]; + } + return result; +} + +} + +DirectDenseTensorBuilder::DirectDenseTensorBuilder(const DimensionsMeta &dimensionsMeta) + : _dimensionsMeta(dimensionsMeta), + _cells(calculateCellsSize(_dimensionsMeta)) +{ +} + +void +DirectDenseTensorBuilder::insertCell(const Address &address, double cellValue) +{ + size_t cellAddress = calculateCellAddress(address, _dimensionsMeta); + assert(cellAddress < _cells.size()); + _cells[cellAddress] = cellValue; +} + +Tensor::UP +DirectDenseTensorBuilder::build() +{ + return std::make_unique<DenseTensor>(std::move(_dimensionsMeta), std::move(_cells)); +} + +} // namespace tensor +} // namesapce vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.h b/vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.h new file mode 100644 index 00000000000..74234f1cabe --- /dev/null +++ b/vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.h @@ -0,0 +1,31 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. 
See LICENSE in the project root. + +#pragma once + +#include "dense_tensor.h" + +namespace vespalib { +namespace tensor { + +/** + * Class for building a dense tensor by inserting cell values directly into underlying array of cells. + */ +class DirectDenseTensorBuilder +{ +public: + using DimensionsMeta = DenseTensor::DimensionsMeta; + using Cells = DenseTensor::Cells; + using Address = std::vector<size_t>; + +private: + DimensionsMeta _dimensionsMeta; + Cells _cells; + +public: + DirectDenseTensorBuilder(const DimensionsMeta &dimensionsMeta); + void insertCell(const Address &address, double cellValue); + Tensor::UP build(); +}; + +} // namespace tensor +} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/dimensions_vector_iterator.h b/vespalib/src/vespa/vespalib/tensor/dimensions_vector_iterator.h deleted file mode 100644 index f23c4b6e20f..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/dimensions_vector_iterator.h +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -namespace vespalib { -namespace tensor { - -using DimensionsVector = std::vector<vespalib::stringref>; - -/** - * An iterator for a dimensions vector used to simplify 3-way merge - * between two tensor addresses and a dimension vector. 
- */ -class DimensionsVectorIterator -{ - using InnerIterator = DimensionsVector::const_iterator; - InnerIterator _itr; - InnerIterator _itrEnd; -public: - DimensionsVectorIterator(const DimensionsVector &dimensions) - : _itr(dimensions.cbegin()), - _itrEnd(dimensions.cend()) - { - } - bool valid() const { return (_itr != _itrEnd); } - vespalib::stringref dimension() const { return *_itr; } - template <typename Iterator> - bool beforeDimension(const Iterator &rhs) const { - if (!valid()) { - return false; - } - if (!rhs.valid()) { - return true; - } - return (*_itr < rhs.dimension()); - } - bool atDimension(vespalib::stringref rhsDimension) const - { - return (valid() && (*_itr == rhsDimension)); - } - void next() { ++_itr; } -}; - - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/join_tensor_addresses.h b/vespalib/src/vespa/vespalib/tensor/join_tensor_addresses.h deleted file mode 100644 index d691732b800..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/join_tensor_addresses.h +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -namespace vespalib { -namespace tensor { - -/* - * Combine two tensor addresses, but fail if dimension label doesn't match - * for common dimensions. Use 3-way merge between two tensors and a vector - * of dimensions. To be used when we have few common dimensions. - * The commonDimensions parameter is the intersection of the - * dimensions in the two input tensors. 
- */ -template <class AddressBuilder, class LhsAddress, class RhsAddress> -bool -joinTensorAddresses(AddressBuilder &combined, - const DimensionsVector &commonDimensions, - const LhsAddress &lhs, - const RhsAddress &rhs) -{ - TensorAddressElementIterator<LhsAddress> lhsItr(lhs); - TensorAddressElementIterator<RhsAddress> rhsItr(rhs); - DimensionsVectorIterator dimsItr(commonDimensions); - combined.clear(); - while (lhsItr.valid()) { - while (dimsItr.beforeDimension(lhsItr)) { - rhsItr.addElements(combined, dimsItr); - if (rhsItr.atDimension(dimsItr.dimension())) { - // needed dimension missing from lhs - return false; - } - dimsItr.next(); - } - if (dimsItr.atDimension(lhsItr.dimension())) { - rhsItr.addElements(combined, dimsItr); - if (!rhsItr.atDimension(dimsItr.dimension())) { - // needed dimension missing from rhs - return false; - } - if (lhsItr.label() != rhsItr.label()) { - // dimension exists in both rhs and lhs, but labels don't match - return false; - } - // common dimension, labels match - lhsItr.addElement(combined); - lhsItr.next(); - rhsItr.next(); - dimsItr.next(); - continue; - } - rhsItr.addElements(combined, lhsItr); - assert(lhsItr.beforeDimension(rhsItr)); - lhsItr.addElement(combined); - lhsItr.next(); - } - while (dimsItr.valid()) { - rhsItr.addElements(combined, dimsItr); - if (rhsItr.atDimension(dimsItr.dimension())) { - // needed dimension missing from lhs - return false; - } - dimsItr.next(); - } - rhsItr.addElements(combined); - // All matching - return true; -} - -/* - * Combine two tensor addresses, but fail if dimension label doesn't match - * for common dimensions. Use 3-way merge between two tensors and a vector - * of dimensions. To be used when we have many common dimensions. - * The commonDimensions parameter is the intersection of the - * dimensions in the two input tensors. 
- */ -template <class AddressBuilder, class LhsAddress, class RhsAddress> -bool -joinTensorAddresses(AddressBuilder &combined, - const DimensionsSet &commonDimensions, - const LhsAddress &lhs, - const RhsAddress &rhs) -{ - TensorAddressElementIterator<LhsAddress> lhsItr(lhs); - TensorAddressElementIterator<RhsAddress> rhsItr(rhs); - combined.clear(); - if (lhsItr.valid() && rhsItr.valid()) { - for (;;) { - if (lhsItr.beforeDimension(rhsItr)) { - if (!lhsItr.addElements(combined, commonDimensions, rhsItr)) { - return false; - } - if (!lhsItr.valid()) { - break; - } - } - if (lhsItr.dimension() == rhsItr.dimension()) { - if (lhsItr.label() != rhsItr.label()) { - return false; - } - lhsItr.addElement(combined); - lhsItr.next(); - rhsItr.next(); - if (!lhsItr.valid() || !rhsItr.valid()) { - break; - } - continue; - } - if (!rhsItr.addElements(combined, commonDimensions, lhsItr)) { - return false; - } - if (!rhsItr.valid()) { - break; - } - } - } - if (!lhsItr.addElements(combined, commonDimensions)) { - return false; - } - if (!rhsItr.addElements(combined, commonDimensions)) { - return false; - } - // All matching - return true; -} - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/serialization/CMakeLists.txt b/vespalib/src/vespa/vespalib/tensor/serialization/CMakeLists.txt index 246e1b17535..1f178dd7118 100644 --- a/vespalib/src/vespa/vespalib/tensor/serialization/CMakeLists.txt +++ b/vespalib/src/vespa/vespalib/tensor/serialization/CMakeLists.txt @@ -1,7 +1,7 @@ # Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
vespa_add_library(vespalib_vespalib_tensor_serialization OBJECT SOURCES - compact_binary_format.cpp + sparse_binary_format.cpp dense_binary_format.cpp slime_binary_format.cpp typed_binary_format.cpp diff --git a/vespalib/src/vespa/vespalib/tensor/serialization/compact_binary_format.cpp b/vespalib/src/vespa/vespalib/tensor/serialization/sparse_binary_format.cpp index 48b5cb971d6..af7a92d2c68 100644 --- a/vespalib/src/vespa/vespalib/tensor/serialization/compact_binary_format.cpp +++ b/vespalib/src/vespa/vespalib/tensor/serialization/sparse_binary_format.cpp @@ -1,7 +1,7 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. #include <vespa/fastos/fastos.h> -#include "compact_binary_format.h" +#include "sparse_binary_format.h" #include <vespa/vespalib/tensor/types.h> #include <vespa/vespalib/tensor/tensor.h> #include <vespa/vespalib/tensor/tensor_builder.h> @@ -40,20 +40,20 @@ writeTensorAddress(nbostream &output, } -class CompactBinaryFormatSerializer : public TensorVisitor +class SparseBinaryFormatSerializer : public TensorVisitor { uint32_t _numCells; nbostream _cells; eval::ValueType _type; public: - CompactBinaryFormatSerializer(); - virtual ~CompactBinaryFormatSerializer() override; + SparseBinaryFormatSerializer(); + virtual ~SparseBinaryFormatSerializer() override; virtual void visit(const TensorAddress &address, double value) override; void serialize(nbostream &stream, const Tensor &tensor); }; -CompactBinaryFormatSerializer::CompactBinaryFormatSerializer() +SparseBinaryFormatSerializer::SparseBinaryFormatSerializer() : _numCells(0u), _cells(), _type(eval::ValueType::error_type()) @@ -61,12 +61,12 @@ CompactBinaryFormatSerializer::CompactBinaryFormatSerializer() } -CompactBinaryFormatSerializer::~CompactBinaryFormatSerializer() +SparseBinaryFormatSerializer::~SparseBinaryFormatSerializer() { } void -CompactBinaryFormatSerializer::visit(const TensorAddress &address, 
+SparseBinaryFormatSerializer::visit(const TensorAddress &address, double value) { ++_numCells; @@ -76,7 +76,7 @@ CompactBinaryFormatSerializer::visit(const TensorAddress &address, void -CompactBinaryFormatSerializer::serialize(nbostream &stream, +SparseBinaryFormatSerializer::serialize(nbostream &stream, const Tensor &tensor) { _type = tensor.getType(); @@ -91,15 +91,15 @@ CompactBinaryFormatSerializer::serialize(nbostream &stream, void -CompactBinaryFormat::serialize(nbostream &stream, const Tensor &tensor) +SparseBinaryFormat::serialize(nbostream &stream, const Tensor &tensor) { - CompactBinaryFormatSerializer serializer; + SparseBinaryFormatSerializer serializer; serializer.serialize(stream, tensor); } void -CompactBinaryFormat::deserialize(nbostream &stream, TensorBuilder &builder) +SparseBinaryFormat::deserialize(nbostream &stream, TensorBuilder &builder) { vespalib::string str; size_t dimensionsSize = stream.getInt1_4Bytes(); diff --git a/vespalib/src/vespa/vespalib/tensor/serialization/compact_binary_format.h b/vespalib/src/vespa/vespalib/tensor/serialization/sparse_binary_format.h index fd3abdc2b71..6102c13130e 100644 --- a/vespalib/src/vespa/vespalib/tensor/serialization/compact_binary_format.h +++ b/vespalib/src/vespa/vespalib/tensor/serialization/sparse_binary_format.h @@ -14,7 +14,7 @@ class TensorBuilder; /** * Class for serializing a tensor. 
*/ -class CompactBinaryFormat +class SparseBinaryFormat { public: static void serialize(nbostream &stream, const Tensor &tensor); diff --git a/vespalib/src/vespa/vespalib/tensor/serialization/typed_binary_format.cpp b/vespalib/src/vespa/vespalib/tensor/serialization/typed_binary_format.cpp index d3d0ae40545..c282a20fe6e 100644 --- a/vespalib/src/vespa/vespalib/tensor/serialization/typed_binary_format.cpp +++ b/vespalib/src/vespa/vespalib/tensor/serialization/typed_binary_format.cpp @@ -2,7 +2,7 @@ #include <vespa/fastos/fastos.h> #include "typed_binary_format.h" -#include "compact_binary_format.h" +#include "sparse_binary_format.h" #include "dense_binary_format.h" #include <vespa/vespalib/objects/nbostream.h> #include <vespa/vespalib/tensor/default_tensor.h> @@ -23,8 +23,8 @@ TypedBinaryFormat::serialize(nbostream &stream, const Tensor &tensor) stream.putInt1_4Bytes(DENSE_BINARY_FORMAT_TYPE); DenseBinaryFormat::serialize(stream, *denseTensor); } else { - stream.putInt1_4Bytes(COMPACT_BINARY_FORMAT_TYPE); - CompactBinaryFormat::serialize(stream, tensor); + stream.putInt1_4Bytes(SPARSE_BINARY_FORMAT_TYPE); + SparseBinaryFormat::serialize(stream, tensor); } } @@ -33,8 +33,8 @@ void TypedBinaryFormat::deserialize(nbostream &stream, TensorBuilder &builder) { auto formatId = stream.getInt1_4Bytes(); - assert(formatId == COMPACT_BINARY_FORMAT_TYPE); - CompactBinaryFormat::deserialize(stream, builder); + assert(formatId == SPARSE_BINARY_FORMAT_TYPE); + SparseBinaryFormat::deserialize(stream, builder); } @@ -42,9 +42,9 @@ std::unique_ptr<Tensor> TypedBinaryFormat::deserialize(nbostream &stream) { auto formatId = stream.getInt1_4Bytes(); - if (formatId == COMPACT_BINARY_FORMAT_TYPE) { + if (formatId == SPARSE_BINARY_FORMAT_TYPE) { DefaultTensor::builder builder; - CompactBinaryFormat::deserialize(stream, builder); + SparseBinaryFormat::deserialize(stream, builder); return builder.build(); } if (formatId == DENSE_BINARY_FORMAT_TYPE) { diff --git 
a/vespalib/src/vespa/vespalib/tensor/serialization/typed_binary_format.h b/vespalib/src/vespa/vespalib/tensor/serialization/typed_binary_format.h index 31bd12ddfc0..45cba6018d4 100644 --- a/vespalib/src/vespa/vespalib/tensor/serialization/typed_binary_format.h +++ b/vespalib/src/vespa/vespalib/tensor/serialization/typed_binary_format.h @@ -16,7 +16,7 @@ class TensorBuilder; */ class TypedBinaryFormat { - static constexpr uint32_t COMPACT_BINARY_FORMAT_TYPE = 1u; + static constexpr uint32_t SPARSE_BINARY_FORMAT_TYPE = 1u; static constexpr uint32_t DENSE_BINARY_FORMAT_TYPE = 2u; public: static void serialize(nbostream &stream, const Tensor &tensor); diff --git a/vespalib/src/vespa/vespalib/tensor/simple/CMakeLists.txt b/vespalib/src/vespa/vespalib/tensor/simple/CMakeLists.txt deleted file mode 100644 index deeb18030e4..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/simple/CMakeLists.txt +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -vespa_add_library(vespalib_vespalib_tensor_simple OBJECT - SOURCES - simple_tensor.cpp - simple_tensor_builder.cpp - simple_tensor_dimension_sum.cpp - simple_tensor_product.cpp - DEPENDS -) diff --git a/vespalib/src/vespa/vespalib/tensor/simple/direct_simple_tensor_builder.h b/vespalib/src/vespa/vespalib/tensor/simple/direct_simple_tensor_builder.h deleted file mode 100644 index 525d0ef9864..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/simple/direct_simple_tensor_builder.h +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include <vespa/vespalib/tensor/direct_tensor_builder.h> -#include "simple_tensor.h" -#include <vespa/vespalib/tensor/tensor_address_builder.h> - -namespace vespalib { -namespace tensor { - -/** - * Utility class to build tensors of type SimpleTensor, to be used by - * tensor operations. 
- */ -template <> class DirectTensorBuilder<SimpleTensor> -{ -public: - using TensorImplType = SimpleTensor; - using Dimensions = typename TensorImplType::Dimensions; - using Cells = typename TensorImplType::Cells; - using AddressBuilderType = TensorAddressBuilder; - using AddressRefType = TensorAddress; - using AddressType = TensorAddress; - -private: - Dimensions _dimensions; - Cells _cells; - -public: - DirectTensorBuilder() - : _dimensions(), - _cells() - { - } - - DirectTensorBuilder(const Dimensions &dimensions_in) - : _dimensions(dimensions_in), - _cells() - { - } - - DirectTensorBuilder(const Dimensions &dimensions_in, - const Cells &cells_in) - : _dimensions(dimensions_in), - _cells(cells_in) - { - } - - Tensor::UP build() { - return std::make_unique<SimpleTensor>(std::move(_dimensions), - std::move(_cells)); - } - - template <class Function> - void insertCell(const TensorAddress &address, double value, - Function &&func) - { - auto res = _cells.insert(std::make_pair(address, value)); - if (!res.second) { - res.first->second = func(res.first->second, value); - } - } - - void insertCell(const TensorAddress &address, double value) { - // This address should not already exist and a new cell should be inserted. - insertCell(address, value, [](double, double) -> double { abort(); }); - } - - // Note: moves data from TensorAddressBuilder to new TensorAddress. - template <class Function> - void insertCell(TensorAddressBuilder &address, double value, - Function &&func) - { - auto res = - _cells.insert(std::make_pair(address.build(), value)); - if (!res.second) { - res.first->second = func(res.first->second, value); - } - } - - void insertCell(TensorAddressBuilder &address, double value) { - // This address should not already exist and a new cell should be inserted. 
- insertCell(address, value, [](double, double) -> double { abort(); }); - } - - Dimensions &dimensions() { return _dimensions; } - Cells &cells() { return _cells; } -}; - - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/simple/simple_tensor.cpp b/vespalib/src/vespa/vespalib/tensor/simple/simple_tensor.cpp deleted file mode 100644 index bb4b363a062..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/simple/simple_tensor.cpp +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#include <vespa/fastos/fastos.h> -#include "simple_tensor.h" -#include "simple_tensor_dimension_sum.h" -#include "simple_tensor_product.h" -#include <vespa/vespalib/tensor/join_tensors.h> -#include <vespa/vespalib/tensor/tensor_apply.h> -#include <sstream> -#include <vespa/vespalib/tensor/tensor_visitor.h> - -namespace vespalib { -namespace tensor { - -SimpleTensor::SimpleTensor(const Dimensions &dimensions_in, const Cells &cells_in) - : _dimensions(dimensions_in), - _cells(cells_in) -{ -} - -SimpleTensor::SimpleTensor(Dimensions &&dimensions_in, Cells &&cells_in) - : _dimensions(std::move(dimensions_in)), - _cells(std::move(cells_in)) -{ -} - -bool -SimpleTensor::operator==(const SimpleTensor &rhs) const -{ - return _dimensions == rhs._dimensions && _cells == rhs._cells; -} - -SimpleTensor::Dimensions -SimpleTensor::combineDimensionsWith(const SimpleTensor &rhs) const -{ - Dimensions result; - std::set_union(_dimensions.cbegin(), _dimensions.cend(), - rhs._dimensions.cbegin(), rhs._dimensions.cend(), - std::back_inserter(result)); - return result; -} - -eval::ValueType -SimpleTensor::getType() const -{ - if (_dimensions.empty()) { - return eval::ValueType::double_type(); - } - std::vector<eval::ValueType::Dimension> dimensions; - std::copy(_dimensions.begin(), _dimensions.end(), std::back_inserter(dimensions)); - return 
eval::ValueType::tensor_type(dimensions); -} - -double -SimpleTensor::sum() const -{ - double result = 0.0; - for (const auto &cell : _cells) { - result += cell.second; - } - return result; -} - -Tensor::UP -SimpleTensor::add(const Tensor &arg) const -{ - const SimpleTensor *rhs = dynamic_cast<const SimpleTensor *>(&arg); - if (!rhs) { - return Tensor::UP(); - } - return joinTensors(*this, *rhs, - [](double lhsValue, double rhsValue) { return lhsValue + rhsValue; }); -} - -Tensor::UP -SimpleTensor::subtract(const Tensor &arg) const -{ - const SimpleTensor *rhs = dynamic_cast<const SimpleTensor *>(&arg); - if (!rhs) { - return Tensor::UP(); - } - // Note that -rhsCell.second is passed to the lambda function, that is why we do addition. - return joinTensorsNegated(*this, *rhs, - [](double lhsValue, double rhsValue) { return lhsValue + rhsValue; }); -} - -Tensor::UP -SimpleTensor::multiply(const Tensor &arg) const -{ - const SimpleTensor *rhs = dynamic_cast<const SimpleTensor *>(&arg); - if (!rhs) { - return Tensor::UP(); - } - return SimpleTensorProduct(*this, *rhs).result(); -} - -Tensor::UP -SimpleTensor::min(const Tensor &arg) const -{ - const SimpleTensor *rhs = dynamic_cast<const SimpleTensor *>(&arg); - if (!rhs) { - return Tensor::UP(); - } - return joinTensors(*this, *rhs, - [](double lhsValue, double rhsValue){ return std::min(lhsValue, rhsValue); }); -} - -Tensor::UP -SimpleTensor::max(const Tensor &arg) const -{ - const SimpleTensor *rhs = dynamic_cast<const SimpleTensor *>(&arg); - if (!rhs) { - return Tensor::UP(); - } - return joinTensors(*this, *rhs, - [](double lhsValue, double rhsValue){ return std::max(lhsValue, rhsValue); }); -} - -Tensor::UP -SimpleTensor::match(const Tensor &arg) const -{ - const SimpleTensor *rhs = dynamic_cast<const SimpleTensor *>(&arg); - if (!rhs) { - return Tensor::UP(); - } - DirectTensorBuilder<SimpleTensor> builder(combineDimensionsWith(*rhs)); - for (const auto &lhsCell : cells()) { - auto rhsItr = 
rhs->cells().find(lhsCell.first); - if (rhsItr != rhs->cells().end()) { - builder.insertCell(lhsCell.first, lhsCell.second * rhsItr->second); - } - } - return builder.build(); -} - -Tensor::UP -SimpleTensor::apply(const CellFunction &func) const -{ - return TensorApply<SimpleTensor>(*this, func).result(); -} - -Tensor::UP -SimpleTensor::sum(const vespalib::string &dimension) const -{ - return SimpleTensorDimensionSum(*this, dimension).result(); -} - -bool -SimpleTensor::equals(const Tensor &arg) const -{ - const SimpleTensor *rhs = dynamic_cast<const SimpleTensor *>(&arg); - if (!rhs) { - return false; - } - return *this == *rhs; -} - -vespalib::string -SimpleTensor::toString() const -{ - std::ostringstream stream; - stream << *this; - return stream.str(); -} - -Tensor::UP -SimpleTensor::clone() const -{ - return std::make_unique<SimpleTensor>(_dimensions, _cells); -} - -namespace { - -TensorAddress -getAddressNotFoundInCells(const SimpleTensor::Dimensions &dimensions, - const SimpleTensor::Cells &cells) -{ - TensorDimensionsSet dimensionsNotFoundInCells(dimensions.begin(), - dimensions.end()); - for (const auto &cell : cells) { - for (const auto &elem : cell.first.elements()) { - dimensionsNotFoundInCells.erase(elem.dimension()); - } - } - SimpleTensor::Dimensions - missingDimensions(dimensionsNotFoundInCells.begin(), - dimensionsNotFoundInCells.end()); - std::sort(missingDimensions.begin(), missingDimensions.end()); - TensorAddress::Elements elements; - for (const auto &dimension : missingDimensions) { - elements.emplace_back(dimension, "-"); - } - return TensorAddress(elements); -} - -void -printCells(const SimpleTensor::Cells &cells, std::ostream &out) -{ - out << "{ "; - bool first = true; - for (const auto &cell : cells) { - if (!first) { - out << ", "; - } - out << cell.first << ":" << cell.second; - first = false; - } - out << " }"; -} - -} - -void -SimpleTensor::print(std::ostream &out) const -{ - // This address represents the extra tensor dimensions that 
are not - // explicitly found in the tensor cells. - TensorAddress extraDimensionsAddress = getAddressNotFoundInCells(dimensions(), cells()); - if (extraDimensionsAddress.elements().empty()) { - printCells(cells(), out); - } else { - out << "( "; - printCells(cells(), out); - out << " * "; - // Multiplying with this cell gives us a way of representing the extra tensor - // dimensions without having explicit syntax for printing dimensions. - SimpleTensor::Cells extraDimensionsCell; - extraDimensionsCell.insert(std::make_pair(extraDimensionsAddress, 1.0)); - printCells(extraDimensionsCell, out); - out << " )"; - } -} - -void -SimpleTensor::accept(TensorVisitor &visitor) const -{ - for (const auto &cell : _cells) { - visitor.visit(cell.first, cell.second); - } -} - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/simple/simple_tensor.h b/vespalib/src/vespa/vespalib/tensor/simple/simple_tensor.h deleted file mode 100644 index aaa3c274f12..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/simple/simple_tensor.h +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include <vespa/vespalib/tensor/cell_function.h> -#include <vespa/vespalib/tensor/tensor.h> -#include <vespa/vespalib/tensor/tensor_address.h> -#include <vespa/vespalib/tensor/types.h> -#include <vespa/vespalib/stllike/hash_map.h> -#include <vespa/vespalib/stllike/string.h> -#include <set> - -namespace vespalib { -namespace tensor { - -/** - * A sparse multi-dimensional array. - * - * A sparse tensor is a set of cells containing scalar values. - * Each cell is identified by its address, which consists of a set of dimension -> label pairs, - * where both dimension and label is a string on the form of an identifier or integer. 
- */ -class SimpleTensor : public Tensor -{ -public: - typedef std::unique_ptr<SimpleTensor> UP; - typedef vespalib::hash_map<TensorAddress, double> Cells; - typedef TensorDimensions Dimensions; - -private: - Dimensions _dimensions; - Cells _cells; - -public: - SimpleTensor(const Dimensions &dimensions_in, const Cells &cells_in); - SimpleTensor(Dimensions &&dimensions_in, Cells &&cells_in); - const Cells &cells() const { return _cells; } - const Dimensions &dimensions() const { return _dimensions; } - bool operator==(const SimpleTensor &rhs) const; - Dimensions combineDimensionsWith(const SimpleTensor &rhs) const; - - virtual eval::ValueType getType() const override; - virtual double sum() const override; - virtual Tensor::UP add(const Tensor &arg) const override; - virtual Tensor::UP subtract(const Tensor &arg) const override; - virtual Tensor::UP multiply(const Tensor &arg) const override; - virtual Tensor::UP min(const Tensor &arg) const override; - virtual Tensor::UP max(const Tensor &arg) const override; - virtual Tensor::UP match(const Tensor &arg) const override; - virtual Tensor::UP apply(const CellFunction &func) const override; - virtual Tensor::UP sum(const vespalib::string &dimension) const override; - virtual bool equals(const Tensor &arg) const override; - virtual void print(std::ostream &out) const override; - virtual vespalib::string toString() const override; - virtual Tensor::UP clone() const override; - virtual void accept(TensorVisitor &visitor) const override; -}; - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/simple/simple_tensor_builder.cpp b/vespalib/src/vespa/vespalib/tensor/simple/simple_tensor_builder.cpp deleted file mode 100644 index 1d24dd9147d..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/simple/simple_tensor_builder.cpp +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
- -#include "simple_tensor_builder.h" -#include <vespa/vespalib/tensor/tensor.h> - -namespace vespalib { -namespace tensor { - - -SimpleTensorBuilder::SimpleTensorBuilder() - : TensorBuilder(), - _addressBuilder(), - _cells(), - _dimensionsEnum(), - _dimensions() -{ -} - -SimpleTensorBuilder::~SimpleTensorBuilder() -{ -} - - -TensorBuilder::Dimension -SimpleTensorBuilder::define_dimension(const vespalib::string &dimension) -{ - auto it = _dimensionsEnum.find(dimension); - if (it != _dimensionsEnum.end()) { - return it->second; - } - Dimension res = _dimensionsEnum.size(); - auto insres = _dimensionsEnum.insert(std::make_pair(dimension, res)); - assert(insres.second); - assert(insres.first->second == res); - assert(_dimensions.size() == res); - _dimensions.push_back(dimension); - return res; -} - -TensorBuilder & -SimpleTensorBuilder::add_label(Dimension dimension, - const vespalib::string &label) -{ - assert(dimension <= _dimensions.size()); - _addressBuilder.add(_dimensions[dimension], label); - return *this; -} - -TensorBuilder & -SimpleTensorBuilder::add_cell(double value) -{ - _cells[_addressBuilder.build()] = value; - _addressBuilder.clear(); - return *this; -} - - -Tensor::UP -SimpleTensorBuilder::build() -{ - SimpleTensor::Dimensions dimensions(_dimensions.begin(), _dimensions.end()); - std::sort(dimensions.begin(), dimensions.end()); - Tensor::UP ret = std::make_unique<SimpleTensor>(std::move(dimensions), std::move(_cells)); - SimpleTensor::Cells().swap(_cells); - _dimensionsEnum.clear(); - _dimensions.clear(); - return ret; -} - - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/simple/simple_tensor_builder.h b/vespalib/src/vespa/vespalib/tensor/simple/simple_tensor_builder.h deleted file mode 100644 index 81d94921b03..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/simple/simple_tensor_builder.h +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2016 Yahoo Inc. 
Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include "simple_tensor.h" -#include <vespa/vespalib/tensor/tensor_builder.h> -#include <vespa/vespalib/tensor/tensor_address.h> -#include <vespa/vespalib/tensor/tensor_address_builder.h> -#include <vespa/vespalib/stllike/hash_map.h> - -namespace vespalib { -namespace tensor { - -/** - * A simple builder of tensors (sparse multi-dimensional array). - * - * A sparse tensor is a set of cells containing scalar values. - * Each cell is identified by its address, which consists of a set of dimension -> label pairs, - * where both dimension and label is a string on the form of an identifier or integer. - */ -class SimpleTensorBuilder : public TensorBuilder -{ - TensorAddressBuilder _addressBuilder; - SimpleTensor::Cells _cells; - vespalib::hash_map<vespalib::string, uint32_t> _dimensionsEnum; - std::vector<vespalib::string> _dimensions; -public: - SimpleTensorBuilder(); - virtual ~SimpleTensorBuilder(); - - virtual Dimension - define_dimension(const vespalib::string &dimension) override; - virtual TensorBuilder & - add_label(Dimension dimension, - const vespalib::string &label) override; - virtual TensorBuilder &add_cell(double value) override; - - virtual Tensor::UP build() override; -}; - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/simple/simple_tensor_dimension_sum.cpp b/vespalib/src/vespa/vespalib/tensor/simple/simple_tensor_dimension_sum.cpp deleted file mode 100644 index 435034b8f35..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/simple/simple_tensor_dimension_sum.cpp +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
- -#include <vespa/fastos/fastos.h> -#include "simple_tensor_dimension_sum.h" -#include <vespa/vespalib/tensor/tensor_address_element_iterator.h> - -namespace vespalib { -namespace tensor { - -namespace { - -template <class AddressBuilder, class Address> -void -removeDimension(AddressBuilder &addressBuilder, - const Address &address, - const vespalib::stringref dimension) -{ - addressBuilder.clear(); - for (const auto &elem : address.elements()) { - if (elem.dimension() != dimension) { - addressBuilder.add(elem.dimension(), elem.label()); - } - } -} - -TensorDimensions -removeDimension(const TensorDimensions &dimensions, - const vespalib::string &dimension) -{ - TensorDimensions result = dimensions; - auto itr = std::lower_bound(result.begin(), result.end(), dimension); - if (itr != result.end() && *itr == dimension) { - result.erase(itr); - } - return result; -} - -} - -SimpleTensorDimensionSum::SimpleTensorDimensionSum(const TensorImplType &tensor, - const vespalib::string & - dimension) - : Parent(removeDimension(tensor.dimensions(), dimension)) -{ - AddressBuilderType reducedAddress; - for (const auto &cell : tensor.cells()) { - removeDimension<AddressBuilderType, AddressType> - (reducedAddress, cell.first, dimension); - _builder.insertCell(reducedAddress, cell.second, - [](double cellValue, double rhsValue) { return cellValue + rhsValue; }); - } -} - - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/simple/simple_tensor_dimension_sum.h b/vespalib/src/vespa/vespalib/tensor/simple/simple_tensor_dimension_sum.h deleted file mode 100644 index adcbb2c1825..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/simple/simple_tensor_dimension_sum.h +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
- -#pragma once - -#include <vespa/vespalib/tensor/tensor_operation.h> - -namespace vespalib { -namespace tensor { - -/** - * Returns a tensor with the given dimension removed and the cell values in that dimension summed. - */ -class SimpleTensorDimensionSum : public TensorOperation<SimpleTensor> -{ -public: - using TensorImplType = SimpleTensor; - using Parent = TensorOperation<SimpleTensor>; - using AddressBuilderType = typename Parent::AddressBuilderType; - using AddressType = typename Parent::AddressType; - using Parent::_builder; - SimpleTensorDimensionSum(const TensorImplType &tensor, - const vespalib::string &dimension); -}; - - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/simple/simple_tensor_product.cpp b/vespalib/src/vespa/vespalib/tensor/simple/simple_tensor_product.cpp deleted file mode 100644 index b4e7eaf119b..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/simple/simple_tensor_product.cpp +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
- -#include <vespa/fastos/fastos.h> -#include "simple_tensor_product.h" -#include <vespa/vespalib/tensor/tensor_address_element_iterator.h> -#include <vespa/vespalib/tensor/dimensions_vector_iterator.h> -#include <vespa/vespalib/tensor/join_tensor_addresses.h> -#include <type_traits> - -namespace vespalib { -namespace tensor { - -namespace { - -template <class Dimensions> -void -calcIntersectDimensions(DimensionsVector &res, - const Dimensions &lhs, const Dimensions &rhs) -{ - std::set_intersection(lhs.cbegin(), lhs.cend(), rhs.cbegin(), rhs.cend(), - std::back_inserter(res)); -} - - -template <class Dimensions> -void -calcIntersectDimensions(DimensionsSet &res, - const Dimensions &lhs, const Dimensions &rhs) -{ - for (const auto &dimension : lhs) { - if (std::binary_search(rhs.begin(), rhs.end(), dimension)) { - res.insert(vespalib::stringref(dimension.c_str(), - dimension.size())); - } - } -} - - -} - - -template <class DimensionsCollection> -void -SimpleTensorProduct::template bruteForceProduct(const TensorImplType &lhs, - const TensorImplType &rhs) -{ - DimensionsCollection iDims; - calcIntersectDimensions<Dimensions>(iDims, - lhs.dimensions(), rhs.dimensions()); - AddressBuilderType combinedAddress; - for (const auto &lhsCell : lhs.cells()) { - for (const auto &rhsCell : rhs.cells()) { - bool combineSuccess = joinTensorAddresses<AddressBuilderType, - AddressType, AddressType> - (combinedAddress, iDims, - lhsCell.first, rhsCell.first); - if (combineSuccess) { - _builder.insertCell(combinedAddress, lhsCell.second * rhsCell.second); - } - } - } -} - - -void -SimpleTensorProduct::fastProduct(const TensorImplType &lhs, - const TensorImplType &rhs) -{ - const typename TensorImplType::Cells &rhsCells = rhs.cells(); - for (const auto &lhsCell : lhs.cells()) { - auto itr = rhsCells.find(lhsCell.first); - if (itr != rhsCells.end()) { - _builder.insertCell(lhsCell.first, lhsCell.second * itr->second); - } - } -} - - -SimpleTensorProduct::SimpleTensorProduct(const 
TensorImplType &lhs, - const TensorImplType &rhs) - : Parent(lhs.combineDimensionsWith(rhs)) -{ -#if 0 - /* Commented ut for now since we want to see brute force performance. */ - // All dimensions are common - if (lhs.dimensions().size() == rhs.dimensions().size() && - lhs.dimensions().size() == _builder.dimensions().size()) { - fastProduct(lhs, rhs); - return; - } - // TODO: Handle zero cells or zero dimensions cases - // No dimensions are common - if (lhs.dimensions().size() + rhs.dimensions().size() == - _builder.dimensions().size()) { - bruteForceNoCommonDimensionProduct(lhs, rhs); - return; - } - // lhs dimensions equals common dimensions - if (rhs.dimensions().size() == _builder.dimensions().size()) { - } - // rhs dimensions equals common dimensions - if (lhs.dimensions().size() == _builder.dimensions().size()) { - } -#endif -#if 1 - // few common dimensions - bruteForceProduct<DimensionsVector>(lhs, rhs); -#else - // many common dimensions, too expensive to iterate through all of - // them if each cell has relatively few dimensions. - bruteForceProduct<DimensionsSet>(lhs, rhs); -#endif -} - - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/simple/simple_tensor_product.h b/vespalib/src/vespa/vespalib/tensor/simple/simple_tensor_product.h deleted file mode 100644 index c811dd2d699..00000000000 --- a/vespalib/src/vespa/vespalib/tensor/simple/simple_tensor_product.h +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include <vespa/vespalib/tensor/tensor_operation.h> - -namespace vespalib { -namespace tensor { - -/** - * Returns the sparse tensor product of the two given tensors. 
- * This is all combinations of all cells in the first tensor with all cells of - * the second tensor, except the combinations which would have multiple labels - * for the same dimension due to shared dimensions between the two tensors. - * - * If there are no overlapping dimensions this is the regular tensor product. - * If the two tensors have exactly the same dimensions this is the Hadamard product. - * - * The sparse tensor is associative and commutative. Its dimensions are the - * set of the dimensions of the two input tensors. - */ -class SimpleTensorProduct : public TensorOperation<SimpleTensor> -{ -public: - using TensorImplType = SimpleTensor; - using Parent = TensorOperation<SimpleTensor>; - using Dimensions = typename Parent::Dimensions; - using AddressBuilderType = typename Parent::AddressBuilderType; - using AddressRefType = typename Parent::AddressRefType; - using AddressType = typename Parent::AddressType; - using Parent::_builder; - -private: - template <class DimensionsCollection> - void - bruteForceProduct(const TensorImplType &lhs, const TensorImplType &rhs); - - void - fastProduct(const TensorImplType &lhs, const TensorImplType &rhs); - -public: - SimpleTensorProduct(const TensorImplType &lhs, const TensorImplType &rhs); -}; - - -} // namespace vespalib::tensor -} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/CMakeLists.txt b/vespalib/src/vespa/vespalib/tensor/sparse/CMakeLists.txt new file mode 100644 index 00000000000..7d8725ad610 --- /dev/null +++ b/vespalib/src/vespa/vespalib/tensor/sparse/CMakeLists.txt @@ -0,0 +1,11 @@ +# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+vespa_add_library(vespalib_vespalib_tensor_sparse OBJECT + SOURCES + sparse_tensor.cpp + sparse_tensor_address_combiner.cpp + sparse_tensor_address_reducer.cpp + sparse_tensor_match.cpp + sparse_tensor_builder.cpp + sparse_tensor_unsorted_address_builder.cpp + DEPENDS +) diff --git a/vespalib/src/vespa/vespalib/tensor/compact/direct_compact_tensor_v2_builder.h b/vespalib/src/vespa/vespalib/tensor/sparse/direct_sparse_tensor_builder.h index 6864895547a..1d5b4b550a4 100644 --- a/vespalib/src/vespa/vespalib/tensor/compact/direct_compact_tensor_v2_builder.h +++ b/vespalib/src/vespa/vespalib/tensor/sparse/direct_sparse_tensor_builder.h @@ -3,26 +3,25 @@ #pragma once #include <vespa/vespalib/tensor/direct_tensor_builder.h> -#include "compact_tensor_v2.h" -#include "compact_tensor_v2_address_builder.h" -#include "compact_tensor_v2_address_padder.h" +#include "sparse_tensor.h" +#include "sparse_tensor_address_builder.h" +#include "sparse_tensor_address_padder.h" namespace vespalib { namespace tensor { /** - * Utility class to build tensors of type CompactTensorV2, to be used by + * Utility class to build tensors of type SparseTensor, to be used by * tensor operations. 
*/ -template <> class DirectTensorBuilder<CompactTensorV2> +template <> class DirectTensorBuilder<SparseTensor> { public: - using TensorImplType = CompactTensorV2; + using TensorImplType = SparseTensor; using Dimensions = typename TensorImplType::Dimensions; using Cells = typename TensorImplType::Cells; - using AddressBuilderType = CompactTensorV2AddressBuilder; - using AddressRefType = CompactTensorAddressRef; - using AddressType = CompactTensorAddress; + using AddressBuilderType = SparseTensorAddressBuilder; + using AddressRefType = SparseTensorAddressRef; private: Stash _stash; @@ -34,8 +33,8 @@ public: copyCells(const Cells &cells_in) { for (const auto &cell : cells_in) { - CompactTensorAddressRef oldRef = cell.first; - CompactTensorAddressRef newRef(oldRef, _stash); + SparseTensorAddressRef oldRef = cell.first; + SparseTensorAddressRef newRef(oldRef, _stash); _cells[newRef] = cell.second; } } @@ -43,12 +42,12 @@ public: void copyCells(const Cells &cells_in, const Dimensions &cells_in_dimensions) { - CompactTensorV2AddressPadder addressPadder(_dimensions, + SparseTensorAddressPadder addressPadder(_dimensions, cells_in_dimensions); for (const auto &cell : cells_in) { addressPadder.padAddress(cell.first); - CompactTensorAddressRef oldRef = addressPadder.getAddressRef(); - CompactTensorAddressRef newRef(oldRef, _stash); + SparseTensorAddressRef oldRef = addressPadder.getAddressRef(); + SparseTensorAddressRef newRef(oldRef, _stash); _cells[newRef] = cell.second; } } @@ -91,38 +90,38 @@ public: } Tensor::UP build() { - return std::make_unique<CompactTensorV2>(std::move(_dimensions), + return std::make_unique<SparseTensor>(std::move(_dimensions), std::move(_cells), std::move(_stash)); } template <class Function> - void insertCell(CompactTensorAddressRef address, double value, + void insertCell(SparseTensorAddressRef address, double value, Function &&func) { - CompactTensorAddressRef oldRef(address); + SparseTensorAddressRef oldRef(address); auto res = 
_cells.insert(std::make_pair(oldRef, value)); if (res.second) { // Replace key with own copy - res.first->first = CompactTensorAddressRef(oldRef, _stash); + res.first->first = SparseTensorAddressRef(oldRef, _stash); } else { res.first->second = func(res.first->second, value); } } - void insertCell(CompactTensorAddressRef address, double value) { + void insertCell(SparseTensorAddressRef address, double value) { // This address should not already exist and a new cell should be inserted. insertCell(address, value, [](double, double) -> double { abort(); }); } template <class Function> - void insertCell(CompactTensorV2AddressBuilder &address, double value, + void insertCell(SparseTensorAddressBuilder &address, double value, Function &&func) { insertCell(address.getAddressRef(), value, func); } - void insertCell(CompactTensorV2AddressBuilder &address, double value) { + void insertCell(SparseTensorAddressBuilder &address, double value) { // This address should not already exist and a new cell should be inserted. insertCell(address.getAddressRef(), value, [](double, double) -> double { abort(); }); } diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.cpp b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.cpp new file mode 100644 index 00000000000..5e7ec5b1db3 --- /dev/null +++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.cpp @@ -0,0 +1,311 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#include <vespa/fastos/fastos.h> +#include "sparse_tensor.h" +#include "sparse_tensor_address_builder.h" +#include "sparse_tensor_match.h" +#include "sparse_tensor_apply.hpp" +#include "sparse_tensor_reduce.hpp" +#include <vespa/vespalib/tensor/tensor_address_builder.h> +#include <vespa/vespalib/tensor/tensor_apply.h> +#include <vespa/vespalib/tensor/tensor_visitor.h> +#include <vespa/vespalib/eval/operation.h> +#include <sstream> + +using vespalib::eval::TensorSpec; + +namespace vespalib { +namespace tensor { + +namespace { + +using Cells = SparseTensor::Cells; + +void +copyCells(Cells &cells, const Cells &cells_in, Stash &stash) +{ + for (const auto &cell : cells_in) { + SparseTensorAddressRef oldRef = cell.first; + SparseTensorAddressRef newRef(oldRef, stash); + cells[newRef] = cell.second; + } +} + +void +printAddress(std::ostream &out, const SparseTensorAddressRef &ref, + const TensorDimensions &dimensions) +{ + out << "{"; + bool first = true; + SparseTensorAddressDecoder addr(ref); + for (auto &dim : dimensions) { + auto label = addr.decodeLabel(); + if (label.size() != 0u) { + if (!first) { + out << ","; + } + out << dim << ":" << label; + first = false; + } + } + assert(!addr.valid()); + out << "}"; +} + +} + +SparseTensor::SparseTensor(const Dimensions &dimensions_in, + const Cells &cells_in) + : _cells(), + _dimensions(dimensions_in), + _stash(STASH_CHUNK_SIZE) +{ + copyCells(_cells, cells_in, _stash); +} + + +SparseTensor::SparseTensor(Dimensions &&dimensions_in, + Cells &&cells_in, Stash &&stash_in) + : _cells(std::move(cells_in)), + _dimensions(std::move(dimensions_in)), + _stash(std::move(stash_in)) +{ +} + + +bool +SparseTensor::operator==(const SparseTensor &rhs) const +{ + return _dimensions == rhs._dimensions && _cells == rhs._cells; +} + + +SparseTensor::Dimensions +SparseTensor::combineDimensionsWith(const SparseTensor &rhs) const +{ + Dimensions result; + std::set_union(_dimensions.cbegin(), _dimensions.cend(), + rhs._dimensions.cbegin(), 
rhs._dimensions.cend(), + std::back_inserter(result)); + return result; +} + +eval::ValueType +SparseTensor::getType() const +{ + if (_dimensions.empty()) { + return eval::ValueType::double_type(); + } + std::vector<eval::ValueType::Dimension> dimensions; + std::copy(_dimensions.begin(), _dimensions.end(), std::back_inserter(dimensions)); + return eval::ValueType::tensor_type(dimensions); +} + +double +SparseTensor::sum() const +{ + double result = 0.0; + for (const auto &cell : _cells) { + result += cell.second; + } + return result; +} + +Tensor::UP +SparseTensor::add(const Tensor &arg) const +{ + const SparseTensor *rhs = dynamic_cast<const SparseTensor *>(&arg); + if (!rhs) { + return Tensor::UP(); + } + return sparse::apply(*this, *rhs, [](double lhsValue, double rhsValue) + { return lhsValue + rhsValue; }); +} + +Tensor::UP +SparseTensor::subtract(const Tensor &arg) const +{ + const SparseTensor *rhs = dynamic_cast<const SparseTensor *>(&arg); + if (!rhs) { + return Tensor::UP(); + } + return sparse::apply(*this, *rhs, [](double lhsValue, double rhsValue) + { return lhsValue - rhsValue; }); +} + +Tensor::UP +SparseTensor::multiply(const Tensor &arg) const +{ + const SparseTensor *rhs = dynamic_cast<const SparseTensor *>(&arg); + if (!rhs) { + return Tensor::UP(); + } + return sparse::apply(*this, *rhs, [](double lhsValue, double rhsValue) + { return lhsValue * rhsValue; }); +} + +Tensor::UP +SparseTensor::min(const Tensor &arg) const +{ + const SparseTensor *rhs = dynamic_cast<const SparseTensor *>(&arg); + if (!rhs) { + return Tensor::UP(); + } + return sparse::apply(*this, *rhs, [](double lhsValue, double rhsValue) + { return std::min(lhsValue, rhsValue); }); +} + +Tensor::UP +SparseTensor::max(const Tensor &arg) const +{ + const SparseTensor *rhs = dynamic_cast<const SparseTensor *>(&arg); + if (!rhs) { + return Tensor::UP(); + } + return sparse::apply(*this, *rhs, [](double lhsValue, double rhsValue) + { return std::max(lhsValue, rhsValue); }); +} + 
+Tensor::UP +SparseTensor::match(const Tensor &arg) const +{ + const SparseTensor *rhs = dynamic_cast<const SparseTensor *>(&arg); + if (!rhs) { + return Tensor::UP(); + } + return SparseTensorMatch(*this, *rhs).result(); +} + +Tensor::UP +SparseTensor::apply(const CellFunction &func) const +{ + return TensorApply<SparseTensor>(*this, func).result(); +} + +Tensor::UP +SparseTensor::sum(const vespalib::string &dimension) const +{ + return sparse::reduce(*this, { dimension }, + [](double lhsValue, double rhsValue) + { return lhsValue + rhsValue; }); +} + +bool +SparseTensor::equals(const Tensor &arg) const +{ + const SparseTensor *rhs = dynamic_cast<const SparseTensor *>(&arg); + if (!rhs) { + return false; + } + return *this == *rhs; +} + +vespalib::string +SparseTensor::toString() const +{ + std::ostringstream stream; + stream << *this; + return stream.str(); +} + +Tensor::UP +SparseTensor::clone() const +{ + return std::make_unique<SparseTensor>(_dimensions, _cells); +} + +namespace { + +void +buildAddress(const SparseTensor::Dimensions &dimensions, + SparseTensorAddressDecoder &decoder, + TensorSpec::Address &address) +{ + for (const auto &dimension : dimensions) { + auto label = decoder.decodeLabel(); + if (!label.empty()) { + address.emplace(std::make_pair(dimension, TensorSpec::Label(label))); + } + } + assert(!decoder.valid()); +} + +} + +TensorSpec +SparseTensor::toSpec() const +{ + TensorSpec result(getType().to_spec()); + TensorSpec::Address address; + for (const auto &cell : _cells) { + SparseTensorAddressDecoder decoder(cell.first); + buildAddress(_dimensions, decoder, address); + result.add(address, cell.second); + address.clear(); + } + if (_dimensions.empty() && _cells.empty()) { + result.add(address, 0.0); + } + return result; +} + +void +SparseTensor::print(std::ostream &out) const +{ + out << "{ "; + bool first = true; + for (const auto &cell : cells()) { + if (!first) { + out << ", "; + } + printAddress(out, cell.first, _dimensions); + out << ":" 
<< cell.second; + first = false; + } + out << " }"; +} + +void +SparseTensor::accept(TensorVisitor &visitor) const +{ + TensorAddressBuilder addrBuilder; + TensorAddress addr; + for (const auto &cell : _cells) { + SparseTensorAddressDecoder decoder(cell.first); + addrBuilder.clear(); + for (const auto &dimension : _dimensions) { + auto label = decoder.decodeLabel(); + if (label.size() != 0u) { + addrBuilder.add(dimension, label); + } + } + assert(!decoder.valid()); + addr = addrBuilder.build(); + visitor.visit(addr, cell.second); + } +} + +Tensor::UP +SparseTensor::apply(const eval::BinaryOperation &op, const Tensor &arg) const +{ + const SparseTensor *rhs = dynamic_cast<const SparseTensor *>(&arg); + if (!rhs) { + return Tensor::UP(); + } + return sparse::apply(*this, *rhs, + [&op](double lhsValue, double rhsValue) + { return op.eval(lhsValue, rhsValue); }); +} + +Tensor::UP +SparseTensor::reduce(const eval::BinaryOperation &op, + const std::vector<vespalib::string> &dimensions) const +{ + return sparse::reduce(*this, + (dimensions.empty() ? 
_dimensions : dimensions), + [&op](double lhsValue, double rhsValue) + { return op.eval(lhsValue, rhsValue); }); +} + +} // namespace vespalib::tensor +} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.h index d7a3f3e4e5c..d788a55885e 100644 --- a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2.h +++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.h @@ -5,7 +5,7 @@ #include <vespa/vespalib/tensor/cell_function.h> #include <vespa/vespalib/tensor/tensor.h> #include <vespa/vespalib/tensor/tensor_address.h> -#include "compact_tensor_address.h" +#include "sparse_tensor_address_ref.h" #include <vespa/vespalib/tensor/types.h> #include <vespa/vespalib/stllike/hash_map.h> #include <vespa/vespalib/stllike/string.h> @@ -19,10 +19,10 @@ namespace tensor { * improve CPU cache and TLB hit ratio, relative to SimpleTensor * implementation. */ -class CompactTensorV2 : public Tensor +class SparseTensor : public Tensor { public: - typedef vespalib::hash_map<CompactTensorAddressRef, double> Cells; + typedef vespalib::hash_map<SparseTensorAddressRef, double> Cells; typedef TensorDimensions Dimensions; static constexpr size_t STASH_CHUNK_SIZE = 16384u; @@ -33,14 +33,14 @@ private: Stash _stash; public: - explicit CompactTensorV2(const Dimensions &dimensions_in, + explicit SparseTensor(const Dimensions &dimensions_in, const Cells &cells_in); - CompactTensorV2(Dimensions &&dimensions_in, + SparseTensor(Dimensions &&dimensions_in, Cells &&cells_in, Stash &&stash_in); const Cells &cells() const { return _cells; } const Dimensions &dimensions() const { return _dimensions; } - bool operator==(const CompactTensorV2 &rhs) const; - Dimensions combineDimensionsWith(const CompactTensorV2 &rhs) const; + bool operator==(const SparseTensor &rhs) const; + Dimensions combineDimensionsWith(const SparseTensor &rhs) const; virtual eval::ValueType getType() const override; 
virtual double sum() const override; @@ -52,10 +52,16 @@ public: virtual Tensor::UP match(const Tensor &arg) const override; virtual Tensor::UP apply(const CellFunction &func) const override; virtual Tensor::UP sum(const vespalib::string &dimension) const override; + virtual Tensor::UP apply(const eval::BinaryOperation &op, + const Tensor &arg) const override; + virtual Tensor::UP reduce(const eval::BinaryOperation &op, + const std::vector<vespalib::string> &dimensions) + const override; virtual bool equals(const Tensor &arg) const override; virtual void print(std::ostream &out) const override; virtual vespalib::string toString() const override; virtual Tensor::UP clone() const override; + virtual eval::TensorSpec toSpec() const override; virtual void accept(TensorVisitor &visitor) const override; }; diff --git a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2_address_builder.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_builder.h index a029d84433f..c1678d89018 100644 --- a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2_address_builder.h +++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_builder.h @@ -4,14 +4,12 @@ #include <vespa/vespalib/stllike/string.h> #include <vector> -#include "compact_tensor_address_ref.h" +#include "sparse_tensor_address_ref.h" namespace vespalib { namespace tensor { -class CompactTensorAddress; - /** * A writer to serialize tensor addresses into a compact representation. 
* All dimensions in the tensors are present, empty label is the "undefined" @@ -19,7 +17,7 @@ class CompactTensorAddress; * * Format: (labelStr NUL)* */ -class CompactTensorV2AddressBuilder +class SparseTensorAddressBuilder { private: std::vector<char> _address; @@ -31,15 +29,15 @@ private: _address.insert(_address.end(), cstr, cstr + str.size() + 1); } public: - CompactTensorV2AddressBuilder() + SparseTensorAddressBuilder() : _address() { } void add(vespalib::stringref label) { append(label); } void addUndefined() { _address.emplace_back('\0'); } void clear() { _address.clear(); } - CompactTensorAddressRef getAddressRef() const { - return CompactTensorAddressRef(&_address[0], _address.size()); + SparseTensorAddressRef getAddressRef() const { + return SparseTensorAddressRef(&_address[0], _address.size()); } bool empty() const { return _address.empty(); } }; diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_combiner.cpp b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_combiner.cpp new file mode 100644 index 00000000000..53cf90e2db0 --- /dev/null +++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_combiner.cpp @@ -0,0 +1,69 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#include <vespa/fastos/fastos.h> +#include "sparse_tensor_address_combiner.h" +#include "sparse_tensor_address_decoder.h" + +namespace vespalib { +namespace tensor { +namespace sparse { + +TensorAddressCombiner::TensorAddressCombiner(const TensorDimensions &lhs, + const TensorDimensions &rhs) +{ + auto rhsItr = rhs.cbegin(); + auto rhsItrEnd = rhs.cend(); + for (auto &lhsDim : lhs) { + while (rhsItr != rhsItrEnd && *rhsItr < lhsDim) { + _ops.push_back(AddressOp::RHS); + ++rhsItr; + } + if (rhsItr != rhsItrEnd && *rhsItr == lhsDim) { + _ops.push_back(AddressOp::BOTH); + ++rhsItr; + } else { + _ops.push_back(AddressOp::LHS); + } + } + while (rhsItr != rhsItrEnd) { + _ops.push_back(AddressOp::RHS); + ++rhsItr; + } +} + +TensorAddressCombiner::~TensorAddressCombiner() +{ +} + +bool +TensorAddressCombiner::combine(SparseTensorAddressRef lhsRef, + SparseTensorAddressRef rhsRef) +{ + clear(); + SparseTensorAddressDecoder lhs(lhsRef); + SparseTensorAddressDecoder rhs(rhsRef); + for (auto op : _ops) { + switch (op) { + case AddressOp::LHS: + add(lhs.decodeLabel()); + break; + case AddressOp::RHS: + add(rhs.decodeLabel()); + break; + case AddressOp::BOTH: + auto lhsLabel(lhs.decodeLabel()); + auto rhsLabel(rhs.decodeLabel()); + if (lhsLabel != rhsLabel) { + return false; + } + add(lhsLabel); + } + } + assert(!lhs.valid()); + assert(!rhs.valid()); + return true; +} + +} // namespace vespalib::tensor::sparse +} // namespace vespalib::tensor +} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_combiner.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_combiner.h new file mode 100644 index 00000000000..72717396a02 --- /dev/null +++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_combiner.h @@ -0,0 +1,39 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#pragma once + +#include "sparse_tensor_address_builder.h" +#include <vespa/vespalib/tensor/types.h> + +namespace vespalib { +namespace tensor { +namespace sparse { + +/** + * Combine two tensor addresses to a new tensor address. Common dimensions + * must have matching labels. + */ +class TensorAddressCombiner : public SparseTensorAddressBuilder +{ + enum class AddressOp + { + LHS, + RHS, + BOTH + }; + + std::vector<AddressOp> _ops; + +public: + TensorAddressCombiner(const TensorDimensions &lhs, + const TensorDimensions &rhs); + + ~TensorAddressCombiner(); + + bool combine(SparseTensorAddressRef lhsRef, SparseTensorAddressRef rhsRef); +}; + + +} // namespace vespalib::tensor::sparse +} // namespace vespalib::tensor +} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2_address_decoder.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_decoder.h index 3d22ad69b6f..94cb9373bc2 100644 --- a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2_address_decoder.h +++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_decoder.h @@ -3,7 +3,7 @@ #pragma once #include <vespa/vespalib/stllike/string.h> -#include "compact_tensor_address_ref.h" +#include "sparse_tensor_address_ref.h" namespace vespalib { @@ -13,12 +13,12 @@ namespace tensor { /** * A decoder for a serialized tensor address, with only labels present. 
*/ -class CompactTensorV2AddressDecoder +class SparseTensorAddressDecoder { const char *_cur; const char *_end; public: - CompactTensorV2AddressDecoder(CompactTensorAddressRef ref) + SparseTensorAddressDecoder(SparseTensorAddressRef ref) : _cur(static_cast<const char *>(ref.start())), _end(_cur + ref.size()) { diff --git a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2_address_padder.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_padder.h index 9ab2c8833ba..5f0c95033b3 100644 --- a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2_address_padder.h +++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_padder.h @@ -2,18 +2,18 @@ #pragma once -#include "compact_tensor_v2_address_builder.h" -#include "compact_tensor_v2_address_decoder.h" +#include "sparse_tensor_address_builder.h" +#include "sparse_tensor_address_decoder.h" namespace vespalib { namespace tensor { /** - * This class transforms serialized compact tensor v2 addresses by padding + * This class transforms serialized sparse tensor addresses by padding * in "undefined" labels for new dimensions. 
*/ -class CompactTensorV2AddressPadder : public CompactTensorV2AddressBuilder +class SparseTensorAddressPadder : public SparseTensorAddressBuilder { enum class PadOp { @@ -24,9 +24,9 @@ class CompactTensorV2AddressPadder : public CompactTensorV2AddressBuilder std::vector<PadOp> _padOps; public: - CompactTensorV2AddressPadder(const TensorDimensions &resultDims, + SparseTensorAddressPadder(const TensorDimensions &resultDims, const TensorDimensions &inputDims) - : CompactTensorV2AddressBuilder(), + : SparseTensorAddressBuilder(), _padOps() { auto resultDimsItr = resultDims.cbegin(); @@ -47,10 +47,10 @@ public: } void - padAddress(CompactTensorAddressRef ref) + padAddress(SparseTensorAddressRef ref) { clear(); - CompactTensorV2AddressDecoder addr(ref); + SparseTensorAddressDecoder addr(ref); for (auto op : _padOps) { switch (op) { case PadOp::PAD: diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_reducer.cpp b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_reducer.cpp new file mode 100644 index 00000000000..2d3bbaef043 --- /dev/null +++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_reducer.cpp @@ -0,0 +1,51 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#include <vespa/fastos/fastos.h> +#include "sparse_tensor_address_reducer.h" + +namespace vespalib { +namespace tensor { +namespace sparse { + +TensorAddressReducer::TensorAddressReducer(const TensorDimensions &dims, + const std::vector<vespalib::string> & + removeDimensions) + : SparseTensorAddressBuilder(), + _ops() +{ + TensorDimensionsSet removeSet(removeDimensions.cbegin(), + removeDimensions.cend()); + _ops.reserve(dims.size()); + for (auto &dim : dims) { + if (removeSet.find(dim) != removeSet.end()) { + _ops.push_back(AddressOp::REMOVE); + } else { + _ops.push_back(AddressOp::COPY); + } + } +} + +TensorDimensions +TensorAddressReducer::remainingDimensions(const TensorDimensions &dimensions, + const std::vector<vespalib::string> & + removeDimensions) +{ + TensorDimensionsSet removeSet(removeDimensions.cbegin(), + removeDimensions.cend()); + TensorDimensions result; + result.reserve(dimensions.size()); + for (auto &dim : dimensions) { + if (removeSet.find(dim) == removeSet.end()) { + result.push_back(dim); + } + } + return std::move(result); +} + +TensorAddressReducer::~TensorAddressReducer() +{ +} + +} // namespace vespalib::tensor::sparse +} // namespace vespalib::tensor +} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_reducer.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_reducer.h new file mode 100644 index 00000000000..775607ca059 --- /dev/null +++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_reducer.h @@ -0,0 +1,58 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +#include "sparse_tensor_address_builder.h" +#include <vespa/vespalib/tensor/types.h> +#include "sparse_tensor_address_decoder.h" + +namespace vespalib { +namespace tensor { +namespace sparse { + +/** + * Reduce sparse tensor address by removing one or more dimensions. 
+ */ +class TensorAddressReducer : public SparseTensorAddressBuilder +{ + enum AddressOp + { + REMOVE, + COPY + }; + + using AddressOps = std::vector<AddressOp>; + + AddressOps _ops; + +public: + TensorAddressReducer(const TensorDimensions &dims, + const std::vector<vespalib::string> &removeDimensions); + + ~TensorAddressReducer(); + + static TensorDimensions + remainingDimensions(const TensorDimensions &dimensions, + const std::vector<vespalib::string> &removeDimensions); + + void reduce(SparseTensorAddressRef ref) + { + clear(); + SparseTensorAddressDecoder decoder(ref); + for (auto op : _ops) { + switch (op) { + case AddressOp::REMOVE: + decoder.skipLabel(); + break; + case AddressOp::COPY: + add(decoder.decodeLabel()); + } + } + assert(!decoder.valid()); + } +}; + + +} // namespace vespalib::tensor::sparse +} // namespace vespalib::tensor +} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_address_ref.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_ref.h index fa49e2fd39c..4358ce501a2 100644 --- a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_address_ref.h +++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_ref.h @@ -16,24 +16,24 @@ namespace tensor { /** * A reference to a compact sparse immutable address to a tensor cell. 
*/ -class CompactTensorAddressRef +class SparseTensorAddressRef { const void *_start; size_t _size; size_t _hash; public: - CompactTensorAddressRef() + SparseTensorAddressRef() : _start(nullptr), _size(0u), _hash(0u) { } - CompactTensorAddressRef(const void *start_in, size_t size_in) + SparseTensorAddressRef(const void *start_in, size_t size_in) : _start(start_in), _size(size_in), _hash(calcHash()) { } - CompactTensorAddressRef(const CompactTensorAddressRef rhs, Stash &stash) + SparseTensorAddressRef(const SparseTensorAddressRef rhs, Stash &stash) : _start(nullptr), _size(rhs._size), _hash(rhs._hash) @@ -47,7 +47,7 @@ public: size_t calcHash() const { return hashValue(_start, _size); } - bool operator<(const CompactTensorAddressRef &rhs) const { + bool operator<(const SparseTensorAddressRef &rhs) const { size_t minSize = std::min(_size, rhs._size); int res = memcmp(_start, rhs._start, minSize); if (res != 0) { @@ -56,7 +56,7 @@ public: return _size < rhs._size; } - bool operator==(const CompactTensorAddressRef &rhs) const + bool operator==(const SparseTensorAddressRef &rhs) const { if (_size != rhs._size || _hash != rhs._hash) { return false; diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_apply.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_apply.h new file mode 100644 index 00000000000..e0a8b2cee5b --- /dev/null +++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_apply.h @@ -0,0 +1,23 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +namespace vespalib { +namespace tensor { +class Tensor; +class SparseTensor; +namespace sparse { + +/** + * Create new tensor using all combinations of input tensor cells with matching + * labels for common dimensions, using func to calculate new cell value + * based on the cell values in the input tensors. 
+ */ +template <typename Function> +std::unique_ptr<Tensor> +apply(const SparseTensor &lhs, const SparseTensor &rhs, Function &&func); + + +} // namespace vespalib::tensor::sparse +} // namespace vespalib::tensor +} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_apply.hpp b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_apply.hpp new file mode 100644 index 00000000000..6c055d8547b --- /dev/null +++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_apply.hpp @@ -0,0 +1,35 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +#include "sparse_tensor_apply.h" +#include "sparse_tensor_address_combiner.h" +#include <vespa/vespalib/tensor/direct_tensor_builder.h> +#include "direct_sparse_tensor_builder.h" + +namespace vespalib { +namespace tensor { +namespace sparse { + +template <typename Function> +std::unique_ptr<Tensor> +apply(const SparseTensor &lhs, const SparseTensor &rhs, Function &&func) +{ + DirectTensorBuilder<SparseTensor> builder(lhs.combineDimensionsWith(rhs)); + TensorAddressCombiner addressCombiner(lhs.dimensions(), rhs.dimensions()); + for (const auto &lhsCell : lhs.cells()) { + for (const auto &rhsCell : rhs.cells()) { + bool combineSuccess = addressCombiner.combine(lhsCell.first, + rhsCell.first); + if (combineSuccess) { + builder.insertCell(addressCombiner.getAddressRef(), + func(lhsCell.second, rhsCell.second)); + } + } + } + return builder.build(); +} + +} // namespace vespalib::tensor::sparse +} // namespace vespalib::tensor +} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2_builder.cpp b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_builder.cpp index 379943f10eb..bb00d9b2e19 100644 --- a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2_builder.cpp +++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_builder.cpp @@ -1,30 +1,30 @@ 
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -#include "compact_tensor_v2_builder.h" +#include "sparse_tensor_builder.h" #include <vespa/vespalib/tensor/tensor.h> namespace vespalib { namespace tensor { -CompactTensorV2Builder::CompactTensorV2Builder() +SparseTensorBuilder::SparseTensorBuilder() : TensorBuilder(), _addressBuilder(), _normalizedAddressBuilder(), _cells(), - _stash(CompactTensorV2::STASH_CHUNK_SIZE), + _stash(SparseTensor::STASH_CHUNK_SIZE), _dimensionsEnum(), _dimensions(), _sortedDimensions() { } -CompactTensorV2Builder::~CompactTensorV2Builder() +SparseTensorBuilder::~SparseTensorBuilder() { } void -CompactTensorV2Builder::makeSortedDimensions() +SparseTensorBuilder::makeSortedDimensions() { assert(_sortedDimensions.empty()); assert(_cells.empty()); @@ -34,7 +34,7 @@ CompactTensorV2Builder::makeSortedDimensions() TensorBuilder::Dimension -CompactTensorV2Builder::define_dimension(const vespalib::string &dimension) +SparseTensorBuilder::define_dimension(const vespalib::string &dimension) { auto it = _dimensionsEnum.find(dimension); if (it != _dimensionsEnum.end()) { @@ -50,7 +50,7 @@ CompactTensorV2Builder::define_dimension(const vespalib::string &dimension) } TensorBuilder & -CompactTensorV2Builder::add_label(Dimension dimension, +SparseTensorBuilder::add_label(Dimension dimension, const vespalib::string &label) { assert(dimension <= _dimensions.size()); @@ -59,15 +59,15 @@ CompactTensorV2Builder::add_label(Dimension dimension, } TensorBuilder & -CompactTensorV2Builder::add_cell(double value) +SparseTensorBuilder::add_cell(double value) { if (_dimensions.size() != _sortedDimensions.size()) { makeSortedDimensions(); } _addressBuilder.buildTo(_normalizedAddressBuilder, _sortedDimensions); - CompactTensorAddressRef taddress(_normalizedAddressBuilder.getAddressRef()); - // Make a persistent copy of compact tensor address owned by _stash - CompactTensorAddressRef address(taddress, 
_stash); + SparseTensorAddressRef taddress(_normalizedAddressBuilder.getAddressRef()); + // Make a persistent copy of sparse tensor address owned by _stash + SparseTensorAddressRef address(taddress, _stash); _cells[address] = value; _addressBuilder.clear(); _normalizedAddressBuilder.clear(); @@ -76,18 +76,18 @@ CompactTensorV2Builder::add_cell(double value) Tensor::UP -CompactTensorV2Builder::build() +SparseTensorBuilder::build() { assert(_addressBuilder.empty()); if (_dimensions.size() != _sortedDimensions.size()) { makeSortedDimensions(); } - CompactTensorV2::Dimensions dimensions(_sortedDimensions.begin(), + SparseTensor::Dimensions dimensions(_sortedDimensions.begin(), _sortedDimensions.end()); - Tensor::UP ret = std::make_unique<CompactTensorV2>(std::move(dimensions), + Tensor::UP ret = std::make_unique<SparseTensor>(std::move(dimensions), std::move(_cells), std::move(_stash)); - CompactTensorV2::Cells().swap(_cells); + SparseTensor::Cells().swap(_cells); _dimensionsEnum.clear(); _dimensions.clear(); _sortedDimensions.clear(); diff --git a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2_builder.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_builder.h index a206d882f32..be0791a59c1 100644 --- a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2_builder.h +++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_builder.h @@ -2,9 +2,9 @@ #pragma once -#include "compact_tensor_v2.h" -#include "compact_tensor_v2_address_builder.h" -#include "compact_tensor_unsorted_address_builder.h" +#include "sparse_tensor.h" +#include "sparse_tensor_address_builder.h" +#include "sparse_tensor_unsorted_address_builder.h" #include <vespa/vespalib/tensor/tensor_builder.h> #include <vespa/vespalib/tensor/tensor_address.h> #include <vespa/vespalib/stllike/hash_map.h> @@ -14,13 +14,13 @@ namespace vespalib { namespace tensor { /** - * A builder of compact tensors. + * A builder of sparse tensors. 
*/ -class CompactTensorV2Builder : public TensorBuilder +class SparseTensorBuilder : public TensorBuilder { - CompactTensorUnsortedAddressBuilder _addressBuilder; // unsorted dimensions - CompactTensorV2AddressBuilder _normalizedAddressBuilder; // sorted dimensions - CompactTensorV2::Cells _cells; + SparseTensorUnsortedAddressBuilder _addressBuilder; // unsorted dimensions + SparseTensorAddressBuilder _normalizedAddressBuilder; // sorted dimensions + SparseTensor::Cells _cells; Stash _stash; vespalib::hash_map<vespalib::string, uint32_t> _dimensionsEnum; std::vector<vespalib::string> _dimensions; @@ -28,8 +28,8 @@ class CompactTensorV2Builder : public TensorBuilder void makeSortedDimensions(); public: - CompactTensorV2Builder(); - virtual ~CompactTensorV2Builder(); + SparseTensorBuilder(); + virtual ~SparseTensorBuilder(); virtual Dimension define_dimension(const vespalib::string &dimension) override; diff --git a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2_match.cpp b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_match.cpp index c8044a5f828..35da291bbee 100644 --- a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2_match.cpp +++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_match.cpp @@ -1,8 +1,8 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
#include <vespa/fastos/fastos.h> -#include "compact_tensor_v2_match.h" -#include "compact_tensor_v2_address_decoder.h" +#include "sparse_tensor_match.h" +#include "sparse_tensor_address_decoder.h" namespace vespalib { namespace tensor { @@ -44,12 +44,12 @@ buildTransformOps(std::vector<AddressOp> &ops, bool -transformAddress(CompactTensorV2AddressBuilder &builder, - CompactTensorAddressRef ref, +transformAddress(SparseTensorAddressBuilder &builder, + SparseTensorAddressRef ref, const std::vector<AddressOp> &ops) { builder.clear(); - CompactTensorV2AddressDecoder addr(ref); + SparseTensorAddressDecoder addr(ref); for (auto op : ops) { switch (op) { case AddressOp::REMOVE: @@ -75,7 +75,7 @@ transformAddress(CompactTensorV2AddressBuilder &builder, void -CompactTensorV2Match::fastMatch(const TensorImplType &lhs, +SparseTensorMatch::fastMatch(const TensorImplType &lhs, const TensorImplType &rhs) { for (const auto &lhsCell : lhs.cells()) { @@ -87,19 +87,19 @@ CompactTensorV2Match::fastMatch(const TensorImplType &lhs, } void -CompactTensorV2Match::slowMatch(const TensorImplType &lhs, +SparseTensorMatch::slowMatch(const TensorImplType &lhs, const TensorImplType &rhs) { std::vector<AddressOp> ops; - CompactTensorV2AddressBuilder addressBuilder; - CompactTensorV2AddressPadder addressPadder(_builder.dimensions(), + SparseTensorAddressBuilder addressBuilder; + SparseTensorAddressPadder addressPadder(_builder.dimensions(), lhs.dimensions()); buildTransformOps(ops, lhs.dimensions(), rhs.dimensions()); for (const auto &lhsCell : lhs.cells()) { if (!transformAddress(addressBuilder, lhsCell.first, ops)) { continue; } - CompactTensorAddressRef ref(addressBuilder.getAddressRef()); + SparseTensorAddressRef ref(addressBuilder.getAddressRef()); auto rhsItr = rhs.cells().find(ref); if (rhsItr != rhs.cells().end()) { addressPadder.padAddress(lhsCell.first); @@ -108,7 +108,7 @@ CompactTensorV2Match::slowMatch(const TensorImplType &lhs, } } -CompactTensorV2Match::CompactTensorV2Match(const 
TensorImplType &lhs, +SparseTensorMatch::SparseTensorMatch(const TensorImplType &lhs, const TensorImplType &rhs) : Parent(lhs.combineDimensionsWith(rhs)) { diff --git a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2_match.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_match.h index 99fc3ce457a..f12fddc51f4 100644 --- a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_v2_match.h +++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_match.h @@ -14,17 +14,17 @@ namespace tensor { * * If the two tensors have exactly the same dimensions, this is the Hadamard product. */ -class CompactTensorV2Match : public TensorOperation<CompactTensorV2> +class SparseTensorMatch : public TensorOperation<SparseTensor> { public: - using Parent = TensorOperation<CompactTensorV2>; + using Parent = TensorOperation<SparseTensor>; using typename Parent::TensorImplType; using Parent::_builder; private: void fastMatch(const TensorImplType &lhs, const TensorImplType &rhs); void slowMatch(const TensorImplType &lhs, const TensorImplType &rhs); public: - CompactTensorV2Match(const TensorImplType &lhs, const TensorImplType &rhs); + SparseTensorMatch(const TensorImplType &lhs, const TensorImplType &rhs); }; } // namespace vespalib::tensor diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_reduce.hpp b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_reduce.hpp new file mode 100644 index 00000000000..45e6b727881 --- /dev/null +++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_reduce.hpp @@ -0,0 +1,62 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#pragma once + +#include "sparse_tensor_address_reducer.h" +#include <vespa/vespalib/tensor/direct_tensor_builder.h> +#include "direct_sparse_tensor_builder.h" + +namespace vespalib { +namespace tensor { +namespace sparse { + +template <typename Function> +std::unique_ptr<Tensor> +reduceAll(const SparseTensor &tensor, + DirectTensorBuilder<SparseTensor> &builder, Function &&func) +{ + auto itr = tensor.cells().begin(); + auto itrEnd = tensor.cells().end(); + double result = 0.0; + if (itr != itrEnd) { + result = itr->second; + ++itr; + } + for (; itr != itrEnd; ++itr) { + result = func(result, itr->second); + } + builder.insertCell(SparseTensorAddressBuilder().getAddressRef(), result); + return builder.build(); +} + +template <typename Function> +std::unique_ptr<Tensor> +reduceAll(const SparseTensor &tensor, Function &&func) +{ + DirectTensorBuilder<SparseTensor> builder; + return reduceAll(tensor, builder, func); +} + +template <typename Function> +std::unique_ptr<Tensor> +reduce(const SparseTensor &tensor, + const std::vector<vespalib::string> &dimensions, Function &&func) +{ + if (dimensions.empty()) { + return reduceAll(tensor, func); + } + DirectTensorBuilder<SparseTensor> builder(TensorAddressReducer::remainingDimensions(tensor.dimensions(), dimensions)); + if (builder.dimensions().empty()) { + return reduceAll(tensor, builder, func); + } + TensorAddressReducer addressReducer(tensor.dimensions(), dimensions); + for (const auto &cell : tensor.cells()) { + addressReducer.reduce(cell.first); + builder.insertCell(addressReducer.getAddressRef(), cell.second, func); + } + return builder.build(); +} + +} // namespace vespalib::tensor::sparse +} // namespace vespalib::tensor +} // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_unsorted_address_builder.cpp b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_unsorted_address_builder.cpp index a88cd40afc2..57db0902396 100644 --- 
a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_unsorted_address_builder.cpp +++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_unsorted_address_builder.cpp @@ -1,15 +1,14 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. #include <vespa/fastos/fastos.h> -#include "compact_tensor_unsorted_address_builder.h" -#include "compact_tensor_address_builder.h" -#include "compact_tensor_v2_address_builder.h" +#include "sparse_tensor_unsorted_address_builder.h" +#include "sparse_tensor_address_builder.h" #include <algorithm> namespace vespalib { namespace tensor { -CompactTensorUnsortedAddressBuilder::CompactTensorUnsortedAddressBuilder() +SparseTensorUnsortedAddressBuilder::SparseTensorUnsortedAddressBuilder() : _elementStrings(), _elements() { @@ -17,21 +16,7 @@ CompactTensorUnsortedAddressBuilder::CompactTensorUnsortedAddressBuilder() void -CompactTensorUnsortedAddressBuilder::buildTo(CompactTensorAddressBuilder & - builder) -{ - const char *base = &_elementStrings[0]; - std::sort(_elements.begin(), _elements.end(), - [=](const ElementRef &lhs, const ElementRef &rhs) - { return lhs.getDimension(base) < rhs.getDimension(base); }); - // build normalized address with sorted dimensions - for (const auto &element : _elements) { - builder.add(element.getDimension(base), element.getLabel(base)); - } -} - -void -CompactTensorUnsortedAddressBuilder::buildTo(CompactTensorV2AddressBuilder & +SparseTensorUnsortedAddressBuilder::buildTo(SparseTensorAddressBuilder & builder, const TensorDimensions & dimensions) diff --git a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_unsorted_address_builder.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_unsorted_address_builder.h index a3c9b4d8ca0..914f7d6ce2f 100644 --- a/vespalib/src/vespa/vespalib/tensor/compact/compact_tensor_unsorted_address_builder.h +++ 
b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_unsorted_address_builder.h @@ -9,14 +9,13 @@ namespace vespalib { namespace tensor { -class CompactTensorAddressBuilder; -class CompactTensorV2AddressBuilder; +class SparseTensorAddressBuilder; /** * A builder that buffers up a tensor address with unsorted * dimensions. */ -class CompactTensorUnsortedAddressBuilder +class SparseTensorUnsortedAddressBuilder { struct ElementStringRef { @@ -62,7 +61,7 @@ class CompactTensorUnsortedAddressBuilder } public: - CompactTensorUnsortedAddressBuilder(); + SparseTensorUnsortedAddressBuilder(); bool empty() const { return _elementStrings.empty(); } void add(vespalib::stringref dimension, vespalib::stringref label) { @@ -72,8 +71,7 @@ public: * Sort the stored tensor address and pass it over to a strict * tensor address builder in sorted order. */ - void buildTo(CompactTensorAddressBuilder &builder); - void buildTo(CompactTensorV2AddressBuilder &builder, + void buildTo(SparseTensorAddressBuilder &builder, const TensorDimensions &dimensions); void clear() { _elementStrings.clear(); _elements.clear(); } }; diff --git a/vespalib/src/vespa/vespalib/tensor/tensor.h b/vespalib/src/vespa/vespalib/tensor/tensor.h index 4128a27d9a7..9e4f4a9bff0 100644 --- a/vespalib/src/vespa/vespalib/tensor/tensor.h +++ b/vespalib/src/vespa/vespalib/tensor/tensor.h @@ -6,9 +6,11 @@ #include "tensor_address.h" #include <vespa/vespalib/stllike/string.h> #include <vespa/vespalib/eval/tensor.h> +#include <vespa/vespalib/eval/tensor_spec.h> #include <vespa/vespalib/eval/value_type.h> namespace vespalib { +namespace eval { class BinaryOperation; } namespace tensor { class TensorVisitor; @@ -37,10 +39,16 @@ struct Tensor : public eval::Tensor virtual Tensor::UP match(const Tensor &arg) const = 0; virtual Tensor::UP apply(const CellFunction &func) const = 0; virtual Tensor::UP sum(const vespalib::string &dimension) const = 0; + virtual Tensor::UP apply(const eval::BinaryOperation &op, + const Tensor 
&arg) const = 0; + virtual Tensor::UP reduce(const eval::BinaryOperation &op, + const std::vector<vespalib::string> &dimensions) + const = 0; virtual bool equals(const Tensor &arg) const = 0; virtual void print(std::ostream &out) const = 0; virtual vespalib::string toString() const = 0; virtual Tensor::UP clone() const = 0; + virtual eval::TensorSpec toSpec() const = 0; virtual void accept(TensorVisitor &visitor) const = 0; }; diff --git a/vespalib/src/vespa/vespalib/tensor/tensor_address_element_iterator.h b/vespalib/src/vespa/vespalib/tensor/tensor_address_element_iterator.h index 3a260c7c693..a250331de5f 100644 --- a/vespalib/src/vespa/vespalib/tensor/tensor_address_element_iterator.h +++ b/vespalib/src/vespa/vespalib/tensor/tensor_address_element_iterator.h @@ -25,68 +25,7 @@ public: bool valid() const { return (_itr != _itrEnd); } vespalib::stringref dimension() const { return _itr->dimension(); } vespalib::stringref label() const { return _itr->label(); } - template <class Iterator> - bool beforeDimension(const Iterator &rhs) const { - if (!valid()) { - return false; - } - if (!rhs.valid()) { - return true; - } - return (_itr->dimension() < rhs.dimension()); - } - bool atDimension(vespalib::stringref rhsDimension) const - { - return (valid() && (_itr->dimension() == rhsDimension)); - } void next() { ++_itr; } - template <class AddressBuilder> - void - addElement(AddressBuilder &builder) { - builder.add(_itr->dimension(), _itr->label()); - } - template <class AddressBuilder, class Iterator> - void addElements(AddressBuilder &builder, const Iterator &limit) - { - while (beforeDimension(limit)) { - addElement(builder); - next(); - } - } - template <class AddressBuilder, class Iterator> - bool addElements(AddressBuilder &builder, const DimensionsSet &dims, - const Iterator &limit) - { - do { - if (dims.find(_itr->dimension()) != dims.end()) { - return false; - } - addElement(builder); - next(); - } while (beforeDimension(limit)); - return true; - } - template 
<class AddressBuilder> - void addElements(AddressBuilder &builder) - { - while (valid()) { - addElement(builder); - next(); - } - } - template <class AddressBuilder> - bool addElements(AddressBuilder &builder, const DimensionsSet &dims) - { - while (valid()) { - if (dims.find(_itr->dimension()) != dims.end()) { - return false; - } - addElement(builder); - next(); - } - return true; - } - bool skipToDimension(vespalib::stringref rhsDimension) { for (;;) { if (!valid()) { @@ -101,118 +40,5 @@ public: } }; - -/** - * An iterator for tensor address elements used to simplify 3-way merge - * between two tensor addresses and a dimension vector. - * This is a specialization to perform decoding on the fly while iterating. - */ -template <> -class TensorAddressElementIterator<CompactTensorAddressRef> { - const char *_itr; - const char *_itrEnd; - vespalib::stringref _dimension; - vespalib::stringref _label; - - size_t - simple_strlen(const char *str) { - const char *strend = str; - for (; *strend != '\0'; ++strend) { - } - return (strend - str); - } - - void decodeElement() - { - _dimension = vespalib::stringref(_itr, simple_strlen(_itr)); - const char *labelp = _dimension.c_str() + _dimension.size() + 1; - _label = vespalib::stringref(labelp, simple_strlen(labelp)); - _itr = _label.c_str() + _label.size() + 1; - } -public: - TensorAddressElementIterator(CompactTensorAddressRef address) - : _itr(static_cast<const char *>(address.start())), - _itrEnd(_itr + address.size()), - _dimension(), - _label() - { - if (_itr != _itrEnd) { - decodeElement(); - } - } - bool valid() const { return (_dimension.size() != 0u); } - vespalib::stringref dimension() const { return _dimension; } - vespalib::stringref label() const { return _label; } - template <class Iterator> - bool beforeDimension(const Iterator &rhs) const { - if (!valid()) { - return false; - } - if (!rhs.valid()) { - return true; - } - return (_dimension < rhs.dimension()); - } - bool atDimension(vespalib::stringref 
rhsDimension) const - { - return (_dimension == rhsDimension); - } - void next() { - if (_itr != _itrEnd) { - decodeElement(); - } else { - _dimension = vespalib::stringref(); - _label = vespalib::stringref(); - } - } - template <class AddressBuilder> - void - addElement(AddressBuilder &builder) { - builder.add(_dimension, _label); - } - template <class AddressBuilder, class Iterator> - void addElements(AddressBuilder &builder, const Iterator &limit) - { - while (beforeDimension(limit)) { - addElement(builder); - next(); - } - } - template <class AddressBuilder, class Iterator> - bool addElements(AddressBuilder &builder, const DimensionsSet &dims, - const Iterator &limit) - { - do { - if (dims.find(_dimension) != dims.end()) { - return false; - } - addElement(builder); - next(); - } while (beforeDimension(limit)); - return true; - } - template <class AddressBuilder> - void addElements(AddressBuilder &builder) - { - while (valid()) { - addElement(builder); - next(); - } - } - template <class AddressBuilder> - bool addElements(AddressBuilder &builder, const DimensionsSet &dims) - { - while (valid()) { - if (dims.find(_dimension) != dims.end()) { - return false; - } - addElement(builder); - next(); - } - return true; - } -}; - - } // namespace vespalib::tensor } // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/tensor_apply.cpp b/vespalib/src/vespa/vespalib/tensor/tensor_apply.cpp index ad9cab61243..7f0293f6349 100644 --- a/vespalib/src/vespa/vespalib/tensor/tensor_apply.cpp +++ b/vespalib/src/vespa/vespalib/tensor/tensor_apply.cpp @@ -16,9 +16,7 @@ TensorApply<TensorT>::TensorApply(const TensorImplType &tensor, } } -template class TensorApply<SimpleTensor>; -template class TensorApply<CompactTensor>; -template class TensorApply<CompactTensorV2>; +template class TensorApply<SparseTensor>; } // namespace vespalib::tensor } // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/tensor_apply.h 
b/vespalib/src/vespa/vespalib/tensor/tensor_apply.h index 7fc9cd2ccfa..52be67ed30c 100644 --- a/vespalib/src/vespa/vespalib/tensor/tensor_apply.h +++ b/vespalib/src/vespa/vespalib/tensor/tensor_apply.h @@ -21,9 +21,7 @@ public: TensorApply(const TensorImplType &tensor, const CellFunction &func); }; -extern template class TensorApply<SimpleTensor>; -extern template class TensorApply<CompactTensor>; -extern template class TensorApply<CompactTensorV2>; +extern template class TensorApply<SparseTensor>; } // namespace vespalib::tensor } // namespace vespalib diff --git a/vespalib/src/vespa/vespalib/tensor/tensor_mapper.cpp b/vespalib/src/vespa/vespalib/tensor/tensor_mapper.cpp index 80596672d5a..a527627d786 100644 --- a/vespalib/src/vespa/vespalib/tensor/tensor_mapper.cpp +++ b/vespalib/src/vespa/vespalib/tensor/tensor_mapper.cpp @@ -5,9 +5,7 @@ #include "tensor_mapper.h" #include "tensor.h" #include "tensor_visitor.h" -#include <vespa/vespalib/tensor/simple/direct_simple_tensor_builder.h> -#include <vespa/vespalib/tensor/compact/direct_compact_tensor_v2_builder.h> -#include <vespa/vespalib/tensor/compact/direct_compact_tensor_builder.h> +#include <vespa/vespalib/tensor/sparse/direct_sparse_tensor_builder.h> #include <vespa/vespalib/tensor/dense/dense_tensor.h> #include "tensor_address_element_iterator.h" #include "default_tensor.h" @@ -82,7 +80,7 @@ SparseTensorMapper<TensorT>::build() template <> void -SparseTensorMapper<CompactTensorV2>:: +SparseTensorMapper<SparseTensor>:: mapAddress(const TensorAddress &address) { _addressBuilder.clear(); @@ -100,22 +98,6 @@ mapAddress(const TensorAddress &address) template <class TensorT> void -SparseTensorMapper<TensorT>::mapAddress(const TensorAddress &address) -{ - _addressBuilder.clear(); - TensorAddressElementIterator<TensorAddress> addressIterator(address); - for (const auto &dimension : _builder.dimensions()) { - if (addressIterator.skipToDimension(dimension)) { - _addressBuilder.add(dimension, addressIterator.label()); - 
addressIterator.next(); - } else { - // output dimension not in input - } - } -} - -template <class TensorT> -void SparseTensorMapper<TensorT>::visit(const TensorAddress &address, double value) { mapAddress(address); @@ -274,17 +256,7 @@ TensorMapper::map(const Tensor &tensor) const template std::unique_ptr<Tensor> -TensorMapper::mapToSparse<SimpleTensor>(const Tensor &tensor, - const ValueType &type); - -template -std::unique_ptr<Tensor> -TensorMapper::mapToSparse<CompactTensor>(const Tensor &tensor, - const ValueType &type); - -template -std::unique_ptr<Tensor> -TensorMapper::mapToSparse<CompactTensorV2>(const Tensor &tensor, +TensorMapper::mapToSparse<SparseTensor>(const Tensor &tensor, const ValueType &type); } // namespace vespalib::tensor diff --git a/vespalib/src/vespa/vespalib/tensor/tensor_operation.h b/vespalib/src/vespa/vespalib/tensor/tensor_operation.h index e115ec153b7..350dfcc8abc 100644 --- a/vespalib/src/vespa/vespalib/tensor/tensor_operation.h +++ b/vespalib/src/vespa/vespalib/tensor/tensor_operation.h @@ -2,11 +2,8 @@ #pragma once -#include <vespa/vespalib/tensor/simple/simple_tensor.h> #include "direct_tensor_builder.h" -#include <vespa/vespalib/tensor/simple/direct_simple_tensor_builder.h> -#include <vespa/vespalib/tensor/compact/direct_compact_tensor_builder.h> -#include <vespa/vespalib/tensor/compact/direct_compact_tensor_v2_builder.h> +#include <vespa/vespalib/tensor/sparse/direct_sparse_tensor_builder.h> namespace vespalib { namespace tensor { @@ -24,7 +21,6 @@ public: using Cells = typename TensorImplType::Cells; using AddressBuilderType = typename MyTensorBuilder::AddressBuilderType; using AddressRefType = typename MyTensorBuilder::AddressRefType; - using AddressType = typename MyTensorBuilder::AddressType; protected: MyTensorBuilder _builder; Dimensions &_dimensions; diff --git a/vespalib/src/vespa/vespalib/test/insertion_operators.h b/vespalib/src/vespa/vespalib/test/insertion_operators.h index 8ed52062281..ac4fa3541e3 100644 --- 
a/vespalib/src/vespa/vespalib/test/insertion_operators.h +++ b/vespalib/src/vespa/vespalib/test/insertion_operators.h @@ -1,6 +1,7 @@ // Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. #pragma once +#include <map> #include <ostream> #include <set> #include <vector> @@ -41,5 +42,22 @@ operator<<(std::ostream &os, const std::vector<T> &set) return os; } +template <typename K, typename V> +std::ostream & +operator<<(std::ostream &os, const std::map<K, V> &map) +{ + os << "{"; + bool first = true; + for (const auto &entry : map) { + if (!first) { + os << ","; + } + os << "{" << entry.first << "," << entry.second << "}"; + first = false; + } + os << "}"; + return os; +} + } // namespace std diff --git a/vespalib/src/vespa/vespalib/util/CMakeLists.txt b/vespalib/src/vespa/vespalib/util/CMakeLists.txt index fe739b066d4..261c33c8135 100644 --- a/vespalib/src/vespa/vespalib/util/CMakeLists.txt +++ b/vespalib/src/vespa/vespalib/util/CMakeLists.txt @@ -11,6 +11,7 @@ vespa_add_library(vespalib_vespalib_util OBJECT benchmark_timer.cpp blockingthreadstackexecutor.cpp box.cpp + classname.cpp compress.cpp dual_merge_director.cpp error.cpp diff --git a/vespalib/src/vespa/vespalib/util/alloc.cpp b/vespalib/src/vespa/vespalib/util/alloc.cpp index 66f40366f52..b037535a635 100644 --- a/vespalib/src/vespa/vespalib/util/alloc.cpp +++ b/vespalib/src/vespa/vespalib/util/alloc.cpp @@ -13,26 +13,12 @@ #include <vespa/log/log.h> #include <map> #include <atomic> +#include <unordered_map> LOG_SETUP(".vespalib.alloc"); namespace vespalib { -void * AlignedHeapAlloc::alloc(size_t sz, size_t alignment) -{ - if (!sz) { - return 0; - } - void* ptr; - int result = posix_memalign(&ptr, alignment, sz); - if (result != 0) { - throw IllegalArgumentException( - make_string("posix_memalign(%zu, %zu) failed with code %d", - sz, alignment, result)); - } - return ptr; -} - namespace { volatile bool _G_hasHugePageFailureJustHappened(false); @@ -99,9 
+85,178 @@ size_t sum(const MMapStore & s) return sum; } +class MMapLimitAndAlignment { +public: + MMapLimitAndAlignment(size_t mmapLimit, size_t alignment); + uint32_t hash() const { return _key; } + bool operator == (MMapLimitAndAlignment rhs) const { return _key == rhs._key; } +private: + uint32_t _key; +}; + +void verifyMMapLimitAndAlignment(size_t mmapLimit, size_t alignment) __attribute__((noinline)); + +void verifyMMapLimitAndAlignment(size_t mmapLimit, size_t alignment) { + if ((0x01ul << Optimized::msbIdx(mmapLimit)) != mmapLimit) { + throw IllegalArgumentException(make_string("We only support mmaplimit(%0lx) to be a power of 2", mmapLimit)); + } + if ((alignment != 0) && (0x01ul << Optimized::msbIdx(alignment)) != alignment) { + throw IllegalArgumentException(make_string("We only support alignment(%0lx) to be a power of 2", alignment)); + } } -void * MMapAlloc::alloc(size_t sz) +MMapLimitAndAlignment::MMapLimitAndAlignment(size_t mmapLimit, size_t alignment) : + _key(Optimized::msbIdx(mmapLimit) | Optimized::msbIdx(alignment) << 6) +{ + verifyMMapLimitAndAlignment(mmapLimit, alignment); +} +} + +namespace alloc { + +class HeapAllocator : public MemoryAllocator { +public: + void * alloc(size_t sz) const override; + void free(void * buf, size_t sz) const override; + static void * salloc(size_t sz); + static void sfree(void * buf, size_t sz); + static MemoryAllocator & getDefault(); +}; + +class AlignedHeapAllocator : public HeapAllocator { +public: + AlignedHeapAllocator(size_t alignment) : _alignment(alignment) { } + void * alloc(size_t sz) const override; + static MemoryAllocator & get4K(); + static MemoryAllocator & get1K(); + static MemoryAllocator & get512B(); +private: + size_t _alignment; +}; + +class MMapAllocator : public MemoryAllocator { +public: + void * alloc(size_t sz) const override; + void free(void * buf, size_t sz) const override; + static void * salloc(size_t sz); + static void sfree(void * buf, size_t sz); + static MemoryAllocator & 
getDefault(); +}; + +class AutoAllocator : public MemoryAllocator { +public: + AutoAllocator(size_t mmapLimit, size_t alignment) : _mmapLimit(mmapLimit), _alignment(alignment) { } + void * alloc(size_t sz) const override; + void free(void * buf, size_t sz) const override; + static MemoryAllocator & getDefault(); + static MemoryAllocator & getAllocator(size_t mmapLimit, size_t alignment); +private: + size_t roundUpToHugePages(size_t sz) const { + return (_mmapLimit >= MemoryAllocator::HUGEPAGE_SIZE) + ? MMapAllocator::roundUpToHugePages(sz) + : sz; + } + bool useMMap(size_t sz) const { return (sz >= _mmapLimit); } + size_t _mmapLimit; + size_t _alignment; +}; + + +namespace { + +struct MMapLimitAndAlignmentHash { + std::size_t operator ()(MMapLimitAndAlignment key) const { return key.hash(); } +}; + +using AutoAllocatorsMap = std::unordered_map<MMapLimitAndAlignment, AutoAllocator::UP, MMapLimitAndAlignmentHash>; + +AutoAllocatorsMap createAutoAllocators() { + AutoAllocatorsMap map; + map.reserve(15); + for (size_t alignment : {0,0x200, 0x400, 0x1000}) { + for (size_t pages : {1,2,4,8,16}) { + size_t mmapLimit = pages * MemoryAllocator::HUGEPAGE_SIZE; + MMapLimitAndAlignment key(mmapLimit, alignment); + auto result = map.emplace(key, AutoAllocator::UP(new AutoAllocator(mmapLimit, alignment))); + assert( result.second ); + } + } + return map; +} + +AutoAllocatorsMap _G_availableAutoAllocators = createAutoAllocators(); +alloc::HeapAllocator _G_heapAllocatorDefault; +alloc::AlignedHeapAllocator _G_4KalignedHeapAllocator(1024); +alloc::AlignedHeapAllocator _G_1KalignedHeapAllocator(4096); +alloc::AlignedHeapAllocator _G_512BalignedHeapAllocator(512); +alloc::MMapAllocator _G_mmapAllocatorDefault; + +} + +MemoryAllocator & HeapAllocator::getDefault() { + return _G_heapAllocatorDefault; +} + +MemoryAllocator & AlignedHeapAllocator::get4K() { + return _G_4KalignedHeapAllocator; +} + +MemoryAllocator & AlignedHeapAllocator::get1K() { + return _G_1KalignedHeapAllocator; +} + 
+MemoryAllocator & AlignedHeapAllocator::get512B() { + return _G_512BalignedHeapAllocator; +} + +MemoryAllocator & MMapAllocator::getDefault() { + return _G_mmapAllocatorDefault; +} + +MemoryAllocator & AutoAllocator::getDefault() { + return getAllocator(1 * MemoryAllocator::HUGEPAGE_SIZE, 0); +} + +MemoryAllocator & AutoAllocator::getAllocator(size_t mmapLimit, size_t alignment) { + MMapLimitAndAlignment key(mmapLimit, alignment); + auto found = _G_availableAutoAllocators.find(key); + if (found == _G_availableAutoAllocators.end()) { + throw IllegalArgumentException(make_string("We currently have no support for mmapLimit(%0lx) and alignment(%0lx)", mmapLimit, alignment)); + } + return *(found->second); +} + +void * HeapAllocator::alloc(size_t sz) const { + return salloc(sz); +} + +void * HeapAllocator::salloc(size_t sz) { + return (sz > 0) ? malloc(sz) : 0; +} + +void HeapAllocator::free(void * p, size_t sz) const { + sfree(p, sz); +} + +void HeapAllocator::sfree(void * p, size_t sz) { + (void) sz; if (p) { ::free(p); } +} + +void * AlignedHeapAllocator::alloc(size_t sz) const { + if (!sz) { return 0; } + void* ptr; + int result = posix_memalign(&ptr, _alignment, sz); + if (result != 0) { + throw IllegalArgumentException(make_string("posix_memalign(%zu, %zu) failed with code %d", sz, _alignment, result)); + } + return ptr; +} + +void * MMapAllocator::alloc(size_t sz) const { + return salloc(sz); +} + +void * MMapAllocator::salloc(size_t sz) { void * buf(nullptr); if (sz > 0) { @@ -152,7 +307,11 @@ void * MMapAlloc::alloc(size_t sz) return buf; } -void MMapAlloc::free(void * buf, size_t sz) +void MMapAllocator::free(void * buf, size_t sz) const { + sfree(buf, sz); +} + +void MMapAllocator::sfree(void * buf, size_t sz) { if (buf != nullptr) { madvise(buf, sz, MADV_DONTNEED); @@ -167,4 +326,61 @@ void MMapAlloc::free(void * buf, size_t sz) } } +void * AutoAllocator::alloc(size_t sz) const { + if (useMMap(sz)) { + sz = roundUpToHugePages(sz); + return 
MMapAllocator::salloc(sz); + } else { + if (_alignment == 0) { + return HeapAllocator::salloc(sz); + } else { + return AlignedHeapAllocator(_alignment).alloc(sz); + } + } +} + +void AutoAllocator::free(void *p, size_t sz) const { + if (useMMap(sz)) { + return MMapAllocator::sfree(p, sz); + } else { + return HeapAllocator::sfree(p, sz); + } +} + +Alloc +HeapAllocFactory::create(size_t sz) +{ + return Alloc(&HeapAllocator::getDefault(), sz); +} + +Alloc +AlignedHeapAllocFactory::create(size_t sz, size_t alignment) +{ + if (alignment == 0) { + return Alloc(&AlignedHeapAllocator::getDefault(), sz); + } else if (alignment == 0x200) { + return Alloc(&AlignedHeapAllocator::get512B(), sz); + } else if (alignment == 0x400) { + return Alloc(&AlignedHeapAllocator::get1K(), sz); + } else if (alignment == 0x1000) { + return Alloc(&AlignedHeapAllocator::get4K(), sz); + } else { + throw IllegalArgumentException(make_string("AlignedHeapAllocFactory::create(%zu, %zu) does not support %zu alignment", sz, alignment, alignment)); + } +} + +Alloc +MMapAllocFactory::create(size_t sz) +{ + return Alloc(&MMapAllocator::getDefault(), sz); +} + +Alloc +AutoAllocFactory::create(size_t sz, size_t mmapLimit, size_t alignment) +{ + return Alloc(&AutoAllocator::getAllocator(mmapLimit, alignment), sz); +} + +} + } diff --git a/vespalib/src/vespa/vespalib/util/alloc.h b/vespalib/src/vespa/vespalib/util/alloc.h index 559076c7acd..74366439f2c 100644 --- a/vespalib/src/vespa/vespalib/util/alloc.h +++ b/vespalib/src/vespa/vespalib/util/alloc.h @@ -4,126 +4,98 @@ #include <sys/types.h> #include <algorithm> #include <vespa/vespalib/util/linkedptr.h> -#include <vespa/vespalib/util/noncopyable.hpp> #include <vespa/vespalib/util/optimized.h> namespace vespalib { -inline size_t roundUp2inN(size_t minimum) { - return 2ul << Optimized::msbIdx(minimum - 1); -} +namespace alloc { + +class MemoryAllocator { +public: + enum {HUGEPAGE_SIZE=0x200000}; + using UP = std::unique_ptr<MemoryAllocator>; + 
MemoryAllocator(const MemoryAllocator &) = delete; + MemoryAllocator & operator = (const MemoryAllocator &) = delete; + MemoryAllocator() { } + virtual ~MemoryAllocator() { } + virtual void * alloc(size_t sz) const = 0; + virtual void free(void * buf, size_t sz) const = 0; + static size_t roundUpToHugePages(size_t sz) { + return (sz+(HUGEPAGE_SIZE-1)) & ~(HUGEPAGE_SIZE-1); + } +}; + /** - * This is an allocated buffer interface that does not accept virtual inheritance. + * This represents an allocation. + * It can be created, moved, swapped. + * The allocation strategy is decided upon creation. + * It can also create create additional allocations with the same allocation strategy. **/ -class Alloc : public noncopyable +class Alloc { public: + using MemoryAllocator = alloc::MemoryAllocator; size_t size() const { return _sz; } void * get() { return _buf; } const void * get() const { return _buf; } void * operator -> () { return _buf; } const void * operator -> () const { return _buf; } -protected: + Alloc(const Alloc &) = delete; + Alloc & operator = (const Alloc &) = delete; Alloc(Alloc && rhs) : _buf(rhs._buf), - _sz(rhs._sz) + _sz(rhs._sz), + _allocator(rhs._allocator) { rhs._buf = nullptr; rhs._sz = 0; + rhs._allocator = 0; } Alloc & operator=(Alloc && rhs) { if (this != & rhs) { - internalSwap(rhs); + swap(rhs); } return *this; } - Alloc(void * buf, size_t sz) : _buf(buf), _sz(sz) { } - ~Alloc() { _buf = 0; } - void internalSwap(Alloc & rhs) { + Alloc() : _buf(nullptr), _sz(0), _allocator(nullptr) { } + Alloc(const MemoryAllocator * allocator, size_t sz) : _buf(allocator->alloc(sz)), _sz(sz), _allocator(allocator) { } + ~Alloc() { + if (_buf != nullptr) { + _allocator->free(_buf, _sz); + _buf = nullptr; + } + } + void swap(Alloc & rhs) { std::swap(_buf, rhs._buf); std::swap(_sz, rhs._sz); + std::swap(_allocator, rhs._allocator); } -private: - void * _buf; - size_t _sz; + Alloc create(size_t sz) const { + return Alloc(_allocator, sz); + } +protected: + void * 
_buf; + size_t _sz; + const MemoryAllocator * _allocator; }; -class HeapAlloc : public Alloc +class HeapAllocFactory { public: - typedef std::unique_ptr<HeapAlloc> UP; - HeapAlloc() : Alloc(NULL, 0) { } - HeapAlloc(size_t sz) : Alloc(HeapAlloc::alloc(sz), sz) { } - ~HeapAlloc() { HeapAlloc::free(get(), size()); } - HeapAlloc(HeapAlloc && rhs) : Alloc(std::move(rhs)) { } - - HeapAlloc & operator=(HeapAlloc && rhs) { - Alloc::operator=(std::move(rhs)); - return *this; - } - void swap(HeapAlloc & rhs) { internalSwap(rhs); } -public: - static void * alloc(size_t sz) { return (sz > 0) ? malloc(sz) : 0; } - static void free(void * buf, size_t sz) { (void) sz; if (buf) { ::free(buf); } } + static Alloc create(size_t sz=0); }; -class AlignedHeapAlloc : public Alloc +class AlignedHeapAllocFactory { public: - typedef std::unique_ptr<AlignedHeapAlloc> UP; - AlignedHeapAlloc() : Alloc(NULL, 0) { } - AlignedHeapAlloc(size_t sz, size_t alignment) - : Alloc(AlignedHeapAlloc::alloc(sz, alignment), sz) { } - AlignedHeapAlloc(AlignedHeapAlloc && rhs) : Alloc(std::move(rhs)) { } - - AlignedHeapAlloc & operator=(AlignedHeapAlloc && rhs) { - Alloc::operator=(std::move(rhs)); - return *this; - } - ~AlignedHeapAlloc() { AlignedHeapAlloc::free(get(), size()); } - void swap(AlignedHeapAlloc & rhs) { internalSwap(rhs); } -public: - static void * alloc(size_t sz, size_t alignment); - static void free(void * buf, size_t sz) { (void) sz; if (buf) { ::free(buf); } } + static Alloc create(size_t sz, size_t alignment); }; - -class MMapAlloc : public Alloc +class MMapAllocFactory { public: enum {HUGEPAGE_SIZE=0x200000}; - typedef std::unique_ptr<MMapAlloc> UP; - MMapAlloc() : Alloc(NULL, 0) { } - MMapAlloc(size_t sz) : Alloc(MMapAlloc::alloc(sz), sz) { } - MMapAlloc(MMapAlloc && rhs) : Alloc(std::move(rhs)) { } - - MMapAlloc & operator=(MMapAlloc && rhs) { - Alloc::operator=(std::move(rhs)); - return *this; - } - ~MMapAlloc() { MMapAlloc::free(get(), size()); } - void swap(MMapAlloc & rhs) { 
internalSwap(rhs); } -public: - static void * alloc(size_t sz); - static void free(void * buf, size_t sz); -}; - -// Alignment requirement is != 0, use posix_memalign -template <size_t Alignment> -struct ChooseHeapAlloc -{ - static inline void* alloc(size_t sz) { - return AlignedHeapAlloc::alloc(sz, Alignment); - } -}; - -// No alignment required, use regular malloc -template <> -struct ChooseHeapAlloc<0> -{ - static inline void* alloc(size_t sz) { - return HeapAlloc::alloc(sz); - } + static Alloc create(size_t sz=0); }; /** @@ -131,53 +103,18 @@ struct ChooseHeapAlloc<0> * is always used when size is above limit. */ -template <size_t Lim=MMapAlloc::HUGEPAGE_SIZE, size_t Alignment=0> -class AutoAlloc : public Alloc +class AutoAllocFactory { public: - typedef std::unique_ptr<AutoAlloc> UP; - typedef vespalib::LinkedPtr<AutoAlloc> LP; - AutoAlloc() : Alloc(NULL, 0) { } - AutoAlloc(size_t sz) - : Alloc(useMMap(sz) - ? MMapAlloc::alloc(roundUpToHugePages(sz)) - : ChooseHeapAlloc<Alignment>::alloc(sz), - useMMap(sz) - ? roundUpToHugePages(sz) - : sz) - { } - AutoAlloc(AutoAlloc && rhs) : Alloc(std::move(rhs)) { } - - AutoAlloc & operator=(AutoAlloc && rhs) { - Alloc::operator=(std::move(rhs)); - return *this; - } - - ~AutoAlloc() { - if (useMMap(size())) { - MMapAlloc::free(get(), size()); - } else { - HeapAlloc::free(get(), size()); - } - } - void swap(AutoAlloc & rhs) { internalSwap(rhs); } -private: - static size_t roundUpToHugePages(size_t sz) { - return (Lim >= MMapAlloc::HUGEPAGE_SIZE) - ? 
(sz+(MMapAlloc::HUGEPAGE_SIZE-1)) & ~(MMapAlloc::HUGEPAGE_SIZE-1) - : sz; - } - static bool useMMap(size_t sz) { return (sz >= Lim); } + static Alloc create(size_t sz=0, size_t mmapLimit=MemoryAllocator::HUGEPAGE_SIZE, size_t alignment=0); }; -template <size_t Lim> -inline void swap(AutoAlloc<Lim> & a, AutoAlloc<Lim> & b) { a.swap(b); } +} -inline void swap(HeapAlloc & a, HeapAlloc & b) { a.swap(b); } -inline void swap(AlignedHeapAlloc & a, AlignedHeapAlloc & b) { a.swap(b); } -inline void swap(MMapAlloc & a, MMapAlloc & b) { a.swap(b); } +inline size_t roundUp2inN(size_t minimum) { + return 2ul << Optimized::msbIdx(minimum - 1); +} -typedef AutoAlloc<> DefaultAlloc; +using DefaultAlloc = alloc::AutoAllocFactory; } - diff --git a/vespalib/src/vespa/vespalib/util/array.h b/vespalib/src/vespa/vespalib/util/array.h index 8f40d4de0c7..f2546e46e22 100644 --- a/vespalib/src/vespa/vespalib/util/array.h +++ b/vespalib/src/vespa/vespalib/util/array.h @@ -35,7 +35,7 @@ private: * it generates more efficient code. * It only supports simple objects without constructors/destructors. 
**/ -template <typename T, typename B=HeapAlloc> +template <typename T> class Array { public: class reverse_iterator { @@ -101,18 +101,19 @@ public: friend size_t operator -(const_reverse_iterator a, const_reverse_iterator b) { return b._p - a._p; } const T * _p; }; + using Alloc = alloc::Alloc; typedef const T * const_iterator; typedef T * iterator; typedef const T & const_reference; typedef T value_type; typedef size_t size_type; - Array() : _array(), _sz(0) { } - Array(size_t sz); - Array(B && buf, size_t sz); + Array(const Alloc & initial=DefaultAlloc::create()) : _array(initial.create(0)), _sz(0) { } + Array(size_t sz, const Alloc & initial=DefaultAlloc::create()); + Array(Alloc && buf, size_t sz); Array(Array &&rhs); - Array(size_t sz, T value); - Array(const_iterator begin, const_iterator end); + Array(size_t sz, T value, const Alloc & initial=DefaultAlloc::create()); + Array(const_iterator begin, const_iterator end, const Alloc & initial=DefaultAlloc::create()); Array(const Array & rhs); Array & operator =(const Array & rhs) { if (&rhs != this) { @@ -185,7 +186,7 @@ private: reserve(roundUp2inN(n)); } } - B _array; + Alloc _array; size_t _sz; }; @@ -236,16 +237,16 @@ void construct(T * dest, size_t sz, T val, std::tr1::true_type) } } -template <typename T, typename B> -Array<T, B>::Array(const Array & rhs) - : _array(rhs.size() * sizeof(T)), +template <typename T> +Array<T>::Array(const Array & rhs) + : _array(rhs._array.create(rhs.size() * sizeof(T))), _sz(rhs.size()) { construct(array(0), rhs.array(0), _sz, std::tr1::has_trivial_destructor<T>()); } -template <typename T, typename B> -bool Array<T, B>::operator ==(const Array & rhs) const +template <typename T> +bool Array<T>::operator ==(const Array & rhs) const { bool retval(size() == rhs.size()); for (size_t i(0); retval && (i < _sz); i++) { @@ -256,8 +257,8 @@ bool Array<T, B>::operator ==(const Array & rhs) const return retval; } -template <typename T, typename B> -void Array<T, B>::resize(size_t n) 
+template <typename T> +void Array<T>::resize(size_t n) { if (n > capacity()) { reserve(n); @@ -285,69 +286,68 @@ void move(T * dest, const T * source, size_t sz, std::tr1::true_type) memcpy(dest, source, sz*sizeof(T)); } -template <typename T, typename B> -void Array<T, B>::increase(size_t n) +template <typename T> +void Array<T>::increase(size_t n) { - B newArray(sizeof(T)*n); + Alloc newArray(_array.create(sizeof(T)*n)); if (capacity() > 0) { move(static_cast<T *>(newArray.get()), array(0), _sz, std::tr1::has_trivial_destructor<T>()); } _array.swap(newArray); } -template <typename T, typename B> -Array<T, B>::Array(B && buf, size_t sz) : +template <typename T> +Array<T>::Array(Alloc && buf, size_t sz) : _array(std::move(buf)), _sz(sz) { } -template <typename T, typename B> -Array<T, B>::Array(Array &&rhs) +template <typename T> +Array<T>::Array(Array &&rhs) : _array(std::move(rhs._array)), _sz(rhs._sz) { rhs._sz = 0; } -template <typename T, typename B> -Array<T, B>::Array(size_t sz) : - _array(sz * sizeof(T)), +template <typename T> +Array<T>::Array(size_t sz, const Alloc & initial) : + _array(initial.create(sz * sizeof(T))), _sz(sz) { construct(array(0), _sz, std::tr1::has_trivial_destructor<T>()); } -template <typename T, typename B> -Array<T, B>::Array(size_t sz, T value) : - _array(sz * sizeof(T)), +template <typename T> +Array<T>::Array(size_t sz, T value, const Alloc & initial) : + _array(initial.create(sz * sizeof(T))), _sz(sz) { construct(array(0), _sz, value, std::tr1::has_trivial_destructor<T>()); } -template <typename T, typename B> -Array<T, B>::Array(const_iterator begin_, const_iterator end_) : - _array(begin_ != end_ ? sizeof(T) * (end_-begin_) : 0), +template <typename T> +Array<T>::Array(const_iterator begin_, const_iterator end_, const Alloc & initial) : + _array(initial.create(begin_ != end_ ? 
sizeof(T) * (end_-begin_) : 0)), _sz(end_-begin_) { construct(array(0), begin_, _sz, std::tr1::has_trivial_destructor<T>()); } -template <typename T, typename B> -Array<T, B>::~Array() +template <typename T> +Array<T>::~Array() { cleanup(); } -template <typename T, typename B> -void Array<T, B>::cleanup() +template <typename T> +void Array<T>::cleanup() { std::_Destroy(array(0), array(_sz)); _sz = 0; - B tmp; - tmp.swap(_array); + Alloc().swap(_array); } } diff --git a/vespalib/src/vespa/vespalib/util/backtrace.cpp b/vespalib/src/vespa/vespalib/util/backtrace.cpp index f6d07718a51..48fb07410ec 100644 --- a/vespalib/src/vespa/vespalib/util/backtrace.cpp +++ b/vespalib/src/vespa/vespalib/util/backtrace.cpp @@ -3,10 +3,10 @@ #include <vespa/fastos/fastos.h> #include <vespa/vespalib/util/backtrace.h> #include <vespa/vespalib/util/memory.h> +#include <vespa/vespalib/util/classname.h> #include <vespa/vespalib/stllike/string.h> #include <vespa/vespalib/stllike/asciistream.h> #include <vespa/fastos/backtrace.h> -#include <cxxabi.h> #include <execinfo.h> #include <signal.h> @@ -31,11 +31,8 @@ demangleBacktraceLine(const vespalib::string& line) size_t symEnd = line.find_first_of('+', symBegin); if (symEnd != vespalib::string::npos) { vespalib::string mangled = line.substr(symBegin + 1, symEnd - symBegin - 1); - int status; - char* demangled = abi::__cxa_demangle(mangled.c_str(), NULL, NULL, &status); - vespalib::MallocAutoPtr demangleScoped(demangled); - - if (status == 0 && demangled != NULL) { + vespalib::string demangled = vespalib::demangle(mangled.c_str()); + if ( ! 
demangled.empty()) { // Create string matching original backtrace line format, // except with demangled function signature vespalib::string ret(line.c_str(), symBegin + 1); diff --git a/vespalib/src/vespa/vespalib/util/classname.cpp b/vespalib/src/vespa/vespalib/util/classname.cpp new file mode 100644 index 00000000000..0fa9daceb3b --- /dev/null +++ b/vespalib/src/vespa/vespalib/util/classname.cpp @@ -0,0 +1,17 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#include <vespa/vespalib/util/classname.h> +#include <cxxabi.h> + +namespace vespalib { + +string demangle(const char * native) { + int status = 0; + size_t size = 0; + char *unmangled = abi::__cxa_demangle(native, 0, &size, &status); + string result(unmangled); + free(unmangled); + return result; +} + +} diff --git a/vespalib/src/vespa/vespalib/util/classname.h b/vespalib/src/vespa/vespalib/util/classname.h new file mode 100644 index 00000000000..b99e5a7d2c6 --- /dev/null +++ b/vespalib/src/vespa/vespalib/util/classname.h @@ -0,0 +1,16 @@ +// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+#pragma once + +#include <vespa/vespalib/stllike/string.h> + +namespace vespalib { + +string demangle(const char * native); + +template <typename T> +string +getClassName(const T & obj) { + return demangle(typeid(obj).name()); +} + +} diff --git a/vespalib/src/vespa/vespalib/util/exceptions.cpp b/vespalib/src/vespa/vespalib/util/exceptions.cpp index ce1de7d569a..99485422493 100644 --- a/vespalib/src/vespa/vespalib/util/exceptions.cpp +++ b/vespalib/src/vespa/vespalib/util/exceptions.cpp @@ -39,6 +39,11 @@ void silent_terminate() { } +const char * +ExceptionWithPayload::what() const noexcept { + return _msg.c_str(); +} + SilenceUncaughtException::SilenceUncaughtException(const std::exception & e) : _oldTerminate(std::set_terminate(silent_terminate)) { diff --git a/vespalib/src/vespa/vespalib/util/exceptions.h b/vespalib/src/vespa/vespalib/util/exceptions.h index 721829e90d8..0fc0e96e193 100644 --- a/vespalib/src/vespa/vespalib/util/exceptions.h +++ b/vespalib/src/vespa/vespalib/util/exceptions.h @@ -71,6 +71,7 @@ public: ExceptionWithPayload(vespalib::stringref msg) : std::exception(), _msg(msg), _payload() { } ExceptionWithPayload(vespalib::stringref msg, Anything::UP payload) : std::exception(), _msg(msg), _payload(std::move(payload)) { } void setPayload(Anything::UP payload) { _payload = std::move(payload); } + const char * what() const noexcept override; private: vespalib::string _msg; Anything::UP _payload; diff --git a/vespalib/src/vespa/vespalib/util/printable.h b/vespalib/src/vespa/vespalib/util/printable.h index cb1e299994c..7ce1ee073ed 100644 --- a/vespalib/src/vespa/vespalib/util/printable.h +++ b/vespalib/src/vespa/vespalib/util/printable.h @@ -113,11 +113,8 @@ public: vespalib::string indent(uint32_t extraLevels) const; }; - virtual void print(std::ostream& out, bool verbose, - const std::string& indent) const; - - virtual void print(vespalib::asciistream&, - const PrintProperties& = PrintProperties()) const = 0; + virtual void print(std::ostream& out, 
bool verbose, const std::string& indent) const; + virtual void print(vespalib::asciistream&, const PrintProperties& = PrintProperties()) const = 0; vespalib::string toString(const PrintProperties& = PrintProperties()) const; }; @@ -127,11 +124,36 @@ inline std::ostream& operator<<(std::ostream& out, const Printable& p) { return out; } -inline vespalib::asciistream& operator<<(vespalib::asciistream& out, - const AsciiPrintable& p) +inline vespalib::asciistream& operator<<(vespalib::asciistream& out, const AsciiPrintable& p) { p.print(out); return out; } +template<typename T> +void print(const std::vector<T> & v, vespalib::asciistream& out, const AsciiPrintable::PrintProperties& p) { + if (v.empty()) { + out << "[]"; + return; + } + vespalib::asciistream ost; + ost << v[0]; + bool newLineBetweenEntries = (ost.str().size() > 15); + out << "["; + for (size_t i=0; i<v.size(); ++i) { + if (i != 0) out << ","; + if (newLineBetweenEntries) { + out << "\n" << p.indent(1); + } else { + if (i != 0) { out << " "; } + } + out << v[i]; + } + if (newLineBetweenEntries) { + out << "\n" << p.indent(); + } + out << "]"; +} + + } // vespalib diff --git a/vsm/src/tests/searcher/searcher.cpp b/vsm/src/tests/searcher/searcher.cpp index dbf458a0c32..dfcad223701 100644 --- a/vsm/src/tests/searcher/searcher.cpp +++ b/vsm/src/tests/searcher/searcher.cpp @@ -545,10 +545,18 @@ SearcherTest::testUTF8ExactStringFieldSearcher() { UTF8ExactStringFieldSearcher fs(0); // regular - assertString(fs, "vespa", "vespa", Hits().add(0)); - assertString(fs, "vespa", "vespa vespa", Hits()); - assertString(fs, "vesp", "vespa", Hits()); - assertString(fs, "vesp*", "vespa", Hits().add(0)); + TEST_DO(assertString(fs, "vespa", "vespa", Hits().add(0))); + TEST_DO(assertString(fs, "vespar", "vespa", Hits())); + TEST_DO(assertString(fs, "vespa", "vespar", Hits())); + TEST_DO(assertString(fs, "vespa", "vespa vespa", Hits())); + TEST_DO(assertString(fs, "vesp", "vespa", Hits())); + TEST_DO(assertString(fs, "vesp*", 
"vespa", Hits().add(0))); + TEST_DO(assertString(fs, "hutte", "hutte", Hits().add(0))); + TEST_DO(assertString(fs, "hütte", "hütte", Hits().add(0))); + TEST_DO(assertString(fs, "hutte", "hütte", Hits())); + TEST_DO(assertString(fs, "hütte", "hutte", Hits())); + TEST_DO(assertString(fs, "hütter", "hütte", Hits())); + TEST_DO(assertString(fs, "hütte", "hütter", Hits())); } void diff --git a/vsm/src/vespa/vsm/searcher/fieldsearcher.h b/vsm/src/vespa/vsm/searcher/fieldsearcher.h index 597baea1b67..73ad8a9f28a 100644 --- a/vsm/src/vespa/vsm/searcher/fieldsearcher.h +++ b/vsm/src/vespa/vsm/searcher/fieldsearcher.h @@ -18,7 +18,7 @@ typedef size_t termsize_t; #endif typedef ucs4_t cmptype_t; -typedef vespalib::Array<cmptype_t, vespalib::DefaultAlloc> SearcherBuf; +typedef vespalib::Array<cmptype_t> SearcherBuf; typedef std::shared_ptr<SearcherBuf> SharedSearcherBuf; typedef std::vector<char> CharVector; diff --git a/vsm/src/vespa/vsm/searcher/utf8stringfieldsearcherbase.cpp b/vsm/src/vespa/vsm/searcher/utf8stringfieldsearcherbase.cpp index 3cba7587598..a19829773b1 100644 --- a/vsm/src/vespa/vsm/searcher/utf8stringfieldsearcherbase.cpp +++ b/vsm/src/vespa/vsm/searcher/utf8stringfieldsearcherbase.cpp @@ -139,7 +139,7 @@ UTF8StringFieldSearcherBase::matchTermExact(const FieldRef & f, QueryTerm & qt) termsize_t tsz = qt.term(term); const cmptype_t * eterm = term+tsz; const byte * e = n + f.size(); - if ((tsz == f.size()) || ((tsz < f.size()) && qt.isPrefix())) { + if (tsz <= f.size()) { bool equal(true); for (; equal && (n < e) && (term < eterm); term++) { if (*term < 0x80) { @@ -149,7 +149,7 @@ UTF8StringFieldSearcherBase::matchTermExact(const FieldRef & f, QueryTerm & qt) equal = (*term == c); } } - if (equal && (qt.isPrefix() || (n == e))) { + if (equal && (term == eterm) && (qt.isPrefix() || (n == e))) { addHit(qt,0); } } |