diff options
295 files changed, 5552 insertions, 2474 deletions
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/MasterElectionTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/MasterElectionTest.java index 23389de3fad..33f2f909910 100644 --- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/MasterElectionTest.java +++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/MasterElectionTest.java @@ -39,6 +39,8 @@ public class MasterElectionTest extends FleetControllerTest { @Rule public TestRule cleanupZookeeperLogsOnSuccess = new CleanupZookeeperLogsOnSuccess(); + private static int defaultZkSessionTimeoutInMillis() { return 30_000; } + protected void setUpFleetController(int count, boolean useFakeTimer, FleetControllerOptions options) throws Exception { if (zooKeeperServer == null) { zooKeeperServer = new ZooKeeperTestServer(); @@ -46,7 +48,7 @@ public class MasterElectionTest extends FleetControllerTest { slobrok = new Slobrok(); usingFakeTimer = useFakeTimer; this.options = options; - this.options.zooKeeperSessionTimeout = 10 * timeoutMS; + this.options.zooKeeperSessionTimeout = defaultZkSessionTimeoutInMillis(); this.options.zooKeeperServerAddress = zooKeeperServer.getAddress(); this.options.slobrokConnectionSpecs = new String[1]; this.options.slobrokConnectionSpecs[0] = "tcp/localhost:" + slobrok.port(); @@ -62,7 +64,7 @@ public class MasterElectionTest extends FleetControllerTest { int fleetControllerIndex, int fleetControllerCount) throws Exception { FleetControllerOptions options = o.clone(); - options.zooKeeperSessionTimeout = 10 * timeoutMS; + options.zooKeeperSessionTimeout = defaultZkSessionTimeoutInMillis(); options.zooKeeperServerAddress = zooKeeperServer.getAddress(); options.slobrokConnectionSpecs = new String[1]; options.slobrokConnectionSpecs[0] = "tcp/localhost:" + slobrok.port(); // Spec.fromLocalHostName(slobrok.port()).toString(); @@ -251,7 +253,6 @@ public class 
MasterElectionTest extends FleetControllerTest { FleetControllerOptions options = defaultOptions("mycluster"); // "Magic" port value is in range allocated to module for testing. zooKeeperServer = ZooKeeperTestServer.createWithFixedPort(18342); - options.zooKeeperSessionTimeout = 100; options.masterZooKeeperCooldownPeriod = 100; setUpFleetController(2, true, options); waitForMaster(0); @@ -273,7 +274,6 @@ public class MasterElectionTest extends FleetControllerTest { public void testZooKeeperUnavailable() throws Exception { startingTest("MasterElectionTest::testZooKeeperUnavailable"); FleetControllerOptions options = defaultOptions("mycluster"); - options.zooKeeperSessionTimeout = 100; options.masterZooKeeperCooldownPeriod = 100; options.zooKeeperServerAddress = "localhost"; setUpFleetController(5, true, options); diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpoint.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ContainerEndpoint.java index b0fd3a81732..5641233606e 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpoint.java +++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ContainerEndpoint.java @@ -1,7 +1,5 @@ // Copyright 2019 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.vespa.config.server.tenant; - -import com.yahoo.vespa.applicationmodel.ClusterId; +package com.yahoo.config.model.api; import java.util.List; import java.util.Objects; @@ -15,15 +13,15 @@ import java.util.Objects; */ public class ContainerEndpoint { - private final ClusterId clusterId; + private final String clusterId; private final List<String> names; - public ContainerEndpoint(ClusterId clusterId, List<String> names) { + public ContainerEndpoint(String clusterId, List<String> names) { this.clusterId = Objects.requireNonNull(clusterId); this.names = List.copyOf(Objects.requireNonNull(names)); } - public ClusterId clusterId() { + public String clusterId() { return clusterId; } diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java index 55373f425e0..b5db9f5eddd 100644 --- a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java +++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java @@ -50,13 +50,15 @@ public interface ModelContext { boolean hostedVespa(); Zone zone(); Set<Rotation> rotations(); + Set<ContainerEndpoint> endpoints(); boolean isBootstrap(); boolean isFirstTimeDeployment(); boolean useDedicatedNodeForLogserver(); boolean useFdispatchByDefault(); boolean dispatchWithProtobuf(); boolean useAdaptiveDispatch(); - boolean enableMetricsProxyContainer(); + // TODO: Remove when 7.61 is the oldest model in use + default boolean enableMetricsProxyContainer() { return false; } } } diff --git a/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java b/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java index c19865fafc9..21a8297910f 100644 --- a/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java +++ b/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java @@ -11,6 +11,7 @@ import 
com.yahoo.config.application.api.FileRegistry; import com.yahoo.config.application.api.UnparsedConfigDefinition; import com.yahoo.config.application.api.ValidationOverrides; import com.yahoo.config.model.api.ConfigDefinitionRepo; +import com.yahoo.config.model.api.ContainerEndpoint; import com.yahoo.config.model.api.HostProvisioner; import com.yahoo.config.model.api.Model; import com.yahoo.config.model.api.ModelContext; @@ -67,6 +68,7 @@ public class DeployState implements ConfigDefinitionStore { private final ModelContext.Properties properties; private final Version vespaVersion; private final Set<Rotation> rotations; + private final Set<ContainerEndpoint> endpoints; private final Zone zone; private final QueryProfiles queryProfiles; private final SemanticRules semanticRules; @@ -96,6 +98,7 @@ public class DeployState implements ConfigDefinitionStore { Optional<ConfigDefinitionRepo> configDefinitionRepo, java.util.Optional<Model> previousModel, Set<Rotation> rotations, + Set<ContainerEndpoint> endpoints, Collection<MlModelImporter> modelImporters, Zone zone, QueryProfiles queryProfiles, @@ -115,6 +118,7 @@ public class DeployState implements ConfigDefinitionStore { this.permanentApplicationPackage = permanentApplicationPackage; this.configDefinitionRepo = configDefinitionRepo; this.rotations = rotations; + this.endpoints = Set.copyOf(endpoints); this.zone = zone; this.queryProfiles = queryProfiles; // TODO: Remove this by seeing how pagetemplates are propagated this.semanticRules = semanticRules; // TODO: Remove this by seeing how pagetemplates are propagated @@ -234,6 +238,10 @@ public class DeployState implements ConfigDefinitionStore { return this.rotations; // todo: consider returning a copy or immutable view } + public Set<ContainerEndpoint> getEndpoints() { + return endpoints; + } + /** Returns the zone in which this is currently running */ public Zone zone() { return zone; } @@ -260,6 +268,7 @@ public class DeployState implements ConfigDefinitionStore { 
private Optional<ConfigDefinitionRepo> configDefinitionRepo = Optional.empty(); private Optional<Model> previousModel = Optional.empty(); private Set<Rotation> rotations = new HashSet<>(); + private Set<ContainerEndpoint> endpoints = Set.of(); private Collection<MlModelImporter> modelImporters = Collections.emptyList(); private Zone zone = Zone.defaultZone(); private Instant now = Instant.now(); @@ -315,6 +324,11 @@ public class DeployState implements ConfigDefinitionStore { return this; } + public Builder endpoints(Set<ContainerEndpoint> endpoints) { + this.endpoints = endpoints; + return this; + } + public Builder modelImporters(Collection<MlModelImporter> modelImporters) { this.modelImporters = modelImporters; return this; @@ -356,6 +370,7 @@ public class DeployState implements ConfigDefinitionStore { configDefinitionRepo, previousModel, rotations, + endpoints, modelImporters, zone, queryProfiles, diff --git a/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java b/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java index 4b35af53154..87ff9d1bb2a 100644 --- a/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java +++ b/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java @@ -3,6 +3,7 @@ package com.yahoo.config.model.deploy; import com.google.common.collect.ImmutableList; import com.yahoo.config.model.api.ConfigServerSpec; +import com.yahoo.config.model.api.ContainerEndpoint; import com.yahoo.config.model.api.ModelContext; import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.HostName; @@ -31,13 +32,13 @@ public class TestProperties implements ModelContext.Properties { private boolean hostedVespa = false; private Zone zone; private Set<Rotation> rotations; + private Set<ContainerEndpoint> endpoints = Collections.emptySet(); private boolean isBootstrap = false; private boolean isFirstTimeDeployment = false; private boolean 
useDedicatedNodeForLogserver = false; private boolean useFdispatchByDefault = true; private boolean dispatchWithProtobuf = true; private boolean useAdaptiveDispatch = false; - private boolean enableMetricsProxyContainer = false; @Override public boolean multitenant() { return multitenant; } @@ -49,13 +50,14 @@ public class TestProperties implements ModelContext.Properties { @Override public boolean hostedVespa() { return hostedVespa; } @Override public Zone zone() { return zone; } @Override public Set<Rotation> rotations() { return rotations; } + @Override public Set<ContainerEndpoint> endpoints() { return endpoints; } + @Override public boolean isBootstrap() { return isBootstrap; } @Override public boolean isFirstTimeDeployment() { return isFirstTimeDeployment; } @Override public boolean useAdaptiveDispatch() { return useAdaptiveDispatch; } @Override public boolean useDedicatedNodeForLogserver() { return useDedicatedNodeForLogserver; } @Override public boolean useFdispatchByDefault() { return useFdispatchByDefault; } @Override public boolean dispatchWithProtobuf() { return dispatchWithProtobuf; } - @Override public boolean enableMetricsProxyContainer() { return enableMetricsProxyContainer; } public TestProperties setApplicationId(ApplicationId applicationId) { this.applicationId = applicationId; @@ -87,10 +89,6 @@ public class TestProperties implements ModelContext.Properties { return this; } - public TestProperties setEnableMetricsProxyContainer(boolean enableMetricsProxyContainer) { - this.enableMetricsProxyContainer = enableMetricsProxyContainer; - return this; - } public static class Spec implements ConfigServerSpec { diff --git a/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java b/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java index 9e0bbc395df..0dde5c99d4a 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java @@ -56,7 +56,8 @@ 
public class HostSystem extends AbstractConfigProducer<Host> { } if (! hostname.contains(".")) { deployLogger.log(Level.WARNING, "Host named '" + hostname + "' may not receive any config " + - "since it is not a canonical hostname"); + "since it is not a canonical hostname." + + "Disregard this warning when testing in a Docker container."); } } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java b/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java index af6400023cc..f69330eb196 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java @@ -139,6 +139,7 @@ public class VespaModelFactory implements ModelFactory { .vespaVersion(version()) .modelHostProvisioner(createHostProvisioner(modelContext)) .rotations(modelContext.properties().rotations()) + .endpoints(modelContext.properties().endpoints()) .modelImporters(modelImporters) .zone(zone) .now(clock.instant()) diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/ConsumersConfigGenerator.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/ConsumersConfigGenerator.java index fe40e6ffa84..29d1b557c49 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/ConsumersConfigGenerator.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/ConsumersConfigGenerator.java @@ -10,14 +10,12 @@ import com.yahoo.vespa.model.admin.monitoring.MetricSet; import com.yahoo.vespa.model.admin.monitoring.MetricsConsumer; import javax.annotation.Nullable; -import java.util.ArrayList; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.stream.Collectors; -import static com.yahoo.vespa.model.admin.monitoring.DefaultMetricsConsumer.VESPA_CONSUMER_ID; -import static 
com.yahoo.vespa.model.admin.monitoring.DefaultMetricsConsumer.getDefaultMetricsConsumer; +import static com.yahoo.vespa.model.admin.monitoring.VespaMetricsConsumer.VESPA_CONSUMER_ID; /** * Helper class to generate config for metrics consumers. diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainer.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainer.java index 3bc38cad1d1..2a698233713 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainer.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainer.java @@ -19,10 +19,8 @@ import java.util.LinkedHashMap; import java.util.Map; import static com.yahoo.config.model.api.container.ContainerServiceType.METRICS_PROXY_CONTAINER; -import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyContainer.NodeDimensionNames.CANONICAL_FLAVOR; import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyContainer.NodeDimensionNames.CLUSTER_ID; import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyContainer.NodeDimensionNames.CLUSTER_TYPE; -import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyContainer.NodeDimensionNames.FLAVOR; import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyContainerCluster.METRICS_PROXY_BUNDLE_NAME; /** @@ -37,8 +35,6 @@ public class MetricsProxyContainer extends Container implements { static final class NodeDimensionNames { - static final String FLAVOR = "flavor"; - static final String CANONICAL_FLAVOR = "canonicalFlavor"; static final String CLUSTER_TYPE = "clustertype"; static final String CLUSTER_ID = "clusterid"; } @@ -118,11 +114,6 @@ public class MetricsProxyContainer extends Container implements public void getConfig(NodeDimensionsConfig.Builder builder) { Map<String, String> dimensions = new LinkedHashMap<>(); if (isHostedVespa) { - 
getHostResource().getFlavor().ifPresent(flavor -> { - dimensions.put(FLAVOR, flavor.name()); - dimensions.put(CANONICAL_FLAVOR, flavor.canonicalName()); - }); - getHostResource().primaryClusterMembership().map(ClusterMembership::cluster).ifPresent(cluster -> { dimensions.put(CLUSTER_TYPE, cluster.type().name()); dimensions.put(CLUSTER_ID, cluster.id().value()); diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerCluster.java index 47c6b2dbb52..5a41696c6f2 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerCluster.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerCluster.java @@ -47,7 +47,7 @@ import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyContainerClus import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyContainerCluster.AppDimensionNames.LEGACY_APPLICATION; import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyContainerCluster.AppDimensionNames.TENANT; import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyContainerCluster.AppDimensionNames.ZONE; -import static com.yahoo.vespa.model.admin.monitoring.DefaultMetricsConsumer.getDefaultMetricsConsumer; +import static com.yahoo.vespa.model.admin.monitoring.VespaMetricsConsumer.getVespaMetricsConsumer; import static com.yahoo.vespa.model.admin.monitoring.MetricSet.emptyMetricSet; import static com.yahoo.vespa.model.container.xml.BundleMapper.JarSuffix.JAR_WITH_DEPS; import static com.yahoo.vespa.model.container.xml.BundleMapper.absoluteBundlePath; @@ -128,7 +128,7 @@ public class MetricsProxyContainerCluster extends ContainerCluster<MetricsProxyC @Override public void getConfig(ConsumersConfig.Builder builder) { - var amendedDefaultConsumer = addMetrics(getDefaultMetricsConsumer(), 
getAdditionalDefaultMetrics().getMetrics()); + var amendedDefaultConsumer = addMetrics(getVespaMetricsConsumer(), getAdditionalDefaultMetrics().getMetrics()); builder.consumer.addAll(generateConsumers(amendedDefaultConsumer, getUserMetricsConsumers())); } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/DefaultMetricsConsumer.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricsConsumer.java index e9eca67a55a..81e9cfcd6a0 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/DefaultMetricsConsumer.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricsConsumer.java @@ -10,22 +10,22 @@ import static com.yahoo.vespa.model.admin.monitoring.VespaMetricSet.vespaMetricS import static java.util.Collections.emptyList; /** - * This class sets up the default 'Vespa' metrics consumer. + * This class sets up the 'Vespa' metrics consumer. * * @author trygve * @author gjoranv */ -public class DefaultMetricsConsumer { +public class VespaMetricsConsumer { public static final String VESPA_CONSUMER_ID = VespaMetrics.VESPA_CONSUMER_ID.id; - private static final MetricSet defaultConsumerMetrics = new MetricSet("default-consumer", + private static final MetricSet defaultConsumerMetrics = new MetricSet("vespa-consumer-metrics", emptyList(), ImmutableList.of(vespaMetricSet, systemMetricSet, networkMetricSet)); - public static MetricsConsumer getDefaultMetricsConsumer() { + public static MetricsConsumer getVespaMetricsConsumer() { return new MetricsConsumer(VESPA_CONSUMER_ID, defaultConsumerMetrics); } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/builder/xml/MetricsBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/builder/xml/MetricsBuilder.java index fab1e90cc03..0ad0d57c1c3 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/builder/xml/MetricsBuilder.java +++ 
b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/builder/xml/MetricsBuilder.java @@ -3,7 +3,6 @@ package com.yahoo.vespa.model.admin.monitoring.builder.xml; import com.yahoo.config.model.ConfigModelContext.ApplicationType; import com.yahoo.text.XML; -import com.yahoo.vespa.model.admin.monitoring.DefaultVespaMetrics; import com.yahoo.vespa.model.admin.monitoring.Metric; import com.yahoo.vespa.model.admin.monitoring.MetricSet; import com.yahoo.vespa.model.admin.monitoring.MetricsConsumer; @@ -15,7 +14,7 @@ import java.util.List; import java.util.Map; import java.util.stream.Collectors; -import static com.yahoo.vespa.model.admin.monitoring.DefaultMetricsConsumer.VESPA_CONSUMER_ID; +import static com.yahoo.vespa.model.admin.monitoring.VespaMetricsConsumer.VESPA_CONSUMER_ID; import static com.yahoo.vespa.model.admin.monitoring.DefaultVespaMetrics.defaultVespaMetricSet; import static com.yahoo.vespa.model.admin.monitoring.SystemMetrics.systemMetricSet; diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java index 642f882f3ed..f68ddecad9d 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java @@ -9,6 +9,7 @@ import com.yahoo.config.application.api.DeployLogger; import com.yahoo.config.application.api.DeploymentSpec; import com.yahoo.config.model.ConfigModelContext; import com.yahoo.config.model.api.ConfigServerSpec; +import com.yahoo.config.model.api.ContainerEndpoint; import com.yahoo.config.model.application.provider.IncludeDirs; import com.yahoo.config.model.builder.xml.ConfigModelBuilder; import com.yahoo.config.model.builder.xml.ConfigModelId; @@ -72,6 +73,7 @@ import org.w3c.dom.Node; import java.net.URI; import java.util.ArrayList; import java.util.Collections; +import 
java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; @@ -212,13 +214,13 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> { context.getDeployState().getProperties().athenzDnsSuffix(), context.getDeployState().zone(), deploymentSpec); - addRotationProperties(cluster, context.getDeployState().zone(), context.getDeployState().getRotations(), deploymentSpec); + addRotationProperties(cluster, context.getDeployState().zone(), context.getDeployState().getRotations(), context.getDeployState().getEndpoints(), deploymentSpec); }); } - private void addRotationProperties(ApplicationContainerCluster cluster, Zone zone, Set<Rotation> rotations, DeploymentSpec spec) { + private void addRotationProperties(ApplicationContainerCluster cluster, Zone zone, Set<Rotation> rotations, Set<ContainerEndpoint> endpoints, DeploymentSpec spec) { cluster.getContainers().forEach(container -> { - setRotations(container, rotations, spec.globalServiceId(), cluster.getName()); + setRotations(container, rotations, endpoints, spec.globalServiceId(), cluster.getName()); container.setProp("activeRotation", Boolean.toString(zoneHasActiveRotation(zone, spec))); }); } @@ -229,13 +231,30 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> { declaredZone.active()); } - private void setRotations(Container container, Set<Rotation> rotations, Optional<String> globalServiceId, String containerClusterName) { + private void setRotations(Container container, + Set<Rotation> rotations, + Set<ContainerEndpoint> endpoints, + Optional<String> globalServiceId, + String containerClusterName) { + final Set<String> rotationsProperty = new HashSet<>(); + // Add the legacy rotations to the list of available rotations. Using the same test + // as was used before to mirror the old business logic for global-service-id. if ( ! 
rotations.isEmpty() && globalServiceId.isPresent()) { if (containerClusterName.equals(globalServiceId.get())) { - container.setProp("rotations", rotations.stream().map(Rotation::getId).collect(Collectors.joining(","))); + rotations.stream().map(Rotation::getId).forEach(rotationsProperty::add); } } + + // For ContainerEndpoints this is more straight-forward, just add all that are present + endpoints.stream() + .filter(endpoint -> endpoint.clusterId().equals(containerClusterName)) + .flatMap(endpoint -> endpoint.names().stream()) + .forEach(rotationsProperty::add); + + // Build the comma delimited list of endpoints this container should be known as. + // Confusingly called 'rotations' for legacy reasons. + container.setProp("rotations", String.join(",", rotationsProperty)); } private void addRoutingAliases(ApplicationContainerCluster cluster, Element spec, Environment environment) { diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/processing/VespaMlModelTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/processing/VespaMlModelTestCase.java index 34b727f9f4e..412264aec57 100644 --- a/config-model/src/test/java/com/yahoo/searchdefinition/processing/VespaMlModelTestCase.java +++ b/config-model/src/test/java/com/yahoo/searchdefinition/processing/VespaMlModelTestCase.java @@ -26,7 +26,7 @@ public class VespaMlModelTestCase { private final String expectedRankConfig = "constant(constant1).type : tensor(x[3])\n" + - "constant(constant1).value : tensor(x[3]):{{x:0}:0.5,{x:1}:1.5,{x:2}:2.5}\n" + + "constant(constant1).value : tensor(x[3]):[0.5, 1.5, 2.5]\n" + "rankingExpression(foo1).rankingScript : reduce(reduce(input1 * input2, sum, name) * constant(constant1), max, x) * 3.0\n" + "rankingExpression(foo1).input2.type : tensor(x[3])\n" + "rankingExpression(foo1).input1.type : tensor(name{},x[3])\n" + diff --git a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerClusterTest.java 
b/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerClusterTest.java index a08c5394dda..ff38a184eec 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerClusterTest.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerClusterTest.java @@ -33,7 +33,7 @@ import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.g import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.getHostedModel; import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.getModel; import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.getQrStartConfig; -import static com.yahoo.vespa.model.admin.monitoring.DefaultMetricsConsumer.VESPA_CONSUMER_ID; +import static com.yahoo.vespa.model.admin.monitoring.VespaMetricsConsumer.VESPA_CONSUMER_ID; import static com.yahoo.vespa.model.admin.monitoring.DefaultVespaMetrics.defaultVespaMetricSet; import static com.yahoo.vespa.model.admin.monitoring.NetworkMetrics.networkMetricSet; import static com.yahoo.vespa.model.admin.monitoring.SystemMetrics.systemMetricSet; diff --git a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerTest.java b/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerTest.java index d2bf4b601a6..f755871ac4b 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerTest.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerTest.java @@ -11,7 +11,6 @@ import org.junit.Test; import static com.yahoo.config.model.api.container.ContainerServiceType.METRICS_PROXY_CONTAINER; import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.CLUSTER_CONFIG_ID; import static 
com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.CONTAINER_CONFIG_ID; -import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.MY_FLAVOR; import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.getHostedModel; import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.getModel; import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.getNodeDimensionsConfig; @@ -33,7 +32,6 @@ public class MetricsProxyContainerTest { public void one_metrics_proxy_container_is_added_to_every_node() { var numberOfHosts = 4; var tester = new VespaModelTester(); - tester.enableMetricsProxyContainer(true); tester.addHosts(numberOfHosts); VespaModel model = tester.createModel(servicesWithManyNodes(), true); @@ -108,8 +106,6 @@ public class MetricsProxyContainerTest { assertEquals("content", config.dimensions(NodeDimensionNames.CLUSTER_TYPE)); assertEquals("my-content", config.dimensions(NodeDimensionNames.CLUSTER_ID)); - assertEquals(MY_FLAVOR, config.dimensions(NodeDimensionNames.FLAVOR)); - assertEquals(MY_FLAVOR, config.dimensions(NodeDimensionNames.CANONICAL_FLAVOR)); } @@ -162,7 +158,7 @@ public class MetricsProxyContainerTest { " </admin>", " <content version='1.0' id='my-content'>", " <documents />", - " <nodes count='1' flavor='" + MY_FLAVOR + "' />", + " <nodes count='1' />", " </content>", "</services>" ); diff --git a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyModelTester.java b/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyModelTester.java index 13589c763e2..59b7110e96e 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyModelTester.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyModelTester.java @@ -16,7 +16,7 @@ import com.yahoo.vespa.model.VespaModel; import com.yahoo.vespa.model.admin.monitoring.Metric; import 
com.yahoo.vespa.model.test.VespaModelTester; -import static com.yahoo.vespa.model.admin.monitoring.DefaultMetricsConsumer.VESPA_CONSUMER_ID; +import static com.yahoo.vespa.model.admin.monitoring.VespaMetricsConsumer.VESPA_CONSUMER_ID; import static org.junit.Assert.assertEquals; /** @@ -27,7 +27,6 @@ class MetricsProxyModelTester { static final String MY_TENANT = "mytenant"; static final String MY_APPLICATION = "myapp"; static final String MY_INSTANCE = "myinstance"; - static final String MY_FLAVOR = "myflavor"; static final String CLUSTER_CONFIG_ID = "admin/metrics"; @@ -37,7 +36,6 @@ class MetricsProxyModelTester { static VespaModel getModel(String servicesXml) { var numberOfHosts = 1; var tester = new VespaModelTester(); - tester.enableMetricsProxyContainer(true); tester.addHosts(numberOfHosts); tester.setHosted(false); return tester.createModel(servicesXml, true); @@ -46,8 +44,7 @@ class MetricsProxyModelTester { static VespaModel getHostedModel(String servicesXml) { var numberOfHosts = 2; var tester = new VespaModelTester(); - tester.enableMetricsProxyContainer(true); - tester.addHosts(flavorFromString(MY_FLAVOR), numberOfHosts); + tester.addHosts(numberOfHosts); tester.setHosted(true); tester.setApplicationId(MY_TENANT, MY_APPLICATION, MY_INSTANCE); return tester.createModel(servicesXml, true); diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilderTest.java b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilderTest.java index c7816c23119..f787453dfb6 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilderTest.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilderTest.java @@ -6,6 +6,7 @@ import com.yahoo.component.ComponentId; import com.yahoo.config.application.api.ApplicationPackage; import com.yahoo.config.application.api.DeployLogger; import com.yahoo.config.model.NullConfigModelRegistry; +import 
com.yahoo.config.model.api.ContainerEndpoint; import com.yahoo.config.model.builder.xml.test.DomBuilderTest; import com.yahoo.config.model.deploy.DeployState; import com.yahoo.config.model.deploy.TestProperties; @@ -33,6 +34,7 @@ import com.yahoo.vespa.model.AbstractService; import com.yahoo.vespa.model.VespaModel; import com.yahoo.vespa.model.container.Container; import com.yahoo.vespa.model.container.ContainerCluster; +import com.yahoo.vespa.model.container.ContainerModel; import com.yahoo.vespa.model.container.SecretStore; import com.yahoo.vespa.model.container.component.Component; import com.yahoo.vespa.model.content.utils.ContentClusterUtils; @@ -45,8 +47,11 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.logging.Level; +import java.util.stream.Collectors; +import static com.yahoo.config.model.test.TestUtil.joinLines; import static com.yahoo.test.LinePatternMatcher.containsLineWithPattern; import static com.yahoo.vespa.defaults.Defaults.getDefaults; import static org.hamcrest.CoreMatchers.is; @@ -611,6 +616,48 @@ public class ContainerModelBuilderTest extends ContainerModelBuilderTestBase { } @Test + public void endpoints_are_added_to_containers() throws IOException, SAXException { + final var servicesXml = joinLines("", + "<container id='comics-search' version='1.0'>", + " <nodes>", + " <node hostalias='host1' />", + " </nodes>", + "</container>" + ); + + final var deploymentXml = joinLines("", + "<deployment version='1.0'>", + " <prod />", + "</deployment>" + ); + + final var applicationPackage = new MockApplicationPackage.Builder() + .withServices(servicesXml) + .withDeploymentSpec(deploymentXml) + .build(); + + final var deployState = new DeployState.Builder() + .applicationPackage(applicationPackage) + .zone(new Zone(Environment.prod, RegionName.from("us-east-1"))) + .endpoints(Set.of(new ContainerEndpoint("comics-search", List.of("nalle", "balle")))) + 
.properties(new TestProperties().setHostedVespa(true)) + .build(); + + final var model = new VespaModel(new NullConfigModelRegistry(), deployState); + final var containers = model.getContainerClusters().values().stream() + .flatMap(cluster -> cluster.getContainers().stream()) + .collect(Collectors.toList()); + + assertFalse("Missing container objects based on configuration", containers.isEmpty()); + + containers.forEach(container -> { + final var rotations = container.getServicePropertyString("rotations").split(","); + final var rotationsSet = Set.of(rotations); + assertEquals(Set.of("balle", "nalle"), rotationsSet); + }); + } + + @Test public void singlenode_servicespec_is_used_with_hosted_vespa() throws IOException, SAXException { String servicesXml = "<container id='default' version='1.0' />"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); diff --git a/config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTester.java b/config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTester.java index 866c4027711..2e5acb9025d 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTester.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTester.java @@ -48,7 +48,6 @@ public class VespaModelTester { private Map<NodeResources, Collection<Host>> hostsByResources = new HashMap<>(); private ApplicationId applicationId = ApplicationId.defaultId(); private boolean useDedicatedNodeForLogserver = false; - private boolean enableMetricsProxyContainer = false; public VespaModelTester() { this(new NullConfigModelRegistry()); @@ -105,10 +104,6 @@ public class VespaModelTester { this.useDedicatedNodeForLogserver = useDedicatedNodeForLogserver; } - public void enableMetricsProxyContainer(boolean enableMetricsProxyContainer) { - this.enableMetricsProxyContainer = enableMetricsProxyContainer; - } - /** Creates a model which uses 0 as start index and fails on out 
of capacity */ public VespaModel createModel(String services, String ... retiredHostNames) { return createModel(Zone.defaultZone(), services, true, retiredHostNames); @@ -149,8 +144,7 @@ public class VespaModelTester { .setMultitenant(true) .setHostedVespa(hosted) .setApplicationId(applicationId) - .setUseDedicatedNodeForLogserver(useDedicatedNodeForLogserver) - .setEnableMetricsProxyContainer(enableMetricsProxyContainer); + .setUseDedicatedNodeForLogserver(useDedicatedNodeForLogserver); DeployState deployState = new DeployState.Builder() .applicationPackage(appPkg) diff --git a/config-provisioning/abi-spec.json b/config-provisioning/abi-spec.json index 18f4d317019..cf3f2d35bd7 100644 --- a/config-provisioning/abi-spec.json +++ b/config-provisioning/abi-spec.json @@ -391,18 +391,14 @@ "public boolean hasFastDisk()", "public double getBandwidth()", "public double getMinCpuCores()", - "public java.lang.String getDescription()", "public boolean isRetired()", "public com.yahoo.config.provision.Flavor$Type getType()", "public boolean isDocker()", - "public int getIdealHeadroom()", "public java.lang.String canonicalName()", "public boolean isCanonical()", "public java.util.List replaces()", "public boolean satisfies(com.yahoo.config.provision.Flavor)", - "public boolean hasAtLeast(com.yahoo.config.provision.NodeResources)", "public void freeze()", - "public boolean isLargerThan(com.yahoo.config.provision.Flavor)", "public int hashCode()", "public boolean equals(java.lang.Object)", "public java.lang.String toString()" diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/AllocatedHosts.java b/config-provisioning/src/main/java/com/yahoo/config/provision/AllocatedHosts.java index de4f3a555bd..96942c53a12 100644 --- a/config-provisioning/src/main/java/com/yahoo/config/provision/AllocatedHosts.java +++ b/config-provisioning/src/main/java/com/yahoo/config/provision/AllocatedHosts.java @@ -26,6 +26,13 @@ import java.util.Set; */ public class AllocatedHosts { 
+ // WARNING: Since there are multiple servers in a ZooKeeper cluster and they upgrade one by one + // (and rewrite all nodes on startup), changes to the serialized format must be made + // such that what is serialized on version N+1 can be read by version N: + // - ADDING FIELDS: Always ok + // - REMOVING FIELDS: Stop reading the field first. Stop writing it on a later version. + // - CHANGING THE FORMAT OF A FIELD: Don't do it bro. + private static final String mappingKey = "mapping"; private static final String hostSpecKey = "hostSpec"; private static final String hostSpecHostNameKey = "hostName"; diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/Flavor.java b/config-provisioning/src/main/java/com/yahoo/config/provision/Flavor.java index 8667707883d..b393d9ee22a 100644 --- a/config-provisioning/src/main/java/com/yahoo/config/provision/Flavor.java +++ b/config-provisioning/src/main/java/com/yahoo/config/provision/Flavor.java @@ -5,10 +5,8 @@ import com.google.common.collect.ImmutableList; import com.yahoo.config.provisioning.FlavorsConfig; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Objects; -import java.util.Optional; /** * A host or node flavor. @@ -26,10 +24,8 @@ public class Flavor { private final boolean isStock; private final Type type; private final double bandwidth; - private final String description; private final boolean retired; private List<Flavor> replacesFlavors; - private int idealHeadroom; // Note: Not used after Vespa 6.282 /** The hardware resources of this flavor */ private NodeResources resources; @@ -46,10 +42,8 @@ public class Flavor { flavorConfig.minDiskAvailableGb(), flavorConfig.fastDisk() ? 
NodeResources.DiskSpeed.fast : NodeResources.DiskSpeed.slow); this.bandwidth = flavorConfig.bandwidth(); - this.description = flavorConfig.description(); this.retired = flavorConfig.retired(); this.replacesFlavors = new ArrayList<>(); - this.idealHeadroom = flavorConfig.idealHeadroom(); } /** Creates a *node* flavor from a node resources spec */ @@ -64,10 +58,8 @@ public class Flavor { this.isStock = true; this.type = Type.DOCKER_CONTAINER; this.bandwidth = 1; - this.description = ""; this.retired = false; - this.replacesFlavors = Collections.emptyList(); - this.idealHeadroom = 0; + this.replacesFlavors = List.of(); this.resources = resources; } @@ -102,8 +94,6 @@ public class Flavor { public double getMinCpuCores() { return resources.vcpu(); } - public String getDescription() { return description; } - /** Returns whether the flavor is retired */ public boolean isRetired() { return retired; @@ -114,11 +104,6 @@ public class Flavor { /** Convenience, returns getType() == Type.DOCKER_CONTAINER */ public boolean isDocker() { return type == Type.DOCKER_CONTAINER; } - /** The free capacity we would like to preserve for this flavor */ - public int getIdealHeadroom() { - return idealHeadroom; - } - /** * Returns the canonical name of this flavor - which is the name which should be used as an interface to users. * The canonical name of this flavor is: @@ -164,23 +149,10 @@ public class Flavor { return false; } - /** - * Returns whether this flavor has at least the given resources, i.e if all resources of this are at least - * as large as the given resources. 
- */ - public boolean hasAtLeast(NodeResources resources) { - return this.resources.satisfies(resources); - } - /** Irreversibly freezes the content of this */ public void freeze() { replacesFlavors = ImmutableList.copyOf(replacesFlavors); } - - /** Returns whether this flavor has at least as much of each hardware resource as the given flavor */ - public boolean isLargerThan(Flavor other) { - return hasAtLeast(other.resources); - } @Override public int hashCode() { return name.hashCode(); } diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/zone/ZoneApi.java b/config-provisioning/src/main/java/com/yahoo/config/provision/zone/ZoneApi.java index 9f6ba29d8de..fd76dc10bdb 100644 --- a/config-provisioning/src/main/java/com/yahoo/config/provision/zone/ZoneApi.java +++ b/config-provisioning/src/main/java/com/yahoo/config/provision/zone/ZoneApi.java @@ -17,8 +17,4 @@ public interface ZoneApi { default RegionName getRegionName() { return getId().region(); } CloudName getCloudName(); - - default ZoneId toDeprecatedId() { - return ZoneId.from(getEnvironment(), getRegionName(), getCloudName(), getSystemName()); - } } diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/zone/ZoneId.java b/config-provisioning/src/main/java/com/yahoo/config/provision/zone/ZoneId.java index 5e664e00b4c..b0ac59718aa 100644 --- a/config-provisioning/src/main/java/com/yahoo/config/provision/zone/ZoneId.java +++ b/config-provisioning/src/main/java/com/yahoo/config/provision/zone/ZoneId.java @@ -20,30 +20,16 @@ public class ZoneId { private final Environment environment; private final RegionName region; - private final SystemName system; - private ZoneId(Environment environment, RegionName region, CloudName cloud, SystemName system) { + private ZoneId(Environment environment, RegionName region) { this.environment = Objects.requireNonNull(environment, "environment must be non-null"); this.region = Objects.requireNonNull(region, "region must be non-null"); - 
this.system = Objects.requireNonNull(system, "system must be non-null"); - } - - private ZoneId(Environment environment, RegionName region) { - this(environment, region, CloudName.defaultName(), SystemName.defaultSystem()); } public static ZoneId from(Environment environment, RegionName region) { return new ZoneId(environment, region); } - public static ZoneId from(SystemName system, Environment environment, RegionName region) { - return new ZoneId(environment, region, CloudName.defaultName(), system); - } - - public static ZoneId from(Environment environment, RegionName region, CloudName cloud, SystemName system) { - return new ZoneId(environment, region, cloud, system); - } - public static ZoneId from(String environment, String region) { return from(Environment.from(environment), RegionName.from(region)); } @@ -55,20 +41,14 @@ public class ZoneId { case 2: return from(parts[0], parts[1]); case 4: - return from(parts[2], parts[3], parts[0], parts[1]); + // Deprecated: parts[0] == cloud, parts[1] == system + // TODO: Figure out whether this can be removed + return from(parts[2], parts[3]); default: throw new IllegalArgumentException("Cannot deserialize zone id '" + value + "'"); } } - public static ZoneId from(Environment environment, RegionName region, CloudName cloud) { - return new ZoneId(environment, region, cloud, SystemName.defaultSystem()); - } - - public static ZoneId from(String environment, String region, String cloud, String system) { - return new ZoneId(Environment.from(environment), RegionName.from(region), CloudName.from(cloud), SystemName.from(system)); - } - public static ZoneId defaultId() { return new ZoneId(Environment.defaultEnvironment(), RegionName.defaultName()); } @@ -81,10 +61,6 @@ public class ZoneId { return region; } - public SystemName system() { - return system; - } - /** Returns the serialised value of this. Inverse of {@code ZoneId.from(String value)}. */ public String value() { return environment + "." 
+ region; diff --git a/config-provisioning/src/main/resources/configdefinitions/flavors.def b/config-provisioning/src/main/resources/configdefinitions/flavors.def index 1e40f6f8f36..1cfb18d2cd2 100644 --- a/config-provisioning/src/main/resources/configdefinitions/flavors.def +++ b/config-provisioning/src/main/resources/configdefinitions/flavors.def @@ -43,12 +43,6 @@ flavor[].fastDisk bool default=true # Expected network interface bandwidth available for this flavor, in Mbit/s. flavor[].bandwidth double default=0.0 -# Human readable free text for description of node. -flavor[].description string default="" - # The flavor is retired and should no longer be used. flavor[].retired bool default=false -# The free capacity we would like to preserve for this flavor -# Note: Not used after Vespa 6.282 -flavor[].idealHeadroom int default=0 diff --git a/config-provisioning/src/test/java/com/yahoo/config/provision/NodeFlavorsTest.java b/config-provisioning/src/test/java/com/yahoo/config/provision/NodeFlavorsTest.java index 81f3798a370..55ffa821e26 100644 --- a/config-provisioning/src/test/java/com/yahoo/config/provision/NodeFlavorsTest.java +++ b/config-provisioning/src/test/java/com/yahoo/config/provision/NodeFlavorsTest.java @@ -10,9 +10,7 @@ import java.util.ArrayList; import java.util.List; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; public class NodeFlavorsTest { @@ -59,16 +57,6 @@ public class NodeFlavorsTest { } @Test - public void testHasAtLeast() { - Flavor flavor = new Flavor(new NodeResources(1, 2, 3)); - assertTrue(flavor.hasAtLeast(new NodeResources(1, 2, 3))); - assertTrue(flavor.hasAtLeast(new NodeResources(1, 1.5, 2))); - assertFalse(flavor.hasAtLeast(new NodeResources(1, 1.5, 4))); - assertFalse(flavor.hasAtLeast(new NodeResources(2, 1.5, 4))); - assertFalse(flavor.hasAtLeast(new NodeResources(1, 2.1, 4))); - } - - @Test public 
void testRetiredFlavorWithoutReplacement() { FlavorsConfig.Builder builder = new FlavorsConfig.Builder(); List<FlavorsConfig.Flavor.Builder> flavorBuilderList = new ArrayList<>(); diff --git a/config-provisioning/src/test/java/com/yahoo/config/provision/ZoneIdTest.java b/config-provisioning/src/test/java/com/yahoo/config/provision/ZoneIdTest.java index 27d45ba7d7d..434badbe9bf 100644 --- a/config-provisioning/src/test/java/com/yahoo/config/provision/ZoneIdTest.java +++ b/config-provisioning/src/test/java/com/yahoo/config/provision/ZoneIdTest.java @@ -26,17 +26,6 @@ public class ZoneIdTest { ZoneId zoneId = ZoneId.from(environment, region); assertEquals(region, zoneId.region()); assertEquals(environment, zoneId.environment()); - assertEquals(SystemName.defaultSystem(), zoneId.system()); - - ZoneId zoneIdWithSystem = ZoneId.from(system, environment, region); - assertEquals(region, zoneIdWithSystem.region()); - assertEquals(environment, zoneIdWithSystem.environment()); - assertEquals(system, zoneIdWithSystem.system()); - - ZoneId zoneIdWithCloudAndSystem = ZoneId.from(environment, region, cloud, system); - assertEquals(region, zoneIdWithCloudAndSystem.region()); - assertEquals(environment, zoneIdWithCloudAndSystem.environment()); - assertEquals(system, zoneIdWithCloudAndSystem.system()); } @Test @@ -45,12 +34,6 @@ public class ZoneIdTest { assertEquals(environment.value() + "." + region.value(), zoneId.value()); assertEquals(ZoneId.from(zoneId.value()), zoneId); - ZoneId zoneIdWithCloudAndSystem = ZoneId.from(environment, region, cloud, system); - assertEquals(environment.value() + "." + region.value(), zoneIdWithCloudAndSystem.value()); - assertEquals(ZoneId.from(zoneIdWithCloudAndSystem.value()), zoneIdWithCloudAndSystem); - // TODO: Expect cloud and system to be part of deserialized value when the new format is supported everywhere - //assertEquals(cloud.value() + "." + system.name() + "." + environment.value() + "." 
+ region.value() , zoneId.value()); - String serializedZoneId = "some.illegal.value"; expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage("Cannot deserialize zone id '" + serializedZoneId + "'"); diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java index 0279d175488..4627d350eb2 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java @@ -7,6 +7,7 @@ import com.yahoo.config.application.api.DeployLogger; import com.yahoo.config.application.api.FileRegistry; import com.yahoo.config.model.api.ConfigDefinitionRepo; import com.yahoo.config.model.api.ConfigServerSpec; +import com.yahoo.config.model.api.ContainerEndpoint; import com.yahoo.config.model.api.HostProvisioner; import com.yahoo.config.model.api.Model; import com.yahoo.config.model.api.ModelContext; @@ -126,13 +127,13 @@ public class ModelContextImpl implements ModelContext { private final boolean hostedVespa; private final Zone zone; private final Set<Rotation> rotations; + private final Set<ContainerEndpoint> endpoints; private final boolean isBootstrap; private final boolean isFirstTimeDeployment; private final boolean useDedicatedNodeForLogserver; private final boolean useFdispatchByDefault; private final boolean useAdaptiveDispatch; private final boolean dispatchWithProtobuf; - private final boolean enableMetricsProxyContainer; public Properties(ApplicationId applicationId, boolean multitenantFromConfig, @@ -143,6 +144,7 @@ public class ModelContextImpl implements ModelContext { boolean hostedVespa, Zone zone, Set<Rotation> rotations, + Set<ContainerEndpoint> endpoints, boolean isBootstrap, boolean isFirstTimeDeployment, FlagSource flagSource) { @@ -155,6 +157,7 @@ public class ModelContextImpl implements 
ModelContext { this.hostedVespa = hostedVespa; this.zone = zone; this.rotations = rotations; + this.endpoints = endpoints; this.isBootstrap = isBootstrap; this.isFirstTimeDeployment = isFirstTimeDeployment; this.useDedicatedNodeForLogserver = Flags.USE_DEDICATED_NODE_FOR_LOGSERVER.bindTo(flagSource) @@ -165,8 +168,6 @@ public class ModelContextImpl implements ModelContext { .with(FetchVector.Dimension.APPLICATION_ID, applicationId.serializedForm()).value(); this.useAdaptiveDispatch = Flags.USE_ADAPTIVE_DISPATCH.bindTo(flagSource) .with(FetchVector.Dimension.APPLICATION_ID, applicationId.serializedForm()).value(); - this.enableMetricsProxyContainer = Flags.ENABLE_METRICS_PROXY_CONTAINER.bindTo(flagSource) - .with(FetchVector.Dimension.APPLICATION_ID, applicationId.serializedForm()).value(); } @Override @@ -201,6 +202,9 @@ public class ModelContextImpl implements ModelContext { public Set<Rotation> rotations() { return rotations; } @Override + public Set<ContainerEndpoint> endpoints() { return endpoints; } + + @Override public boolean isBootstrap() { return isBootstrap; } @Override @@ -218,8 +222,6 @@ public class ModelContextImpl implements ModelContext { @Override public boolean useAdaptiveDispatch() { return useAdaptiveDispatch; } - @Override - public boolean enableMetricsProxyContainer() { return enableMetricsProxyContainer; } } } diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ActivatedModelsBuilder.java b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ActivatedModelsBuilder.java index 6351a93e6e6..117a9e0cac5 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ActivatedModelsBuilder.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ActivatedModelsBuilder.java @@ -1,6 +1,7 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.config.server.modelfactory; +import com.google.common.collect.ImmutableSet; import com.yahoo.component.Version; import com.yahoo.config.application.api.ApplicationPackage; import com.yahoo.config.application.api.DeployLogger; @@ -24,6 +25,7 @@ import com.yahoo.vespa.config.server.monitoring.Metrics; import com.yahoo.vespa.config.server.provision.HostProvisionerProvider; import com.yahoo.vespa.config.server.session.SessionZooKeeperClient; import com.yahoo.vespa.config.server.session.SilentDeployLogger; +import com.yahoo.vespa.config.server.tenant.ContainerEndpointsCache; import com.yahoo.vespa.config.server.tenant.Rotations; import com.yahoo.vespa.config.server.tenant.TenantRepository; import com.yahoo.vespa.curator.Curator; @@ -127,6 +129,7 @@ public class ActivatedModelsBuilder extends ModelsBuilder<Application> { configserverConfig.hostedVespa(), zone(), new Rotations(curator, TenantRepository.getTenantPath(tenant)).readRotationsFromZooKeeper(applicationId), + ImmutableSet.copyOf(new ContainerEndpointsCache(TenantRepository.getTenantPath(tenant), curator).read(applicationId)), false, // We may be bootstrapping, but we only know and care during prepare false, // Always false, assume no one uses it when activating flagSource); diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/PrepareParams.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/PrepareParams.java index 4cabf39edcc..00a7625ee87 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/PrepareParams.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/PrepareParams.java @@ -10,7 +10,7 @@ import com.yahoo.slime.Slime; import com.yahoo.vespa.config.SlimeUtils; import com.yahoo.vespa.config.server.TimeoutBudget; import com.yahoo.vespa.config.server.http.SessionHandler; -import com.yahoo.vespa.config.server.tenant.ContainerEndpoint; +import com.yahoo.config.model.api.ContainerEndpoint; import 
com.yahoo.vespa.config.server.tenant.ContainerEndpointSerializer; import java.time.Clock; diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java index 7af61a6efc1..30ba9989343 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java @@ -2,6 +2,7 @@ package com.yahoo.vespa.config.server.session; import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; import com.google.inject.Inject; import com.yahoo.cloud.config.ConfigserverConfig; import com.yahoo.component.Version; @@ -20,7 +21,6 @@ import com.yahoo.config.provision.Zone; import com.yahoo.lang.SettableOptional; import com.yahoo.log.LogLevel; import com.yahoo.path.Path; -import com.yahoo.vespa.applicationmodel.ClusterId; import com.yahoo.vespa.config.server.ConfigServerSpec; import com.yahoo.vespa.config.server.application.ApplicationSet; import com.yahoo.vespa.config.server.application.PermanentApplicationPackage; @@ -31,7 +31,7 @@ import com.yahoo.vespa.config.server.http.InvalidApplicationException; import com.yahoo.vespa.config.server.modelfactory.ModelFactoryRegistry; import com.yahoo.vespa.config.server.modelfactory.PreparedModelsBuilder; import com.yahoo.vespa.config.server.provision.HostProvisionerProvider; -import com.yahoo.vespa.config.server.tenant.ContainerEndpoint; +import com.yahoo.config.model.api.ContainerEndpoint; import com.yahoo.vespa.config.server.tenant.ContainerEndpointsCache; import com.yahoo.vespa.config.server.tenant.Rotations; import com.yahoo.vespa.curator.Curator; @@ -43,6 +43,7 @@ import javax.xml.transform.TransformerException; import java.io.IOException; import java.net.URI; import java.time.Instant; +import java.util.HashSet; import java.util.List; import java.util.Map; import 
java.util.Optional; @@ -142,6 +143,7 @@ public class SessionPreparer { final Rotations rotations; // TODO: Remove this once we have migrated fully to container endpoints final ContainerEndpointsCache containerEndpoints; final Set<Rotation> rotationsSet; + final Set<ContainerEndpoint> endpointsSet; final ModelContext.Properties properties; private ApplicationPackage applicationPackage; @@ -163,6 +165,7 @@ public class SessionPreparer { this.rotations = new Rotations(curator, tenantPath); this.containerEndpoints = new ContainerEndpointsCache(tenantPath, curator); this.rotationsSet = getRotations(params.rotations()); + this.endpointsSet = getEndpoints(params.containerEndpoints()); this.properties = new ModelContextImpl.Properties(params.getApplicationId(), configserverConfig.multitenant(), ConfigServerSpec.fromConfig(configserverConfig), @@ -172,6 +175,7 @@ public class SessionPreparer { configserverConfig.hostedVespa(), zone, rotationsSet, + endpointsSet, params.isBootstrap(), ! currentActiveApplicationSet.isPresent(), context.getFlagSource()); @@ -266,10 +270,17 @@ public class SessionPreparer { return rotations; } + private Set<ContainerEndpoint> getEndpoints(List<ContainerEndpoint> endpoints) { + if (endpoints == null || endpoints.isEmpty()) { + endpoints = this.containerEndpoints.read(applicationId); + } + return ImmutableSet.copyOf(endpoints); + } + } private static List<ContainerEndpoint> toContainerEndpoints(String globalServceId, Set<Rotation> rotations) { - return List.of(new ContainerEndpoint(new ClusterId(globalServceId), + return List.of(new ContainerEndpoint(globalServceId, rotations.stream() .map(Rotation::getId) .collect(Collectors.toUnmodifiableList()))); diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializer.java index 379af7f71ea..4ffce8a697e 100644 --- 
a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializer.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializer.java @@ -1,11 +1,11 @@ // Copyright 2019 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.config.server.tenant; +import com.yahoo.config.model.api.ContainerEndpoint; import com.yahoo.slime.ArrayTraverser; import com.yahoo.slime.Cursor; import com.yahoo.slime.Inspector; import com.yahoo.slime.Slime; -import com.yahoo.vespa.applicationmodel.ClusterId; import java.util.ArrayList; import java.util.List; @@ -18,6 +18,13 @@ import java.util.List; */ public class ContainerEndpointSerializer { + // WARNING: Since there are multiple servers in a ZooKeeper cluster and they upgrade one by one + // (and rewrite all nodes on startup), changes to the serialized format must be made + // such that what is serialized on version N+1 can be read by version N: + // - ADDING FIELDS: Always ok + // - REMOVING FIELDS: Stop reading the field first. Stop writing it on a later version. + // - CHANGING THE FORMAT OF A FIELD: Don't do it bro. 
+ private static final String clusterIdField = "clusterId"; private static final String namesField = "names"; @@ -42,7 +49,7 @@ public class ContainerEndpointSerializer { names.add(containerName); }); - return new ContainerEndpoint(new ClusterId(clusterId), names); + return new ContainerEndpoint(clusterId, names); } public static List<ContainerEndpoint> endpointListFromSlime(Slime slime) { diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointsCache.java b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointsCache.java index 7e29f9abc1d..9bce1224d96 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointsCache.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointsCache.java @@ -1,6 +1,7 @@ // Copyright 2019 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.config.server.tenant; +import com.yahoo.config.model.api.ContainerEndpoint; import com.yahoo.config.provision.ApplicationId; import com.yahoo.path.Path; import com.yahoo.vespa.config.SlimeUtils; diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/ModelContextImplTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/ModelContextImplTest.java index 23326474371..b483705e3f5 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/ModelContextImplTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/ModelContextImplTest.java @@ -2,6 +2,7 @@ package com.yahoo.vespa.config.server; import com.yahoo.component.Version; +import com.yahoo.config.model.api.ContainerEndpoint; import com.yahoo.config.model.api.ModelContext; import com.yahoo.config.model.application.provider.BaseDeployLogger; import com.yahoo.config.model.application.provider.MockFileRegistry; @@ -14,6 +15,7 @@ import com.yahoo.vespa.flags.InMemoryFlagSource; import 
org.junit.Test; import java.util.Collections; +import java.util.List; import java.util.Optional; import java.util.Set; @@ -33,6 +35,10 @@ public class ModelContextImplTest { final Rotation rotation = new Rotation("this.is.a.mock.rotation"); final Set<Rotation> rotations = Collections.singleton(rotation); + + final ContainerEndpoint endpoint = new ContainerEndpoint("foo", List.of("a", "b")); + final Set<ContainerEndpoint> endpoints = Collections.singleton(endpoint); + final InMemoryFlagSource flagSource = new InMemoryFlagSource(); ModelContext context = new ModelContextImpl( @@ -53,6 +59,7 @@ public class ModelContextImplTest { false, Zone.defaultZone(), rotations, + endpoints, false, false, flagSource), @@ -71,6 +78,7 @@ public class ModelContextImplTest { assertNotNull(context.properties().zone()); assertFalse(context.properties().hostedVespa()); assertThat(context.properties().rotations(), equalTo(rotations)); + assertThat(context.properties().endpoints(), equalTo(endpoints)); assertThat(context.properties().isFirstTimeDeployment(), equalTo(false)); assertThat(context.properties().useDedicatedNodeForLogserver(), equalTo(true)); } diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/model/LbServicesProducerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/model/LbServicesProducerTest.java index 395c1ecb80b..1f99f59eb8e 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/model/LbServicesProducerTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/model/LbServicesProducerTest.java @@ -5,6 +5,7 @@ import com.yahoo.cloud.config.LbServicesConfig; import com.yahoo.config.application.api.ApplicationPackage; import com.yahoo.config.model.NullConfigModelRegistry; import com.yahoo.config.model.api.ApplicationInfo; +import com.yahoo.config.model.api.ContainerEndpoint; import com.yahoo.config.model.api.Model; import com.yahoo.config.model.deploy.DeployState; import 
com.yahoo.config.model.deploy.TestProperties; @@ -20,11 +21,14 @@ import com.yahoo.vespa.flags.Flags; import com.yahoo.vespa.flags.InMemoryFlagSource; import com.yahoo.vespa.model.VespaModel; import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; import org.xml.sax.SAXException; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; +import java.util.Comparator; import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.List; @@ -33,20 +37,34 @@ import java.util.Random; import java.util.Set; import static com.yahoo.config.model.api.container.ContainerServiceType.QRSERVER; -import static org.hamcrest.Matchers.is; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; +import static org.hamcrest.Matchers.*; +import static org.junit.Assert.*; +import static org.junit.Assume.assumeFalse; +import static org.junit.Assume.assumeTrue; /** * @author Ulf Lilleengen */ +@RunWith(Parameterized.class) public class LbServicesProducerTest { private static final String rotation1 = "rotation-1"; private static final String rotation2 = "rotation-2"; private static final String rotationString = rotation1 + "," + rotation2; private static final Set<Rotation> rotations = Collections.singleton(new Rotation(rotationString)); + private static final Set<ContainerEndpoint> endpoints = Set.of( + new ContainerEndpoint("mydisc", List.of("rotation-1", "rotation-2")) + ); private final InMemoryFlagSource flagSource = new InMemoryFlagSource(); + private final boolean useGlobalServiceId; + + @Parameterized.Parameters + public static Object[] useGlobalServiceId() { + return new Object[] { true, false }; + } + + public LbServicesProducerTest(boolean useGlobalServiceId) { + this.useGlobalServiceId = useGlobalServiceId; + } @Test public void testDeterministicGetConfig() throws IOException, SAXException { @@ -123,20 +141,40 @@ 
public class LbServicesProducerTest { @Test public void testConfigAliasesWithRotations() throws IOException, SAXException { + assumeTrue(useGlobalServiceId); + Map<TenantName, Set<ApplicationInfo>> testModel = createTestModel(new DeployState.Builder() .rotations(rotations) .properties(new TestProperties().setHostedVespa(true))); RegionName regionName = RegionName.from("us-east-1"); - LbServicesConfig conf = getLbServicesConfig(new Zone(Environment.prod, regionName), testModel); - final LbServicesConfig.Tenants.Applications.Hosts.Services services = conf.tenants("foo").applications("foo:prod:" + regionName.value() + ":default").hosts("foo.foo.yahoo.com").services(QRSERVER.serviceName); - assertThat(services.servicealiases().size(), is(1)); - assertThat(services.endpointaliases().size(), is(4)); - assertThat(services.servicealiases(0), is("service1")); - assertThat(services.endpointaliases(0), is("foo1.bar1.com")); - assertThat(services.endpointaliases(1), is("foo2.bar2.com")); - assertThat(services.endpointaliases(2), is(rotation1)); - assertThat(services.endpointaliases(3), is(rotation2)); + var services = getLbServicesConfig(new Zone(Environment.prod, regionName), testModel) + .tenants("foo") + .applications("foo:prod:" + regionName.value() + ":default") + .hosts("foo.foo.yahoo.com") + .services(QRSERVER.serviceName); + + assertThat(services.servicealiases(), contains("service1")); + assertThat("Missing rotations in list: " + services.endpointaliases(), services.endpointaliases(), containsInAnyOrder("foo1.bar1.com", "foo2.bar2.com", rotation1, rotation2)); + } + + @Test + public void testConfigAliasesWithEndpoints() throws IOException, SAXException { + assumeFalse(useGlobalServiceId); + + Map<TenantName, Set<ApplicationInfo>> testModel = createTestModel(new DeployState.Builder() + .endpoints(endpoints) + .properties(new TestProperties().setHostedVespa(true))); + RegionName regionName = RegionName.from("us-east-1"); + + var services = getLbServicesConfig(new 
Zone(Environment.prod, regionName), testModel) + .tenants("foo") + .applications("foo:prod:" + regionName.value() + ":default") + .hosts("foo.foo.yahoo.com") + .services(QRSERVER.serviceName); + + assertThat(services.servicealiases(), contains("service1")); + assertThat("Missing endpoints in list: " + services.endpointaliases(), services.endpointaliases(), containsInAnyOrder("foo1.bar1.com", "foo2.bar2.com", rotation1, rotation2)); } private Map<TenantName, Set<ApplicationInfo>> randomizeApplications(Map<TenantName, Set<ApplicationInfo>> testModel, int seed) { @@ -195,14 +233,32 @@ public class LbServicesProducerTest { " <search/>" + "</jdisc>" + "</services>"; - String deploymentInfo ="<?xml version='1.0' encoding='UTF-8'?>" + - "<deployment version='1.0'>" + - " <test />" + - " <prod global-service-id='mydisc'>" + - " <region active='true'>us-east-1</region>" + - " <region active='false'>us-east-2</region>" + - " </prod>" + - "</deployment>"; + + String deploymentInfo; + + if (useGlobalServiceId) { + deploymentInfo ="<?xml version='1.0' encoding='UTF-8'?>" + + "<deployment version='1.0'>" + + " <test />" + + " <prod global-service-id='mydisc'>" + + " <region active='true'>us-east-1</region>" + + " <region active='false'>us-east-2</region>" + + " </prod>" + + "</deployment>"; + } else { + deploymentInfo ="<?xml version='1.0' encoding='UTF-8'?>" + + "<deployment version='1.0'>" + + " <test />" + + " <prod>" + + " <region active='true'>us-east-1</region>" + + " <region active='false'>us-east-2</region>" + + " </prod>" + + " <endpoints>" + + " <endpoint container-id='mydisc' />" + + " </endpoints>" + + "</deployment>"; + } + return new MockApplicationPackage.Builder().withHosts(hosts).withServices(services).withDeploymentSpec(deploymentInfo).build(); } diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java index 
6eba85af37e..f5fd6053b07 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java @@ -6,8 +6,7 @@ import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.Rotation; import com.yahoo.config.provision.TenantName; import com.yahoo.container.jdisc.HttpRequest; -import com.yahoo.vespa.applicationmodel.ClusterId; -import com.yahoo.vespa.config.server.tenant.ContainerEndpoint; +import com.yahoo.config.model.api.ContainerEndpoint; import org.junit.Test; import java.net.URLEncoder; @@ -84,10 +83,10 @@ public class PrepareParamsTest { @Test public void testCorrectParsingWithContainerEndpoints() { - var endpoints = List.of(new ContainerEndpoint(new ClusterId("qrs1"), + var endpoints = List.of(new ContainerEndpoint("qrs1", List.of("c1.example.com", "c2.example.com")), - new ContainerEndpoint(new ClusterId("qrs2"), + new ContainerEndpoint("qrs2", List.of("c3.example.com", "c4.example.com"))); var param = "[\n" + diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java index 6b2810af66c..74415993c52 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java @@ -15,7 +15,6 @@ import com.yahoo.io.IOUtils; import com.yahoo.log.LogLevel; import com.yahoo.path.Path; import com.yahoo.slime.Slime; -import com.yahoo.vespa.applicationmodel.ClusterId; import com.yahoo.vespa.config.server.MockReloadHandler; import com.yahoo.vespa.config.server.TestComponentRegistry; import com.yahoo.vespa.config.server.TimeoutBudgetTest; @@ -27,7 +26,7 @@ import com.yahoo.vespa.config.server.http.InvalidApplicationException; import 
com.yahoo.vespa.config.server.model.TestModelFactory; import com.yahoo.vespa.config.server.modelfactory.ModelFactoryRegistry; import com.yahoo.vespa.config.server.provision.HostProvisionerProvider; -import com.yahoo.vespa.config.server.tenant.ContainerEndpoint; +import com.yahoo.config.model.api.ContainerEndpoint; import com.yahoo.vespa.config.server.tenant.ContainerEndpointsCache; import com.yahoo.vespa.config.server.tenant.Rotations; import com.yahoo.vespa.config.server.zookeeper.ConfigCurator; @@ -218,7 +217,7 @@ public class SessionPreparerTest { var params = new PrepareParams.Builder().applicationId(applicationId).rotations(rotations).build(); prepare(new File("src/test/resources/deploy/hosted-app"), params); - var expected = List.of(new ContainerEndpoint(new ClusterId("qrs"), + var expected = List.of(new ContainerEndpoint("qrs", List.of("app1.tenant1.global.vespa.example.com", "rotation-042.vespa.global.routing"))); assertEquals(expected, readContainerEndpoints(applicationId)); @@ -248,10 +247,10 @@ public class SessionPreparerTest { .build(); prepare(new File("src/test/resources/deploy/hosted-app"), params); - var expected = List.of(new ContainerEndpoint(new ClusterId("foo"), + var expected = List.of(new ContainerEndpoint("foo", List.of("foo.app1.tenant1.global.vespa.example.com", "rotation-042.vespa.global.routing")), - new ContainerEndpoint(new ClusterId("bar"), + new ContainerEndpoint("bar", List.of("bar.app1.tenant1.global.vespa.example.com", "rotation-043.vespa.global.routing"))); assertEquals(expected, readContainerEndpoints(applicationId)); diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializerTest.java index aac0b6d1a16..053a3f7a15d 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializerTest.java +++ 
b/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointSerializerTest.java @@ -1,7 +1,7 @@ package com.yahoo.vespa.config.server.tenant; +import com.yahoo.config.model.api.ContainerEndpoint; import com.yahoo.slime.Slime; -import com.yahoo.vespa.applicationmodel.ClusterId; import org.junit.Test; import java.util.List; @@ -30,7 +30,7 @@ public class ContainerEndpointSerializerTest { @Test public void writeReadSingleEndpoint() { - final var endpoint = new ContainerEndpoint(new ClusterId("foo"), List.of("a", "b")); + final var endpoint = new ContainerEndpoint("foo", List.of("a", "b")); final var serialized = new Slime(); ContainerEndpointSerializer.endpointToSlime(serialized.setObject(), endpoint); final var deserialized = ContainerEndpointSerializer.endpointFromSlime(serialized.get()); @@ -40,7 +40,7 @@ public class ContainerEndpointSerializerTest { @Test public void writeReadEndpoints() { - final var endpoints = List.of(new ContainerEndpoint(new ClusterId("foo"), List.of("a", "b"))); + final var endpoints = List.of(new ContainerEndpoint("foo", List.of("a", "b"))); final var serialized = ContainerEndpointSerializer.endpointListToSlime(endpoints); final var deserialized = ContainerEndpointSerializer.endpointListFromSlime(serialized); diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointsCacheTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointsCacheTest.java index 3598b6e63c3..4400b424d1b 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointsCacheTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/ContainerEndpointsCacheTest.java @@ -1,9 +1,9 @@ // Copyright 2019 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.config.server.tenant; +import com.yahoo.config.model.api.ContainerEndpoint; import com.yahoo.config.provision.ApplicationId; import com.yahoo.path.Path; -import com.yahoo.vespa.applicationmodel.ClusterId; import com.yahoo.vespa.curator.mock.MockCurator; import org.junit.Test; @@ -17,7 +17,7 @@ public class ContainerEndpointsCacheTest { public void readWriteFromCache() { final var cache = new ContainerEndpointsCache(Path.createRoot(), new MockCurator()); final var endpoints = List.of( - new ContainerEndpoint(new ClusterId("the-cluster-1"), List.of("a", "b", "c")) + new ContainerEndpoint("the-cluster-1", List.of("a", "b", "c")) ); cache.write(ApplicationId.defaultId(), endpoints); diff --git a/container-search/abi-spec.json b/container-search/abi-spec.json index 9474c9f9160..06713d14d88 100644 --- a/container-search/abi-spec.json +++ b/container-search/abi-spec.json @@ -7148,7 +7148,10 @@ "public com.yahoo.data.access.Inspector inspect()", "public java.lang.String toString()", "public java.lang.String toJson()", - "public java.lang.StringBuilder writeJson(java.lang.StringBuilder)" + "public java.lang.StringBuilder writeJson(java.lang.StringBuilder)", + "public java.lang.Double getDouble(java.lang.String)", + "public com.yahoo.tensor.Tensor getTensor(java.lang.String)", + "public java.util.Set featureNames()" ], "fields": [] }, diff --git a/container-search/src/main/java/com/yahoo/data/JsonProducer.java b/container-search/src/main/java/com/yahoo/data/JsonProducer.java index 6d925b41379..c9dc0946a3e 100644 --- a/container-search/src/main/java/com/yahoo/data/JsonProducer.java +++ b/container-search/src/main/java/com/yahoo/data/JsonProducer.java @@ -12,6 +12,7 @@ public interface JsonProducer { * be human-readable and containing embedded newlines; also the * exact indentation etc may change, so use compact=true for a * canonical format. + * * @param target the StringBuilder to append to. 
* @return the target passed in is also returned (to allow chaining). */ @@ -20,7 +21,8 @@ public interface JsonProducer { /** * Convenience method equivalent to: * writeJson(new StringBuilder()).toString() - * @return String containing JSON representation of this object's data. + * + * @return a String containing JSON representation of this object's data. */ default String toJson() { return writeJson(new StringBuilder()).toString(); diff --git a/container-search/src/main/java/com/yahoo/prelude/fastsearch/FeatureDataField.java b/container-search/src/main/java/com/yahoo/prelude/fastsearch/FeatureDataField.java index 1f60dd3d1cf..b0003f4321e 100644 --- a/container-search/src/main/java/com/yahoo/prelude/fastsearch/FeatureDataField.java +++ b/container-search/src/main/java/com/yahoo/prelude/fastsearch/FeatureDataField.java @@ -6,9 +6,8 @@ import com.yahoo.data.access.Type; import com.yahoo.search.result.FeatureData; /** - * Class representing a "feature data" field. This was historically - * just a string containing JSON; now it's a structure of - * data (that will be rendered as JSON by default). + * Class representing a "feature data" field: A map of values which are + * either floats or tensors. */ public class FeatureDataField extends LongstringField { @@ -23,12 +22,8 @@ public class FeatureDataField extends LongstringField { @Override public Object convert(Inspector value) { - if (! value.valid()) { - return null; - } - if (value.type() == Type.STRING) { - return value.asString(); - } + if ( ! 
value.valid()) return null; + if (value.type() == Type.STRING) return value.asString(); return new FeatureData(value); } diff --git a/container-search/src/main/java/com/yahoo/prelude/fastsearch/LongstringField.java b/container-search/src/main/java/com/yahoo/prelude/fastsearch/LongstringField.java index 2f9c6d5b325..5de38e43c96 100644 --- a/container-search/src/main/java/com/yahoo/prelude/fastsearch/LongstringField.java +++ b/container-search/src/main/java/com/yahoo/prelude/fastsearch/LongstringField.java @@ -5,10 +5,6 @@ */ package com.yahoo.prelude.fastsearch; -import java.nio.ByteBuffer; - -import com.yahoo.io.SlowInflate; -import com.yahoo.text.Utf8; import com.yahoo.data.access.Inspector; /** diff --git a/container-search/src/main/java/com/yahoo/prelude/searcher/JSONDebugSearcher.java b/container-search/src/main/java/com/yahoo/prelude/searcher/JSONDebugSearcher.java index 2330ca2382a..5f921b67702 100644 --- a/container-search/src/main/java/com/yahoo/prelude/searcher/JSONDebugSearcher.java +++ b/container-search/src/main/java/com/yahoo/prelude/searcher/JSONDebugSearcher.java @@ -3,6 +3,7 @@ package com.yahoo.prelude.searcher; import com.yahoo.prelude.fastsearch.FastHit; import com.yahoo.prelude.hitfield.JSONString; +import com.yahoo.search.Query; import com.yahoo.search.Result; import com.yahoo.search.Searcher; import com.yahoo.processing.request.CompoundName; @@ -27,7 +28,7 @@ public class JSONDebugSearcher extends Searcher { private static CompoundName PROPERTYNAME = new CompoundName("dumpjson"); @Override - public Result search(com.yahoo.search.Query query, Execution execution) { + public Result search(Query query, Execution execution) { Result r = execution.search(query); String propertyName = query.properties().getString(PROPERTYNAME); if (propertyName != null) { diff --git a/container-search/src/main/java/com/yahoo/search/result/FeatureData.java b/container-search/src/main/java/com/yahoo/search/result/FeatureData.java index 53e77631ff9..7e5d6b12f30 100644 
--- a/container-search/src/main/java/com/yahoo/search/result/FeatureData.java +++ b/container-search/src/main/java/com/yahoo/search/result/FeatureData.java @@ -6,29 +6,42 @@ import com.yahoo.data.access.Inspectable; import com.yahoo.data.access.Type; import com.yahoo.data.JsonProducer; import com.yahoo.data.access.simple.JsonRender; +import com.yahoo.io.GrowableByteBuffer; +import com.yahoo.tensor.Tensor; +import com.yahoo.tensor.serialization.JsonFormat; +import com.yahoo.tensor.serialization.TypedBinaryFormat; + +import java.nio.charset.StandardCharsets; +import java.util.HashSet; +import java.util.Optional; +import java.util.Set; /** - * A wrapper for structured data representing feature values. + * A wrapper for structured data representing feature values: A map of floats and tensors. + * This class is not thread safe even when it is only consumed. */ public class FeatureData implements Inspectable, JsonProducer { private final Inspector value; + private Set<String> featureNames = null; + public FeatureData(Inspector value) { this.value = value; } + /** + * Returns the fields of this as an inspector, where tensors are represented as binary data + * which can be decoded using + * <code>com.yahoo.tensor.serialization.TypedBinaryFormat.decode(Optional.empty(), GrowableByteBuffer.wrap(featureValue.asData()))</code> + */ @Override - public Inspector inspect() { - return value; - } + public Inspector inspect() { return value; } + @Override public String toString() { - if (value.type() == Type.EMPTY) { - return ""; - } else { - return toJson(); - } + if (value.type() == Type.EMPTY) return ""; + return toJson(); } @Override @@ -38,7 +51,64 @@ public class FeatureData implements Inspectable, JsonProducer { @Override public StringBuilder writeJson(StringBuilder target) { - return JsonRender.render(value, target, true); + return JsonRender.render(value, new Encoder(target, true)); + } + + /** + * Returns the value of a scalar feature, or null if it is not present. 
+ * + * @throws IllegalArgumentException if the value exists but isn't a scalar + * (that is, if it is a tensor with nonzero rank) + */ + public Double getDouble(String featureName) { + Inspector featureValue = value.field(featureName); + if ( ! featureValue.valid()) return null; + + switch (featureValue.type()) { + case DOUBLE: return featureValue.asDouble(); + case DATA: throw new IllegalArgumentException("Feature '" + featureName + "' is a tensor, not a double"); + default: throw new IllegalStateException("Unexpected feature value type " + featureValue.type()); + } + } + + /** + * Returns the value of a tensor feature, or null if it is not present. + * This will return any feature value: Scalars are returned as a rank 0 tensor. + */ + public Tensor getTensor(String featureName) { + Inspector featureValue = value.field(featureName); + if ( ! featureValue.valid()) return null; + + switch (featureValue.type()) { + case DOUBLE: return Tensor.from(featureValue.asDouble()); + case DATA: return TypedBinaryFormat.decode(Optional.empty(), GrowableByteBuffer.wrap(featureValue.asData())); + default: throw new IllegalStateException("Unexpected feature value type " + featureValue.type()); + } + } + + /** Returns the names of the features available in this */ + public Set<String> featureNames() { + if (featureNames != null) return featureNames; + + featureNames = new HashSet<>(); + value.fields().forEach(field -> featureNames.add(field.getKey())); + return featureNames; + } + + /** A JSON encoder which encodes DATA as a tensor */ + private static class Encoder extends JsonRender.StringEncoder { + + Encoder(StringBuilder out, boolean compact) { + super(out, compact); + } + + @Override + public void encodeDATA(byte[] value) { + // This could be done more efficiently ... 
+ target().append(new String(JsonFormat.encodeWithType(TypedBinaryFormat.decode(Optional.empty(), GrowableByteBuffer.wrap(value))), + StandardCharsets.UTF_8)); + } + } } diff --git a/container-search/src/main/java/com/yahoo/search/result/PositionsData.java b/container-search/src/main/java/com/yahoo/search/result/PositionsData.java index 483849a5435..203e0206f1e 100644 --- a/container-search/src/main/java/com/yahoo/search/result/PositionsData.java +++ b/container-search/src/main/java/com/yahoo/search/result/PositionsData.java @@ -10,7 +10,7 @@ import com.yahoo.data.access.simple.JsonRender; /** * A wrapper for structured data representing an array of position values. - **/ + */ public class PositionsData implements Inspectable, JsonProducer, XmlProducer { private final Inspector value; diff --git a/container-search/src/main/java/com/yahoo/search/searchchain/FutureResult.java b/container-search/src/main/java/com/yahoo/search/searchchain/FutureResult.java index 453b49cfe71..e02f7ea0e37 100644 --- a/container-search/src/main/java/com/yahoo/search/searchchain/FutureResult.java +++ b/container-search/src/main/java/com/yahoo/search/searchchain/FutureResult.java @@ -16,7 +16,7 @@ import java.util.logging.Level; import java.util.logging.Logger; /** - * Extends a {@code FutureTask<Result>}, with some added error handling + * Extends a {@code FutureTask<Result>}, with some added error handling * * @author bratseth */ diff --git a/container-search/src/test/java/com/yahoo/search/result/FeatureDataTestCase.java b/container-search/src/test/java/com/yahoo/search/result/FeatureDataTestCase.java new file mode 100644 index 00000000000..9cc7cc743fc --- /dev/null +++ b/container-search/src/test/java/com/yahoo/search/result/FeatureDataTestCase.java @@ -0,0 +1,52 @@ +// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.search.result; + +import com.yahoo.data.access.slime.SlimeAdapter; +import com.yahoo.slime.Cursor; +import com.yahoo.slime.Slime; +import com.yahoo.tensor.Tensor; +import com.yahoo.tensor.serialization.TypedBinaryFormat; +import org.junit.Test; + +import java.util.stream.Collectors; + +import static org.junit.Assert.assertEquals; + +/** + * @author bratseth + */ +public class FeatureDataTestCase { + + private static final double delta = 0.00000001; + + @Test + public void testFeatureData() { + Cursor features = new Slime().setObject(); + features.setDouble("scalar1", 1.5); + features.setDouble("scalar2", 2.5); + Tensor tensor1 = Tensor.from("tensor(x[3]):[1.5, 2, 2.5]"); + features.setData("tensor1", TypedBinaryFormat.encode(tensor1)); + Tensor tensor2 = Tensor.from(0.5); + features.setData("tensor2", TypedBinaryFormat.encode(tensor2)); + + FeatureData featureData = new FeatureData(new SlimeAdapter(features)); + assertEquals("scalar1,scalar2,tensor1,tensor2", + featureData.featureNames().stream().sorted().collect(Collectors.joining(","))); + assertEquals(1.5, featureData.getDouble("scalar1"), delta); + assertEquals(2.5, featureData.getDouble("scalar2"), delta); + assertEquals(Tensor.from(1.5), featureData.getTensor("scalar1")); + assertEquals(Tensor.from(2.5), featureData.getTensor("scalar2")); + assertEquals(tensor1, featureData.getTensor("tensor1")); + assertEquals(tensor2, featureData.getTensor("tensor2")); + + String expectedJson = + "{" + + "\"scalar1\":1.5," + + "\"scalar2\":2.5," + + "\"tensor1\":{\"type\":\"tensor(x[3])\",\"cells\":[{\"address\":{\"x\":\"0\"},\"value\":1.5},{\"address\":{\"x\":\"1\"},\"value\":2.0},{\"address\":{\"x\":\"2\"},\"value\":2.5}]}," + + "\"tensor2\":{\"type\":\"tensor()\",\"cells\":[{\"address\":{},\"value\":0.5}]}" + + "}"; + assertEquals(expectedJson, featureData.toJson()); + } + +} diff --git a/container-search/src/test/java/com/yahoo/search/searchchain/test/FutureDataTestCase.java 
b/container-search/src/test/java/com/yahoo/search/searchchain/test/FutureDataTestCase.java index f802914d291..b82d3f1a8e9 100644 --- a/container-search/src/test/java/com/yahoo/search/searchchain/test/FutureDataTestCase.java +++ b/container-search/src/test/java/com/yahoo/search/searchchain/test/FutureDataTestCase.java @@ -1,8 +1,6 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.search.searchchain.test; -import com.google.common.util.concurrent.AbstractFuture; -import com.google.common.util.concurrent.ListenableFuture; import com.yahoo.component.ComponentId; import com.yahoo.processing.response.*; import com.yahoo.search.Query; @@ -34,23 +32,25 @@ import com.yahoo.component.chain.Chain; public class FutureDataTestCase { @Test - public void testAsyncFederation() throws InterruptedException, ExecutionException, TimeoutException { + public void testAsyncFederation() throws InterruptedException, ExecutionException { // Setup environment AsyncProviderSearcher asyncProviderSearcher = new AsyncProviderSearcher(); Searcher syncProviderSearcher = new SyncProviderSearcher(); - Chain<Searcher> asyncSource = new Chain<Searcher>(new ComponentId("async"),asyncProviderSearcher); - Chain<Searcher> syncSource = new Chain<>(new ComponentId("sync"),syncProviderSearcher); + Chain<Searcher> asyncSource = new Chain<>(new ComponentId("async"), asyncProviderSearcher); + Chain<Searcher> syncSource = new Chain<>(new ComponentId("sync"), syncProviderSearcher); SearchChainResolver searchChainResolver= - new SearchChainResolver.Builder().addSearchChain(new ComponentId("sync"),new FederationOptions().setUseByDefault(true)). - addSearchChain(new ComponentId("async"),new FederationOptions().setUseByDefault(true)). + new SearchChainResolver.Builder().addSearchChain(new ComponentId("sync"), new FederationOptions().setUseByDefault(true)). 
+ addSearchChain(new ComponentId("async"), new FederationOptions().setUseByDefault(true)). build(); - Chain<Searcher> main = new Chain<Searcher>(new FederationSearcher(new ComponentId("federator"),searchChainResolver)); + Chain<Searcher> main = new Chain<>(new FederationSearcher(new ComponentId("federator"), searchChainResolver)); SearchChainRegistry searchChainRegistry = new SearchChainRegistry(); searchChainRegistry.register(main); searchChainRegistry.register(syncSource); searchChainRegistry.register(asyncSource); - Result result = new Execution(main, Execution.Context.createContextStub(searchChainRegistry,null)).search(new Query()); + Query query = new Query(); + query.setTimeout(5000); + Result result = new Execution(main, Execution.Context.createContextStub(searchChainRegistry,null)).search(query); assertNotNull(result); HitGroup syncGroup = (HitGroup)result.hits().get("source:sync"); @@ -59,29 +59,29 @@ public class FutureDataTestCase { HitGroup asyncGroup = (HitGroup)result.hits().get("source:async"); assertNotNull(asyncGroup); - assertEquals("Got all sync data",3,syncGroup.size()); - assertEquals("sync:0",syncGroup.get(0).getId().toString()); - assertEquals("sync:1",syncGroup.get(1).getId().toString()); - assertEquals("sync:2",syncGroup.get(2).getId().toString()); + assertEquals("Got all sync data", 3, syncGroup.size()); + assertEquals("sync:0", syncGroup.get(0).getId().toString()); + assertEquals("sync:1", syncGroup.get(1).getId().toString()); + assertEquals("sync:2", syncGroup.get(2).getId().toString()); assertTrue(asyncGroup.incoming()==asyncProviderSearcher.incomingData); - assertEquals("Got no async data yet",0,asyncGroup.size()); + assertEquals("Got no async data yet", 0, asyncGroup.size()); asyncProviderSearcher.simulateOneHitIOComplete(new Hit("async:0")); - assertEquals("Got no async data yet, as we haven't completed the incoming buffer and there is no data listener",0,asyncGroup.size()); + assertEquals("Got no async data yet, as we haven't 
completed the incoming buffer and there is no data listener", 0, asyncGroup.size()); asyncProviderSearcher.simulateOneHitIOComplete(new Hit("async:1")); asyncProviderSearcher.simulateAllHitsIOComplete(); - assertEquals("Got no async data yet, as we haven't pulled it",0,asyncGroup.size()); + assertEquals("Got no async data yet, as we haven't pulled it", 0, asyncGroup.size()); asyncGroup.complete().get(); - assertEquals("Completed, so we have the data",2,asyncGroup.size()); - assertEquals("async:0",asyncGroup.get(0).getId().toString()); - assertEquals("async:1",asyncGroup.get(1).getId().toString()); + assertEquals("Completed, so we have the data", 2, asyncGroup.size()); + assertEquals("async:0", asyncGroup.get(0).getId().toString()); + assertEquals("async:1", asyncGroup.get(1).getId().toString()); } @Test public void testFutureData() throws InterruptedException, ExecutionException, TimeoutException { // Set up - AsyncProviderSearcher futureDataSource=new AsyncProviderSearcher(); - Chain<Searcher> chain=new Chain<>(Collections.<Searcher>singletonList(futureDataSource)); + AsyncProviderSearcher futureDataSource = new AsyncProviderSearcher(); + Chain<Searcher> chain = new Chain<>(Collections.<Searcher>singletonList(futureDataSource)); // Execute Query query = new Query(); @@ -102,7 +102,7 @@ public class FutureDataTestCase { // Results with future hit groups will be passed to rendering directly and start rendering immediately. 
// For this test we block and wait for the data instead: result.hits().complete().get(1000, TimeUnit.MILLISECONDS); - assertEquals(2,result.hits().getConcreteSize()); + assertEquals(2, result.hits().getConcreteSize()); } /** @@ -117,7 +117,7 @@ public class FutureDataTestCase { public Result search(Query query, Execution execution) { if (incomingData != null) throw new IllegalArgumentException("This test searcher is one-time use only"); - HitGroup hitGroup=HitGroup.createAsync("Async source"); + HitGroup hitGroup = HitGroup.createAsync("Async source"); this.incomingData = hitGroup.incoming(); // A real implementation would do query.properties().get("jdisc.request") here // to get the jDisc request and use it to spawn a child request to the backend diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/JobType.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/JobType.java index 94e111455ac..a1beef23dbb 100644 --- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/JobType.java +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/JobType.java @@ -154,7 +154,7 @@ public enum JobType { case test: return Optional.of(systemTest); case staging: return Optional.of(stagingTest); } - return from(system, ZoneId.from(system, environment, region)); + return from(system, ZoneId.from(environment, region)); } } diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/Billing.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/Billing.java new file mode 100644 index 00000000000..f716458542c --- /dev/null +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/Billing.java @@ -0,0 +1,12 @@ +// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. 
See LICENSE in the project root. +package com.yahoo.vespa.hosted.controller.api.integration.organization; + +import com.yahoo.config.provision.ApplicationId; + +/** + * @author olaa + */ +public interface Billing { + + void handleBilling(ApplicationId applicationId, String customerId); +} diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/MockBilling.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/MockBilling.java new file mode 100644 index 00000000000..20b77703160 --- /dev/null +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/MockBilling.java @@ -0,0 +1,13 @@ +// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.hosted.controller.api.integration.organization; + +import com.yahoo.config.provision.ApplicationId; + +/** + * @author olaa + */ +public class MockBilling implements Billing { + + @Override + public void handleBilling(ApplicationId applicationId, String customerId) {} +} diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/BillingMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/BillingMaintainer.java new file mode 100644 index 00000000000..c6956293adf --- /dev/null +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/BillingMaintainer.java @@ -0,0 +1,39 @@ +// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.vespa.hosted.controller.maintenance; + +import com.yahoo.config.provision.SystemName; +import com.yahoo.vespa.hosted.controller.Controller; +import com.yahoo.vespa.hosted.controller.api.integration.organization.Billing; +import com.yahoo.vespa.hosted.controller.tenant.CloudTenant; + +import java.time.Duration; +import java.util.EnumSet; + +/** + * @author olaa + */ +public class BillingMaintainer extends Maintainer { + + private final Billing billing; + + public BillingMaintainer(Controller controller, Duration interval, JobControl jobControl, Billing billing) { + super(controller, interval, jobControl, BillingMaintainer.class.getSimpleName(), EnumSet.of(SystemName.cd)); + this.billing = billing; + } + + @Override + public void maintain() { + controller().tenants().asList() + .stream() + .filter(tenant -> tenant instanceof CloudTenant) + .map(tenant -> (CloudTenant) tenant) + .forEach(cloudTenant -> controller().applications().asList(cloudTenant.name()) + .stream() + .forEach( application -> { + billing.handleBilling(application.id(), cloudTenant.billingInfo().customerId()); + }) + ); + } +} + + diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java index c1f896e6593..e840deb062c 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java @@ -5,6 +5,7 @@ import com.yahoo.component.AbstractComponent; import com.yahoo.config.provision.zone.ZoneApi; import com.yahoo.jdisc.Metric; import com.yahoo.vespa.hosted.controller.Controller; +import com.yahoo.vespa.hosted.controller.api.integration.organization.Billing; import com.yahoo.vespa.hosted.controller.api.integration.organization.ContactRetriever; import 
com.yahoo.vespa.hosted.controller.api.integration.resource.ResourceSnapshotConsumer; import com.yahoo.vespa.hosted.controller.authority.config.ApiAuthorityConfig; @@ -13,7 +14,6 @@ import com.yahoo.vespa.hosted.controller.api.integration.dns.NameService; import com.yahoo.vespa.hosted.controller.api.integration.noderepository.NodeRepositoryClientInterface; import com.yahoo.vespa.hosted.controller.api.integration.organization.DeploymentIssues; import com.yahoo.vespa.hosted.controller.api.integration.organization.OwnershipIssues; -import com.yahoo.config.provision.zone.ZoneId; import com.yahoo.vespa.hosted.controller.maintenance.config.MaintainerConfig; import com.yahoo.vespa.hosted.controller.persistence.CuratorDb; import com.yahoo.vespa.hosted.controller.restapi.cost.CostReportConsumer; @@ -55,6 +55,7 @@ public class ControllerMaintenance extends AbstractComponent { private final CostReportMaintainer costReportMaintainer; private final ResourceMeterMaintainer resourceMeterMaintainer; private final NameServiceDispatcher nameServiceDispatcher; + private final BillingMaintainer billingMaintainer; @SuppressWarnings("unused") // instantiated by Dependency Injection public ControllerMaintenance(MaintainerConfig maintainerConfig, ApiAuthorityConfig apiAuthorityConfig, Controller controller, CuratorDb curator, @@ -64,6 +65,7 @@ public class ControllerMaintenance extends AbstractComponent { ContactRetriever contactRetriever, CostReportConsumer reportConsumer, ResourceSnapshotConsumer resourceSnapshotConsumer, + Billing billing, SelfHostedCostConfig selfHostedCostConfig) { Duration maintenanceInterval = Duration.ofMinutes(maintainerConfig.intervalMinutes()); this.jobControl = jobControl; @@ -86,6 +88,7 @@ public class ControllerMaintenance extends AbstractComponent { costReportMaintainer = new CostReportMaintainer(controller, Duration.ofHours(2), reportConsumer, jobControl, nodeRepositoryClient, Clock.systemUTC(), selfHostedCostConfig); resourceMeterMaintainer = new 
ResourceMeterMaintainer(controller, Duration.ofMinutes(60), jobControl, nodeRepositoryClient, Clock.systemUTC(), metric, resourceSnapshotConsumer); nameServiceDispatcher = new NameServiceDispatcher(controller, Duration.ofSeconds(10), jobControl, nameService); + billingMaintainer = new BillingMaintainer(controller, Duration.ofDays(3), jobControl, billing); } public Upgrader upgrader() { return upgrader; } @@ -114,6 +117,7 @@ public class ControllerMaintenance extends AbstractComponent { costReportMaintainer.deconstruct(); resourceMeterMaintainer.deconstruct(); nameServiceDispatcher.deconstruct(); + billingMaintainer.deconstruct(); } /** Create one OS upgrader per cloud found in the zone registry of controller */ diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/InfrastructureUpgrader.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/InfrastructureUpgrader.java index 159eb234aa7..b8bb9a7ef79 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/InfrastructureUpgrader.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/InfrastructureUpgrader.java @@ -3,11 +3,10 @@ package com.yahoo.vespa.hosted.controller.maintenance; import com.yahoo.component.Version; import com.yahoo.config.provision.SystemName; +import com.yahoo.config.provision.zone.UpgradePolicy; import com.yahoo.config.provision.zone.ZoneApi; import com.yahoo.vespa.hosted.controller.Controller; import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node; -import com.yahoo.config.provision.zone.UpgradePolicy; -import com.yahoo.config.provision.zone.ZoneId; import com.yahoo.vespa.hosted.controller.application.SystemApplication; import com.yahoo.yolean.Exceptions; @@ -68,7 +67,9 @@ public abstract class InfrastructureUpgrader extends Maintainer { for (SystemApplication application : applications) { if (convergedOn(target, application.dependencies(), 
zone)) { boolean currentAppConverged = convergedOn(target, application, zone); - if (!currentAppConverged) { + // In dynamically provisioned zones there may be no tenant hosts at the time of upgrade, so we + // should always set the target version. + if (application == SystemApplication.tenantHost || !currentAppConverged) { upgrade(target, application, zone); } converged &= currentAppConverged; diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/SystemUpgrader.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/SystemUpgrader.java index 5a40dd591fd..156e8d0d242 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/SystemUpgrader.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/SystemUpgrader.java @@ -34,7 +34,7 @@ public class SystemUpgrader extends InfrastructureUpgrader { if (minVersion(zone, application, Node::wantedVersion).map(target::isAfter) .orElse(true)) { log.info(String.format("Deploying %s version %s in %s", application.id(), target, zone.getId())); - controller().applications().deploy(application, zone.toDeprecatedId(), target); + controller().applications().deploy(application, zone.getId(), target); } } @@ -45,7 +45,7 @@ public class SystemUpgrader extends InfrastructureUpgrader { if (minVersion.isEmpty()) return true; return minVersion.get().equals(target) - && application.configConvergedIn(zone.toDeprecatedId(), controller(), Optional.of(target)); + && application.configConvergedIn(zone.getId(), controller(), Optional.of(target)); } @Override diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java index 47c7b54264e..1f20bdf5533 100644 --- 
a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java @@ -54,6 +54,13 @@ import java.util.TreeMap; */ public class ApplicationSerializer { + // WARNING: Since there are multiple servers in a ZooKeeper cluster and they upgrade one by one + // (and rewrite all nodes on startup), changes to the serialized format must be made + // such that what is serialized on version N+1 can be read by version N: + // - ADDING FIELDS: Always ok + // - REMOVING FIELDS: Stop reading the field first. Stop writing it on a later version. + // - CHANGING THE FORMAT OF A FIELD: Don't do it bro. + // Application fields private final String idField = "id"; private final String createdAtField = "createdAt"; diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/AuditLogSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/AuditLogSerializer.java index 7fee9a7f9b4..d18e561ce5d 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/AuditLogSerializer.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/AuditLogSerializer.java @@ -19,6 +19,13 @@ import java.util.function.Function; */ public class AuditLogSerializer { + // WARNING: Since there are multiple servers in a ZooKeeper cluster and they upgrade one by one + // (and rewrite all nodes on startup), changes to the serialized format must be made + // such that what is serialized on version N+1 can be read by version N: + // - ADDING FIELDS: Always ok + // - REMOVING FIELDS: Stop reading the field first. Stop writing it on a later version. + // - CHANGING THE FORMAT OF A FIELD: Don't do it bro. 
+ private static final String entriesField = "entries"; private static final String atField = "at"; private static final String principalField = "principal"; diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ConfidenceOverrideSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ConfidenceOverrideSerializer.java index a87875da104..2cb981aac03 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ConfidenceOverrideSerializer.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ConfidenceOverrideSerializer.java @@ -18,6 +18,13 @@ import java.util.Map; */ public class ConfidenceOverrideSerializer { + // WARNING: Since there are multiple servers in a ZooKeeper cluster and they upgrade one by one + // (and rewrite all nodes on startup), changes to the serialized format must be made + // such that what is serialized on version N+1 can be read by version N: + // - ADDING FIELDS: Always ok + // - REMOVING FIELDS: Stop reading the field first. Stop writing it on a later version. + // - CHANGING THE FORMAT OF A FIELD: Don't do it bro. 
+ private final static String overridesField = "overrides"; public Slime toSlime(Map<Version, Confidence> overrides) { diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/LogSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/LogSerializer.java index 40781ac6e92..418038d4f1e 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/LogSerializer.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/LogSerializer.java @@ -27,6 +27,13 @@ import java.util.stream.Collectors; */ class LogSerializer { + // WARNING: Since there are multiple servers in a ZooKeeper cluster and they upgrade one by one + // (and rewrite all nodes on startup), changes to the serialized format must be made + // such that what is serialized on version N+1 can be read by version N: + // - ADDING FIELDS: Always ok + // - REMOVING FIELDS: Stop reading the field first. Stop writing it on a later version. + // - CHANGING THE FORMAT OF A FIELD: Don't do it bro. 
+ private static final String idField = "id"; private static final String typeField = "type"; private static final String timestampField = "at"; diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NameServiceQueueSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NameServiceQueueSerializer.java index 3dfb5ffe5f8..e3dedd65e68 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NameServiceQueueSerializer.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NameServiceQueueSerializer.java @@ -24,6 +24,13 @@ import java.util.ArrayList; */ public class NameServiceQueueSerializer { + // WARNING: Since there are multiple servers in a ZooKeeper cluster and they upgrade one by one + // (and rewrite all nodes on startup), changes to the serialized format must be made + // such that what is serialized on version N+1 can be read by version N: + // - ADDING FIELDS: Always ok + // - REMOVING FIELDS: Stop reading the field first. Stop writing it on a later version. + // - CHANGING THE FORMAT OF A FIELD: Don't do it bro. 
+ private static final String requestsField = "requests"; private static final String requestType = "requestType"; private static final String recordsField = "records"; diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/OsVersionSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/OsVersionSerializer.java index 21f8b1bcb80..d68e24a27ea 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/OsVersionSerializer.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/OsVersionSerializer.java @@ -20,6 +20,13 @@ import java.util.TreeSet; */ public class OsVersionSerializer { + // WARNING: Since there are multiple servers in a ZooKeeper cluster and they upgrade one by one + // (and rewrite all nodes on startup), changes to the serialized format must be made + // such that what is serialized on version N+1 can be read by version N: + // - ADDING FIELDS: Always ok + // - REMOVING FIELDS: Stop reading the field first. Stop writing it on a later version. + // - CHANGING THE FORMAT OF A FIELD: Don't do it bro. 
+ private static final String versionsField = "versions"; private static final String versionField = "version"; private static final String cloudField = "cloud"; diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/OsVersionStatusSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/OsVersionStatusSerializer.java index 3e3c0df1673..88805f54d65 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/OsVersionStatusSerializer.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/OsVersionStatusSerializer.java @@ -26,6 +26,13 @@ import java.util.TreeMap; */ public class OsVersionStatusSerializer { + // WARNING: Since there are multiple servers in a ZooKeeper cluster and they upgrade one by one + // (and rewrite all nodes on startup), changes to the serialized format must be made + // such that what is serialized on version N+1 can be read by version N: + // - ADDING FIELDS: Always ok + // - REMOVING FIELDS: Stop reading the field first. Stop writing it on a later version. + // - CHANGING THE FORMAT OF A FIELD: Don't do it bro. 
+ private static final String versionsField = "versions"; private static final String versionField = "version"; private static final String nodesField = "nodes"; diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializer.java index a9c6c54a44a..9cfce8dc16a 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializer.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializer.java @@ -24,6 +24,13 @@ import java.util.function.Function; */ public class RoutingPolicySerializer { + // WARNING: Since there are multiple servers in a ZooKeeper cluster and they upgrade one by one + // (and rewrite all nodes on startup), changes to the serialized format must be made + // such that what is serialized on version N+1 can be read by version N: + // - ADDING FIELDS: Always ok + // - REMOVING FIELDS: Stop reading the field first. Stop writing it on a later version. + // - CHANGING THE FORMAT OF A FIELD: Don't do it bro. 
+ private static final String routingPoliciesField = "routingPolicies"; private static final String clusterField = "cluster"; private static final String canonicalNameField = "canonicalName"; diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java index f29af1055d0..1c95c9766f5 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java @@ -56,6 +56,13 @@ import static java.util.Comparator.comparing; */ class RunSerializer { + // WARNING: Since there are multiple servers in a ZooKeeper cluster and they upgrade one by one + // (and rewrite all nodes on startup), changes to the serialized format must be made + // such that what is serialized on version N+1 can be read by version N: + // - ADDING FIELDS: Always ok + // - REMOVING FIELDS: Stop reading the field first. Stop writing it on a later version. + // - CHANGING THE FORMAT OF A FIELD: Don't do it bro. 
+ private static final String stepsField = "steps"; private static final String applicationField = "id"; private static final String jobTypeField = "type"; diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/TenantSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/TenantSerializer.java index 56e80068908..3a4e6c3954c 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/TenantSerializer.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/TenantSerializer.java @@ -29,6 +29,13 @@ import java.util.Optional; */ public class TenantSerializer { + // WARNING: Since there are multiple servers in a ZooKeeper cluster and they upgrade one by one + // (and rewrite all nodes on startup), changes to the serialized format must be made + // such that what is serialized on version N+1 can be read by version N: + // - ADDING FIELDS: Always ok + // - REMOVING FIELDS: Stop reading the field first. Stop writing it on a later version. + // - CHANGING THE FORMAT OF A FIELD: Don't do it bro. 
+ private static final String nameField = "name"; private static final String typeField = "type"; private static final String athenzDomainField = "athenzDomain"; diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/VersionSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/VersionSerializer.java index 5edae803fdb..e5897963254 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/VersionSerializer.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/VersionSerializer.java @@ -13,6 +13,13 @@ import com.yahoo.slime.Slime; */ public class VersionSerializer { + // WARNING: Since there are multiple servers in a ZooKeeper cluster and they upgrade one by one + // (and rewrite all nodes on startup), changes to the serialized format must be made + // such that what is serialized on version N+1 can be read by version N: + // - ADDING FIELDS: Always ok + // - REMOVING FIELDS: Stop reading the field first. Stop writing it on a later version. + // - CHANGING THE FORMAT OF A FIELD: Don't do it bro. 
+ private static final String versionField = "version"; public Slime toSlime(Version version) { diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/VersionStatusSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/VersionStatusSerializer.java index 72d38bbee5f..405a2e452d0 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/VersionStatusSerializer.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/VersionStatusSerializer.java @@ -27,6 +27,13 @@ import java.util.Set; */ public class VersionStatusSerializer { + // WARNING: Since there are multiple servers in a ZooKeeper cluster and they upgrade one by one + // (and rewrite all nodes on startup), changes to the serialized format must be made + // such that what is serialized on version N+1 can be read by version N: + // - ADDING FIELDS: Always ok + // - REMOVING FIELDS: Stop reading the field first. Stop writing it on a later version. + // - CHANGING THE FORMAT OF A FIELD: Don't do it bro. 
+ // VersionStatus fields private static final String versionsField = "versions"; diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneFilterMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneFilterMock.java index 700e6e9cb42..57f29fb72af 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneFilterMock.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneFilterMock.java @@ -83,7 +83,7 @@ public class ZoneFilterMock implements ZoneList { @Override public List<ZoneId> ids() { - return List.copyOf(zones.stream().map(ZoneApi::toDeprecatedId).collect(Collectors.toList())); + return List.copyOf(zones.stream().map(ZoneApi::getId).collect(Collectors.toList())); } @Override diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgraderTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgraderTest.java index 39ff29f4ae0..38aa4af4756 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgraderTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgraderTest.java @@ -57,16 +57,16 @@ public class OsUpgraderTest { ); // Bootstrap system - tester.configServer().bootstrap(List.of(zone1.toDeprecatedId(), zone2.toDeprecatedId(), zone3.toDeprecatedId(), zone4.toDeprecatedId(), zone5.toDeprecatedId()), + tester.configServer().bootstrap(List.of(zone1.getId(), zone2.getId(), zone3.getId(), zone4.getId(), zone5.getId()), List.of(SystemApplication.tenantHost)); // Add system applications that exist in a real system, but are currently not upgraded - tester.configServer().addNodes(List.of(zone1.toDeprecatedId(), zone2.toDeprecatedId(), zone3.toDeprecatedId(), zone4.toDeprecatedId(), zone5.toDeprecatedId()), + tester.configServer().addNodes(List.of(zone1.getId(), 
zone2.getId(), zone3.getId(), zone4.getId(), zone5.getId()), List.of(SystemApplication.configServer)); // Fail a few nodes. Failed nodes should not affect versions - failNodeIn(zone1.toDeprecatedId(), SystemApplication.tenantHost); - failNodeIn(zone3.toDeprecatedId(), SystemApplication.tenantHost); + failNodeIn(zone1.getId(), SystemApplication.tenantHost); + failNodeIn(zone3.getId(), SystemApplication.tenantHost); // New OS version released Version version1 = Version.fromString("7.1"); @@ -78,37 +78,37 @@ public class OsUpgraderTest { // zone 1: begins upgrading osUpgrader.maintain(); - assertWanted(version1, SystemApplication.tenantHost, zone1.toDeprecatedId()); + assertWanted(version1, SystemApplication.tenantHost, zone1.getId()); // Other zones remain on previous version (none) - assertWanted(Version.emptyVersion, SystemApplication.proxy, zone2.toDeprecatedId(), zone3.toDeprecatedId(), zone4.toDeprecatedId()); + assertWanted(Version.emptyVersion, SystemApplication.proxy, zone2.getId(), zone3.getId(), zone4.getId()); // zone 1: completes upgrade - completeUpgrade(version1, SystemApplication.tenantHost, zone1.toDeprecatedId()); + completeUpgrade(version1, SystemApplication.tenantHost, zone1.getId()); statusUpdater.maintain(); assertEquals(2, nodesOn(version1).size()); assertEquals(11, nodesOn(Version.emptyVersion).size()); // zone 2 and 3: begins upgrading osUpgrader.maintain(); - assertWanted(version1, SystemApplication.proxy, zone2.toDeprecatedId(), zone3.toDeprecatedId()); + assertWanted(version1, SystemApplication.proxy, zone2.getId(), zone3.getId()); // zone 4: still on previous version - assertWanted(Version.emptyVersion, SystemApplication.tenantHost, zone4.toDeprecatedId()); + assertWanted(Version.emptyVersion, SystemApplication.tenantHost, zone4.getId()); // zone 2 and 3: completes upgrade - completeUpgrade(version1, SystemApplication.tenantHost, zone2.toDeprecatedId(), zone3.toDeprecatedId()); + completeUpgrade(version1, SystemApplication.tenantHost, 
zone2.getId(), zone3.getId()); // zone 4: begins upgrading osUpgrader.maintain(); - assertWanted(version1, SystemApplication.tenantHost, zone4.toDeprecatedId()); + assertWanted(version1, SystemApplication.tenantHost, zone4.getId()); // zone 4: completes upgrade - completeUpgrade(version1, SystemApplication.tenantHost, zone4.toDeprecatedId()); + completeUpgrade(version1, SystemApplication.tenantHost, zone4.getId()); // Next run does nothing as all zones are upgraded osUpgrader.maintain(); - assertWanted(version1, SystemApplication.tenantHost, zone1.toDeprecatedId(), zone2.toDeprecatedId(), zone3.toDeprecatedId(), zone4.toDeprecatedId()); + assertWanted(version1, SystemApplication.tenantHost, zone1.getId(), zone2.getId(), zone3.getId(), zone4.getId()); statusUpdater.maintain(); assertTrue("All nodes on target version", tester.controller().osVersionStatus().nodesIn(cloud).stream() .allMatch(node -> node.version().equals(version1))); diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/SystemUpgraderTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/SystemUpgraderTest.java index cb5e7cc90a1..7b817c175b8 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/SystemUpgraderTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/SystemUpgraderTest.java @@ -50,7 +50,7 @@ public class SystemUpgraderTest { Version version1 = Version.fromString("6.5"); // Bootstrap a system without host applications - tester.configServer().bootstrap(List.of(zone1.toDeprecatedId(), zone2.toDeprecatedId(), zone3.toDeprecatedId(), zone4.toDeprecatedId()), + tester.configServer().bootstrap(List.of(zone1.getId(), zone2.getId(), zone3.getId(), zone4.getId()), SystemApplication.configServer, SystemApplication.proxy); // Fail a few nodes. 
Failed nodes should not affect versions failNodeIn(zone1, SystemApplication.configServer); @@ -144,7 +144,7 @@ public class SystemUpgraderTest { SystemUpgrader systemUpgrader = systemUpgrader(UpgradePolicy.create().upgrade(zone1)); // Bootstrap system - tester.configServer().bootstrap(List.of(zone1.toDeprecatedId()), SystemApplication.configServer, + tester.configServer().bootstrap(List.of(zone1.getId()), SystemApplication.configServer, SystemApplication.proxy); Version version1 = Version.fromString("6.5"); tester.upgradeSystem(version1); @@ -184,7 +184,7 @@ public class SystemUpgraderTest { ); Version version1 = Version.fromString("6.5"); - tester.configServer().bootstrap(List.of(zone1.toDeprecatedId(), zone2.toDeprecatedId(), zone3.toDeprecatedId(), zone4.toDeprecatedId()), SystemApplication.all()); + tester.configServer().bootstrap(List.of(zone1.getId(), zone2.getId(), zone3.getId(), zone4.getId()), SystemApplication.all()); tester.upgradeSystem(version1); systemUpgrader.maintain(); assertCurrentVersion(SystemApplication.all(), version1, zone1, zone2, zone3, zone4); @@ -282,7 +282,7 @@ public class SystemUpgraderTest { public void does_not_deploy_proxy_app_in_zones_without_proxy() { List<SystemApplication> applications = List.of( SystemApplication.configServerHost, SystemApplication.configServer, SystemApplication.tenantHost); - tester.configServer().bootstrap(List.of(zone1.toDeprecatedId()), applications); + tester.configServer().bootstrap(List.of(zone1.getId()), applications); tester.configServer().disallowConvergenceCheck(SystemApplication.proxy.id()); SystemUpgrader systemUpgrader = systemUpgrader(UpgradePolicy.create().upgrade(zone1)); @@ -309,7 +309,7 @@ public class SystemUpgraderTest { private void convergeServices(SystemApplication application, ZoneApi... 
zones) { for (ZoneApi zone : zones) { - tester.controllerTester().configServer().convergeServices(application.id(), zone.toDeprecatedId()); + tester.controllerTester().configServer().convergeServices(application.id(), zone.getId()); } } diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ControllerContainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ControllerContainerTest.java index e974f55c9f6..68f0738f7a5 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ControllerContainerTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ControllerContainerTest.java @@ -72,6 +72,7 @@ public class ControllerContainerTest { " <component id='com.yahoo.vespa.hosted.controller.api.integration.stubs.MockRunDataStore'/>\n" + " <component id='com.yahoo.vespa.hosted.controller.api.integration.organization.MockContactRetriever'/>\n" + " <component id='com.yahoo.vespa.hosted.controller.api.integration.organization.MockIssueHandler'/>\n" + + " <component id='com.yahoo.vespa.hosted.controller.api.integration.organization.MockBilling'/>\n" + " <component id='com.yahoo.vespa.hosted.controller.api.integration.stubs.MockResourceSnapshotConsumer'/>\n" + " <component id='com.yahoo.vespa.hosted.controller.integration.ConfigServerMock'/>\n" + " <component id='com.yahoo.vespa.hosted.controller.integration.NodeRepositoryClientMock'/>\n" + diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/responses/maintenance.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/responses/maintenance.json index 01b063c84e1..d4f3e20ac14 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/responses/maintenance.json +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/responses/maintenance.json @@ -4,6 
+4,9 @@ "name": "ApplicationOwnershipConfirmer" }, { + "name": "BillingMaintainer" + }, + { "name": "ClusterInfoMaintainer" }, { diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/os/OsApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/os/OsApiTest.java index b2dfd7b4cb6..745a7af203b 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/os/OsApiTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/os/OsApiTest.java @@ -81,12 +81,12 @@ public class OsApiTest extends ControllerContainerTest { // Status is updated after some zones are upgraded upgradeAndUpdateStatus(); - completeUpgrade(zone1.toDeprecatedId()); + completeUpgrade(zone1.getId()); assertFile(new Request("http://localhost:8080/os/v1/"), "versions-partially-upgraded.json"); // All zones are upgraded upgradeAndUpdateStatus(); - completeUpgrade(zone2.toDeprecatedId(), zone3.toDeprecatedId()); + completeUpgrade(zone2.getId(), zone3.getId()); assertFile(new Request("http://localhost:8080/os/v1/"), "versions-all-upgraded.json"); // Downgrade with force is permitted diff --git a/dist/vespa.spec b/dist/vespa.spec index f7360419ce6..99b24c2ff8e 100644 --- a/dist/vespa.spec +++ b/dist/vespa.spec @@ -42,17 +42,6 @@ BuildRequires: vespa-protobuf-devel >= 3.7.0-4 BuildRequires: cmake >= 3.9.1 BuildRequires: maven BuildRequires: vespa-protobuf-devel >= 3.7.0-4 -%if 0%{?fc27} -BuildRequires: llvm-devel >= 5.0.2 -BuildRequires: boost-devel >= 1.64 -BuildRequires: vespa-gtest >= 1.8.1-1 -%endif -%if 0%{?fc28} -BuildRequires: llvm-devel >= 6.0.1 -BuildRequires: boost-devel >= 1.66 -BuildRequires: gtest-devel -BuildRequires: gmock-devel -%endif %if 0%{?fc29} BuildRequires: llvm-devel >= 7.0.0 BuildRequires: boost-devel >= 1.66 @@ -125,14 +114,6 @@ Requires: vespa-protobuf >= 3.7.0-4 %endif %if 0%{?fedora} Requires: vespa-protobuf >= 3.7.0-4 -%if 0%{?fc27} -Requires: llvm-libs >= 
5.0.2 -%define _vespa_llvm_version 5.0 -%endif -%if 0%{?fc28} -Requires: llvm-libs >= 6.0.1 -%define _vespa_llvm_version 6.0 -%endif %if 0%{?fc29} Requires: llvm-libs >= 7.0.0 %define _vespa_llvm_version 7 diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImpl.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImpl.java index 8cdb0bee7c2..43313392cdb 100644 --- a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImpl.java +++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImpl.java @@ -26,9 +26,8 @@ import com.yahoo.log.LogLevel; import com.yahoo.vespa.hosted.dockerapi.exception.ContainerNotFoundException; import com.yahoo.vespa.hosted.dockerapi.exception.DockerException; import com.yahoo.vespa.hosted.dockerapi.exception.DockerExecTimeoutException; -import com.yahoo.vespa.hosted.dockerapi.metrics.CounterWrapper; -import com.yahoo.vespa.hosted.dockerapi.metrics.Dimensions; -import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper; +import com.yahoo.vespa.hosted.dockerapi.metrics.Counter; +import com.yahoo.vespa.hosted.dockerapi.metrics.Metrics; import java.io.ByteArrayOutputStream; import java.time.Duration; @@ -56,19 +55,18 @@ public class DockerImpl implements Docker { private final DockerClient dockerClient; private final DockerImageGarbageCollector dockerImageGC; - private final CounterWrapper numberOfDockerDaemonFails; + private final Counter numberOfDockerApiFails; @Inject - public DockerImpl(MetricReceiverWrapper metricReceiverWrapper) { - this(createDockerClient(), metricReceiverWrapper); + public DockerImpl(Metrics metrics) { + this(createDockerClient(), metrics); } - DockerImpl(DockerClient dockerClient, MetricReceiverWrapper metricReceiver) { + DockerImpl(DockerClient dockerClient, Metrics metrics) { this.dockerClient = dockerClient; this.dockerImageGC = new DockerImageGarbageCollector(this); - Dimensions dimensions = new Dimensions.Builder().add("role", 
"docker").build(); - numberOfDockerDaemonFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "daemon.api_fails"); + numberOfDockerApiFails = metrics.declareCounter("docker.api_fails"); } @Override @@ -86,7 +84,7 @@ public class DockerImpl implements Docker { return true; } } catch (RuntimeException e) { - numberOfDockerDaemonFails.add(); + numberOfDockerApiFails.increment(); throw new DockerException("Failed to pull image '" + image.asString() + "'", e); } } @@ -110,7 +108,7 @@ public class DockerImpl implements Docker { } catch (NotFoundException e) { return Optional.empty(); } catch (RuntimeException e) { - numberOfDockerDaemonFails.add(); + numberOfDockerApiFails.increment(); throw new DockerException("Failed to inspect image '" + dockerImage.asString() + "'", e); } } @@ -146,7 +144,7 @@ public class DockerImpl implements Docker { return new ProcessResult(state.getExitCode(), new String(output.toByteArray()), new String(errors.toByteArray())); } catch (RuntimeException | InterruptedException e) { - numberOfDockerDaemonFails.add(); + numberOfDockerApiFails.increment(); throw new DockerException("Container '" + containerName.asString() + "' failed to execute " + Arrays.toString(command), e); } @@ -171,7 +169,7 @@ public class DockerImpl implements Docker { } catch (NotFoundException ignored) { return Optional.empty(); } catch (RuntimeException e) { - numberOfDockerDaemonFails.add(); + numberOfDockerApiFails.increment(); throw new DockerException("Failed to get info for container '" + container + "'", e); } } @@ -186,7 +184,7 @@ public class DockerImpl implements Docker { } catch (NotFoundException ignored) { return Optional.empty(); } catch (RuntimeException | InterruptedException e) { - numberOfDockerDaemonFails.add(); + numberOfDockerApiFails.increment(); throw new DockerException("Failed to get stats for container '" + containerName.asString() + "'", e); } } @@ -200,7 +198,7 @@ public class DockerImpl implements Docker { 
} catch (NotModifiedException ignored) { // If is already started, ignore } catch (RuntimeException e) { - numberOfDockerDaemonFails.add(); + numberOfDockerApiFails.increment(); throw new DockerException("Failed to start container '" + containerName.asString() + "'", e); } } @@ -214,7 +212,7 @@ public class DockerImpl implements Docker { } catch (NotModifiedException ignored) { // If is already stopped, ignore } catch (RuntimeException e) { - numberOfDockerDaemonFails.add(); + numberOfDockerApiFails.increment(); throw new DockerException("Failed to stop container '" + containerName.asString() + "'", e); } } @@ -226,7 +224,7 @@ public class DockerImpl implements Docker { } catch (NotFoundException e) { throw new ContainerNotFoundException(containerName); } catch (RuntimeException e) { - numberOfDockerDaemonFails.add(); + numberOfDockerApiFails.increment(); throw new DockerException("Failed to delete container '" + containerName.asString() + "'", e); } } @@ -253,7 +251,7 @@ public class DockerImpl implements Docker { } catch (NotFoundException e) { throw new ContainerNotFoundException(containerName); } catch (RuntimeException e) { - numberOfDockerDaemonFails.add(); + numberOfDockerApiFails.increment(); throw new DockerException("Failed to update container '" + containerName.asString() + "' to " + resources, e); } } @@ -307,7 +305,7 @@ public class DockerImpl implements Docker { try { return dockerClient.listContainersCmd().withShowAll(true).exec(); } catch (RuntimeException e) { - numberOfDockerDaemonFails.add(); + numberOfDockerApiFails.increment(); throw new DockerException("Failed to list all containers", e); } } @@ -316,7 +314,7 @@ public class DockerImpl implements Docker { try { return dockerClient.listImagesCmd().withShowAll(true).exec(); } catch (RuntimeException e) { - numberOfDockerDaemonFails.add(); + numberOfDockerApiFails.increment(); throw new DockerException("Failed to list all images", e); } } @@ -327,7 +325,7 @@ public class DockerImpl implements 
Docker { } catch (NotFoundException ignored) { // Image was already deleted, ignore } catch (RuntimeException e) { - numberOfDockerDaemonFails.add(); + numberOfDockerApiFails.increment(); throw new DockerException("Failed to delete docker image " + dockerImage.asString(), e); } } @@ -357,7 +355,7 @@ public class DockerImpl implements Docker { logger.log(LogLevel.INFO, "Download completed: " + dockerImage.asString()); removeScheduledPoll(dockerImage); } else { - numberOfDockerDaemonFails.add(); + numberOfDockerApiFails.increment(); throw new DockerClientException("Could not download image: " + dockerImage); } } diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/Counter.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/Counter.java new file mode 100644 index 00000000000..3a0b820c846 --- /dev/null +++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/Counter.java @@ -0,0 +1,28 @@ +// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.hosted.dockerapi.metrics; + +/** + * @author freva + */ +public class Counter implements MetricValue { + private final Object lock = new Object(); + + private long value = 0; + + public void increment() { + add(1L); + } + + public void add(long n) { + synchronized (lock) { + value += n; + } + } + + @Override + public Number getValue() { + synchronized (lock) { + return value; + } + } +} diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/CounterWrapper.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/CounterWrapper.java deleted file mode 100644 index 55c42271674..00000000000 --- a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/CounterWrapper.java +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.vespa.hosted.dockerapi.metrics; - -import com.yahoo.metrics.simple.Counter; - -/** - * Forwards sample to {@link com.yahoo.metrics.simple.Counter} to be displayed in /state/v1/metrics, - * while also saving the value so it can be accessed programatically later. - * - * @author valerijf - */ -public class CounterWrapper implements MetricValue { - private final Object lock = new Object(); - - private final Counter counter; - private long value = 0; - - CounterWrapper(Counter counter) { - this.counter = counter; - } - - public void add() { - add(1L); - } - - public void add(long n) { - synchronized (lock) { - counter.add(n); - value += n; - } - } - - @Override - public Number getValue() { - synchronized (lock) { - return value; - } - } -} diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/DimensionMetrics.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/DimensionMetrics.java index 770ff5e2216..ef59c4b17d6 100644 --- a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/DimensionMetrics.java +++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/DimensionMetrics.java @@ -4,47 +4,67 @@ package com.yahoo.vespa.hosted.dockerapi.metrics; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; -import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.TreeMap; /** * @author freva */ public class DimensionMetrics { - private final static ObjectMapper objectMapper = new ObjectMapper(); + private static final ObjectMapper objectMapper = new ObjectMapper(); + private static final Map<String, Object> routing = Map.of("yamas", Map.of("namespaces", List.of("Vespa"))); private final String application; private final Dimensions dimensions; private final Map<String, Number> metrics; DimensionMetrics(String application, Dimensions dimensions, 
Map<String, Number> metrics) { - this.application = application; - this.dimensions = dimensions; - this.metrics = metrics; + this.application = Objects.requireNonNull(application); + this.dimensions = Objects.requireNonNull(dimensions); + this.metrics = Objects.requireNonNull(metrics); } - Map<String, Object> getMetrics() { - final Map<String, Object> routing = new HashMap<>(); - final Map<String, Object> routingMonitoring = new HashMap<>(); - routing.put("yamas", routingMonitoring); - routingMonitoring.put("namespaces", Collections.singletonList("Vespa")); - - Map<String, Object> report = new HashMap<>(); + public String toSecretAgentReport() throws JsonProcessingException { + Map<String, Object> report = new TreeMap<>(); report.put("application", application); - report.put("dimensions", dimensions.dimensionsMap); - report.put("metrics", metrics); + report.put("dimensions", new TreeMap<>(dimensions.asMap())); + report.put("metrics", new TreeMap<>(metrics)); report.put("routing", routing); - return report; - } - - public String toSecretAgentReport() throws JsonProcessingException { - Map<String, Object> report = getMetrics(); report.put("timestamp", System.currentTimeMillis() / 1000); return objectMapper.writeValueAsString(report); } + public String getApplication() { + return application; + } + + public Dimensions getDimensions() { + return dimensions; + } + + public Map<String, Number> getMetrics() { + return metrics; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DimensionMetrics that = (DimensionMetrics) o; + return application.equals(that.application) && + dimensions.equals(that.dimensions) && + metrics.equals(that.metrics); + } + + @Override + public int hashCode() { + return Objects.hash(application, dimensions, metrics); + } + public static class Builder { private final String application; private final Dimensions dimensions; diff --git 
a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/Dimensions.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/Dimensions.java index 586622100fb..63b92e06505 100644 --- a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/Dimensions.java +++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/Dimensions.java @@ -1,20 +1,24 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.dockerapi.metrics; -import java.util.Collections; import java.util.HashMap; import java.util.Map; /** - * Each metric reported to secret agent has dimensions. - * - * @author valerijf + * @author freva */ public class Dimensions { - final Map<String, Object> dimensionsMap; - private Dimensions(Map<String, Object> dimensionsMap) { - this.dimensionsMap = dimensionsMap; + public static final Dimensions NONE = new Dimensions(Map.of()); + + private final Map<String, String> dimensionsMap; + + public Dimensions(Map<String, String> dimensionsMap) { + this.dimensionsMap = Map.copyOf(dimensionsMap); + } + + public Map<String, String> asMap() { + return dimensionsMap; } @Override @@ -45,7 +49,7 @@ public class Dimensions { } public Dimensions build() { - return new Dimensions(Collections.unmodifiableMap(new HashMap<>(dimensionsMap))); + return new Dimensions(dimensionsMap); } } } diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/Gauge.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/Gauge.java new file mode 100644 index 00000000000..b413475fc2b --- /dev/null +++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/Gauge.java @@ -0,0 +1,24 @@ +// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.vespa.hosted.dockerapi.metrics; + +/** + * @author freva + */ +public class Gauge implements MetricValue { + private final Object lock = new Object(); + + private double value; + + public void sample(double x) { + synchronized (lock) { + this.value = x; + } + } + + @Override + public Number getValue() { + synchronized (lock) { + return value; + } + } +} diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/GaugeWrapper.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/GaugeWrapper.java deleted file mode 100644 index 02e1f15a94f..00000000000 --- a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/GaugeWrapper.java +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.vespa.hosted.dockerapi.metrics; - -import com.yahoo.metrics.simple.Gauge; - -/** - * Forwards sample to {@link com.yahoo.metrics.simple.Gauge} to be displayed in /state/v1/metrics, - * while also saving the value so it can be accessed programatically later. - * - * @author valerijf - */ -public class GaugeWrapper implements MetricValue { - private final Object lock = new Object(); - - private final Gauge gauge; - private double value; - - GaugeWrapper(Gauge gauge) { - this.gauge = gauge; - } - - public void sample(double x) { - synchronized (lock) { - gauge.sample(x); - this.value = x; - } - } - - @Override - public Number getValue() { - synchronized (lock) { - return value; - } - } -} diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricReceiverWrapper.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricReceiverWrapper.java deleted file mode 100644 index 58126a59cbb..00000000000 --- a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricReceiverWrapper.java +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2017 Yahoo Holdings. 
Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.vespa.hosted.dockerapi.metrics; - -import com.google.inject.Inject; -import com.yahoo.metrics.simple.MetricReceiver; -import com.yahoo.metrics.simple.Point; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.stream.Collectors; - -/** - * Export metrics to both /state/v1/metrics and makes them available programmatically. - * Each metric belongs to a monitoring application - * - * @author freva - */ -public class MetricReceiverWrapper { - // Application names used - public static final String APPLICATION_DOCKER = "docker"; - public static final String APPLICATION_HOST = "vespa.host"; - public static final String APPLICATION_NODE = "vespa.node"; - - private final Object monitor = new Object(); - private final Map<DimensionType, Map<String, ApplicationMetrics>> metrics = new HashMap<>(); - private final MetricReceiver metricReceiver; - - @Inject - public MetricReceiverWrapper(MetricReceiver metricReceiver) { - this.metricReceiver = metricReceiver; - } - - /** - * Declaring the same dimensions and name results in the same CounterWrapper instance (idempotent). 
- */ - public CounterWrapper declareCounter(String application, Dimensions dimensions, String name) { - return declareCounter(application, dimensions, name, DimensionType.DEFAULT); - } - - public CounterWrapper declareCounter(String application, Dimensions dimensions, String name, DimensionType type) { - synchronized (monitor) { - Map<Dimensions, Map<String, MetricValue>> metricsByDimensions = getOrCreateApplicationMetrics(application, type); - if (!metricsByDimensions.containsKey(dimensions)) metricsByDimensions.put(dimensions, new HashMap<>()); - if (!metricsByDimensions.get(dimensions).containsKey(name)) { - CounterWrapper counter = new CounterWrapper(metricReceiver.declareCounter(name, new Point(dimensions.dimensionsMap))); - metricsByDimensions.get(dimensions).put(name, counter); - } - - return (CounterWrapper) metricsByDimensions.get(dimensions).get(name); - } - } - - /** - * Declaring the same dimensions and name results in the same GaugeWrapper instance (idempotent). - */ - public GaugeWrapper declareGauge(String application, Dimensions dimensions, String name) { - return declareGauge(application, dimensions, name, DimensionType.DEFAULT); - } - - public GaugeWrapper declareGauge(String application, Dimensions dimensions, String name, DimensionType type) { - synchronized (monitor) { - Map<Dimensions, Map<String, MetricValue>> metricsByDimensions = getOrCreateApplicationMetrics(application, type); - if (!metricsByDimensions.containsKey(dimensions)) - metricsByDimensions.put(dimensions, new HashMap<>()); - if (!metricsByDimensions.get(dimensions).containsKey(name)) { - GaugeWrapper gauge = new GaugeWrapper(metricReceiver.declareGauge(name, new Point(dimensions.dimensionsMap))); - metricsByDimensions.get(dimensions).put(name, gauge); - } - - return (GaugeWrapper) metricsByDimensions.get(dimensions).get(name); - } - } - - public List<DimensionMetrics> getDefaultMetrics() { - return getMetricsByType(DimensionType.DEFAULT); - } - - // For testing, returns same as 
getDefaultMetrics(), but without "timestamp" - public Set<Map<String, Object>> getDefaultMetricsRaw() { - synchronized (monitor) { - Set<Map<String, Object>> dimensionMetrics = new HashSet<>(); - metrics.getOrDefault(DimensionType.DEFAULT, new HashMap<>()) - .forEach((application, applicationMetrics) -> applicationMetrics.metricsByDimensions().entrySet().stream() - .map(entry -> new DimensionMetrics(application, entry.getKey(), - entry.getValue().entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, value -> value.getValue().getValue())))) - .map(DimensionMetrics::getMetrics) - .forEach(dimensionMetrics::add)); - return dimensionMetrics; - } - } - - public List<DimensionMetrics> getMetricsByType(DimensionType type) { - synchronized (monitor) { - List<DimensionMetrics> dimensionMetrics = new ArrayList<>(); - metrics.getOrDefault(type, new HashMap<>()) - .forEach((application, applicationMetrics) -> applicationMetrics.metricsByDimensions().entrySet().stream() - .map(entry -> new DimensionMetrics(application, entry.getKey(), - entry.getValue().entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, value -> value.getValue().getValue())))) - .forEach(dimensionMetrics::add)); - return dimensionMetrics; - } - } - - public void deleteMetricByDimension(String name, Dimensions dimensionsToRemove, DimensionType type) { - synchronized (monitor) { - Optional.ofNullable(metrics.get(type)) - .map(m -> m.get(name)) - .map(ApplicationMetrics::metricsByDimensions) - .ifPresent(m -> m.remove(dimensionsToRemove)); - } - } - - // For testing - Map<String, Number> getMetricsForDimension(String application, Dimensions dimensions) { - synchronized (monitor) { - Map<Dimensions, Map<String, MetricValue>> metricsByDimensions = getOrCreateApplicationMetrics(application, DimensionType.DEFAULT); - return metricsByDimensions.getOrDefault(dimensions, Collections.emptyMap()) - .entrySet() - .stream() - .collect(Collectors.toMap(Map.Entry::getKey, entry -> 
entry.getValue().getValue())); - } - } - - private Map<Dimensions, Map<String, MetricValue>> getOrCreateApplicationMetrics(String application, DimensionType type) { - Map<String, ApplicationMetrics> applicationMetrics = metrics.computeIfAbsent(type, m -> new HashMap<>()); - if (! applicationMetrics.containsKey(application)) { - ApplicationMetrics metrics = new ApplicationMetrics(); - applicationMetrics.put(application, metrics); - } - return applicationMetrics.get(application).metricsByDimensions(); - } - - // "Application" is the monitoring application, not Vespa application - private static class ApplicationMetrics { - private final Map<Dimensions, Map<String, MetricValue>> metricsByDimensions = new LinkedHashMap<>(); - - Map<Dimensions, Map<String, MetricValue>> metricsByDimensions() { - return metricsByDimensions; - } - } - - // Used to distinguish whether metrics have been populated with all tag vaules - public enum DimensionType {DEFAULT, PRETAGGED} -} diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricValue.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricValue.java index 7bd4968747f..b20aa1b11ff 100644 --- a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricValue.java +++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricValue.java @@ -1,8 +1,8 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.hosted.dockerapi.metrics; /** - * @author valerijf + * @author freva */ public interface MetricValue { Number getValue(); diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/Metrics.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/Metrics.java new file mode 100644 index 00000000000..f9b169f0a93 --- /dev/null +++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/Metrics.java @@ -0,0 +1,128 @@ +// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.hosted.dockerapi.metrics; + +import com.google.inject.Inject; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; + +/** + * Stores the latest metric for the given application, name, dimension triplet in memory + * + * @author freva + */ +public class Metrics { + // Application names used + public static final String APPLICATION_HOST = "vespa.host"; + public static final String APPLICATION_NODE = "vespa.node"; + + private final Object monitor = new Object(); + private final Map<DimensionType, Map<String, ApplicationMetrics>> metrics = new HashMap<>(); + + @Inject + public Metrics() { } + + /** + * Creates a counter metric under vespa.host application, with no dimensions and default dimension type + * See {@link #declareCounter(String, String, Dimensions, DimensionType)} + */ + public Counter declareCounter(String name) { + return declareCounter(name, Dimensions.NONE); + } + + /** + * Creates a counter metric under vespa.host application, with the given dimensions and default dimension type + * See {@link #declareCounter(String, String, Dimensions, DimensionType)} + */ + public Counter declareCounter(String name, Dimensions dimensions) { + return declareCounter(APPLICATION_HOST, name, dimensions, 
DimensionType.DEFAULT); + } + + /** Creates a counter metric. This method is idempotent. */ + public Counter declareCounter(String application, String name, Dimensions dimensions, DimensionType type) { + synchronized (monitor) { + return (Counter) getOrCreateApplicationMetrics(application, type) + .computeIfAbsent(dimensions, d -> new HashMap<>()) + .computeIfAbsent(name, n -> new Counter()); + } + } + + /** + * Creates a gauge metric under vespa.host application, with no dimensions and default dimension type + * See {@link #declareGauge(String, String, Dimensions, DimensionType)} + */ + public Gauge declareGauge(String name) { + return declareGauge(name, Dimensions.NONE); + } + + /** + * Creates a gauge metric under vespa.host application, with the given dimensions and default dimension type + * See {@link #declareGauge(String, String, Dimensions, DimensionType)} + */ + public Gauge declareGauge(String name, Dimensions dimensions) { + return declareGauge(APPLICATION_HOST, name, dimensions, DimensionType.DEFAULT); + } + + /** Creates a gauge metric. 
This method is idempotent */ + public Gauge declareGauge(String application, String name, Dimensions dimensions, DimensionType type) { + synchronized (monitor) { + return (Gauge) getOrCreateApplicationMetrics(application, type) + .computeIfAbsent(dimensions, d -> new HashMap<>()) + .computeIfAbsent(name, n -> new Gauge()); + } + } + + public List<DimensionMetrics> getDefaultMetrics() { + return getMetricsByType(DimensionType.DEFAULT); + } + + public List<DimensionMetrics> getMetricsByType(DimensionType type) { + synchronized (monitor) { + List<DimensionMetrics> dimensionMetrics = new ArrayList<>(); + metrics.getOrDefault(type, Map.of()) + .forEach((application, applicationMetrics) -> applicationMetrics.metricsByDimensions().entrySet().stream() + .map(entry -> new DimensionMetrics(application, entry.getKey(), + entry.getValue().entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, value -> value.getValue().getValue())))) + .forEach(dimensionMetrics::add)); + return dimensionMetrics; + } + } + + public void deleteMetricByDimension(String name, Dimensions dimensionsToRemove, DimensionType type) { + synchronized (monitor) { + Optional.ofNullable(metrics.get(type)) + .map(m -> m.get(name)) + .map(ApplicationMetrics::metricsByDimensions) + .ifPresent(m -> m.remove(dimensionsToRemove)); + } + } + + Map<Dimensions, Map<String, MetricValue>> getOrCreateApplicationMetrics(String application, DimensionType type) { + return metrics.computeIfAbsent(type, m -> new HashMap<>()) + .computeIfAbsent(application, app -> new ApplicationMetrics()) + .metricsByDimensions(); + } + + // "Application" is the monitoring application, not Vespa application + private static class ApplicationMetrics { + private final Map<Dimensions, Map<String, MetricValue>> metricsByDimensions = new LinkedHashMap<>(); + + Map<Dimensions, Map<String, MetricValue>> metricsByDimensions() { + return metricsByDimensions; + } + } + + // Used to distinguish whether metrics have been populated with all tag 
vaules + public enum DimensionType { + /** Default metrics get added default dimensions set in check config */ + DEFAULT, + + /** Pretagged metrics will only get the dimensions explicitly set when creating the counter/gauge */ + PRETAGGED + } +} diff --git a/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/DockerImplTest.java b/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/DockerImplTest.java index df221302575..4843d8f9685 100644 --- a/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/DockerImplTest.java +++ b/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/DockerImplTest.java @@ -14,8 +14,7 @@ import com.github.dockerjava.api.command.PullImageCmd; import com.github.dockerjava.api.exception.NotFoundException; import com.github.dockerjava.core.command.ExecStartResultCallback; import com.yahoo.config.provision.DockerImage; -import com.yahoo.metrics.simple.MetricReceiver; -import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper; +import com.yahoo.vespa.hosted.dockerapi.metrics.Metrics; import org.junit.Test; import org.mockito.ArgumentCaptor; import org.mockito.Matchers; @@ -37,8 +36,8 @@ import static org.mockito.Mockito.when; public class DockerImplTest { private final DockerClient dockerClient = mock(DockerClient.class); - private final MetricReceiverWrapper metricReceiver = new MetricReceiverWrapper(MetricReceiver.nullImplementation); - private final DockerImpl docker = new DockerImpl(dockerClient, metricReceiver); + private final Metrics metrics = new Metrics(); + private final DockerImpl docker = new DockerImpl(dockerClient, metrics); @Test public void testExecuteCompletes() { diff --git a/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricReceiverWrapperTest.java b/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricReceiverWrapperTest.java deleted file mode 100644 index c20e64d906e..00000000000 --- 
a/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricReceiverWrapperTest.java +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package com.yahoo.vespa.hosted.dockerapi.metrics; - -import com.yahoo.metrics.simple.MetricReceiver; -import org.junit.Test; - -import java.util.Map; - -import static org.junit.Assert.assertEquals; - -/** - * @author freva - */ -public class MetricReceiverWrapperTest { - private static final Dimensions hostDimension = new Dimensions.Builder().add("host", "abc.yahoo.com").build(); - private static final String applicationDocker = MetricReceiverWrapper.APPLICATION_DOCKER; - - @Test - public void testDefaultValue() { - MetricReceiverWrapper metricReceiver = new MetricReceiverWrapper(MetricReceiver.nullImplementation); - - metricReceiver.declareCounter(applicationDocker, hostDimension, "some.name"); - - assertEquals(metricReceiver.getMetricsForDimension(applicationDocker, hostDimension).get("some.name"), 0L); - } - - @Test - public void testSimpleIncrementMetric() { - MetricReceiverWrapper metricReceiver = new MetricReceiverWrapper(MetricReceiver.nullImplementation); - CounterWrapper counter = metricReceiver.declareCounter(applicationDocker, hostDimension, "a_counter.value"); - - counter.add(5); - counter.add(8); - - Map<String, Number> latestMetrics = metricReceiver.getMetricsForDimension(applicationDocker, hostDimension); - assertEquals("Expected only 1 metric value to be set", 1, latestMetrics.size()); - assertEquals(latestMetrics.get("a_counter.value"), 13L); // 5 + 8 - } - - @Test - public void testSimpleGauge() { - MetricReceiverWrapper metricReceiver = new MetricReceiverWrapper(MetricReceiver.nullImplementation); - GaugeWrapper gauge = metricReceiver.declareGauge(applicationDocker, hostDimension, "test.gauge"); - - gauge.sample(42); - gauge.sample(-342.23); - - Map<String, Number> latestMetrics = 
metricReceiver.getMetricsForDimension(applicationDocker, hostDimension); - assertEquals("Expected only 1 metric value to be set", 1, latestMetrics.size()); - assertEquals(latestMetrics.get("test.gauge"), -342.23); - } - - @Test - public void testRedeclaringSameGauge() { - MetricReceiverWrapper metricReceiver = new MetricReceiverWrapper(MetricReceiver.nullImplementation); - GaugeWrapper gauge = metricReceiver.declareGauge(applicationDocker, hostDimension, "test.gauge"); - gauge.sample(42); - - // Same as hostDimension, but new instance. - Dimensions newDimension = new Dimensions.Builder().add("host", "abc.yahoo.com").build(); - GaugeWrapper newGauge = metricReceiver.declareGauge(applicationDocker, newDimension, "test.gauge"); - newGauge.sample(56); - - assertEquals(metricReceiver.getMetricsForDimension(applicationDocker, hostDimension).get("test.gauge"), 56.); - } - - @Test - public void testSameMetricNameButDifferentDimensions() { - MetricReceiverWrapper metricReceiver = new MetricReceiverWrapper(MetricReceiver.nullImplementation); - GaugeWrapper gauge = metricReceiver.declareGauge(applicationDocker, hostDimension, "test.gauge"); - gauge.sample(42); - - // Not the same as hostDimension. 
- Dimensions newDimension = new Dimensions.Builder().add("host", "abcd.yahoo.com").build(); - GaugeWrapper newGauge = metricReceiver.declareGauge(applicationDocker, newDimension, "test.gauge"); - newGauge.sample(56); - - assertEquals(metricReceiver.getMetricsForDimension(applicationDocker, hostDimension).get("test.gauge"), 42.); - assertEquals(metricReceiver.getMetricsForDimension(applicationDocker, newDimension).get("test.gauge"), 56.); - } - - @Test - public void testDeletingMetric() { - MetricReceiverWrapper metricReceiver = new MetricReceiverWrapper(MetricReceiver.nullImplementation); - metricReceiver.declareGauge(applicationDocker, hostDimension, "test.gauge"); - - Dimensions differentDimension = new Dimensions.Builder().add("host", "abcd.yahoo.com").build(); - metricReceiver.declareGauge(applicationDocker, differentDimension, "test.gauge"); - - assertEquals(2, metricReceiver.getDefaultMetricsRaw().size()); - metricReceiver.deleteMetricByDimension(applicationDocker, differentDimension, MetricReceiverWrapper.DimensionType.DEFAULT); - assertEquals(1, metricReceiver.getDefaultMetricsRaw().size()); - assertEquals(metricReceiver.getMetricsForDimension(applicationDocker, hostDimension).size(), 1); - assertEquals(metricReceiver.getMetricsForDimension(applicationDocker, differentDimension).size(), 0); - } -} diff --git a/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricsTest.java b/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricsTest.java new file mode 100644 index 00000000000..fc153ee0562 --- /dev/null +++ b/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricsTest.java @@ -0,0 +1,99 @@ +// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package com.yahoo.vespa.hosted.dockerapi.metrics; + +import org.junit.Test; + +import java.util.Map; +import java.util.stream.Collectors; + +import static com.yahoo.vespa.hosted.dockerapi.metrics.Metrics.APPLICATION_HOST; +import static com.yahoo.vespa.hosted.dockerapi.metrics.Metrics.DimensionType.DEFAULT; +import static org.junit.Assert.assertEquals; + +/** + * @author freva + */ +public class MetricsTest { + private static final Dimensions hostDimension = new Dimensions.Builder().add("host", "abc.yahoo.com").build(); + private final Metrics metrics = new Metrics(); + + @Test + public void testDefaultValue() { + metrics.declareCounter("some.name", hostDimension); + + assertEquals(getMetricsForDimension(hostDimension).get("some.name"), 0L); + } + + @Test + public void testSimpleIncrementMetric() { + Counter counter = metrics.declareCounter("a_counter.value", hostDimension); + + counter.add(5); + counter.add(8); + + Map<String, Number> latestMetrics = getMetricsForDimension(hostDimension); + assertEquals("Expected only 1 metric value to be set", 1, latestMetrics.size()); + assertEquals(latestMetrics.get("a_counter.value"), 13L); // 5 + 8 + } + + @Test + public void testSimpleGauge() { + Gauge gauge = metrics.declareGauge("test.gauge", hostDimension); + + gauge.sample(42); + gauge.sample(-342.23); + + Map<String, Number> latestMetrics = getMetricsForDimension(hostDimension); + assertEquals("Expected only 1 metric value to be set", 1, latestMetrics.size()); + assertEquals(latestMetrics.get("test.gauge"), -342.23); + } + + @Test + public void testRedeclaringSameGauge() { + Gauge gauge = metrics.declareGauge("test.gauge", hostDimension); + gauge.sample(42); + + // Same as hostDimension, but new instance. 
+ Dimensions newDimension = new Dimensions.Builder().add("host", "abc.yahoo.com").build(); + Gauge newGauge = metrics.declareGauge("test.gauge", newDimension); + newGauge.sample(56); + + assertEquals(getMetricsForDimension(hostDimension).get("test.gauge"), 56.); + } + + @Test + public void testSameMetricNameButDifferentDimensions() { + Gauge gauge = metrics.declareGauge("test.gauge", hostDimension); + gauge.sample(42); + + // Not the same as hostDimension. + Dimensions newDimension = new Dimensions.Builder().add("host", "abcd.yahoo.com").build(); + Gauge newGauge = metrics.declareGauge("test.gauge", newDimension); + newGauge.sample(56); + + assertEquals(getMetricsForDimension(hostDimension).get("test.gauge"), 42.); + assertEquals(getMetricsForDimension(newDimension).get("test.gauge"), 56.); + } + + @Test + public void testDeletingMetric() { + metrics.declareGauge("test.gauge", hostDimension); + + Dimensions differentDimension = new Dimensions.Builder().add("host", "abcd.yahoo.com").build(); + metrics.declareGauge("test.gauge", differentDimension); + + assertEquals(2, metrics.getMetricsByType(DEFAULT).size()); + metrics.deleteMetricByDimension(APPLICATION_HOST, differentDimension, DEFAULT); + assertEquals(1, metrics.getMetricsByType(DEFAULT).size()); + assertEquals(getMetricsForDimension(hostDimension).size(), 1); + assertEquals(getMetricsForDimension(differentDimension).size(), 0); + } + + private Map<String, Number> getMetricsForDimension(Dimensions dimensions) { + return metrics.getOrCreateApplicationMetrics(APPLICATION_HOST, DEFAULT) + .getOrDefault(dimensions, Map.of()) + .entrySet() + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().getValue())); + } +} diff --git a/document/src/main/java/com/yahoo/document/json/readers/TensorReader.java b/document/src/main/java/com/yahoo/document/json/readers/TensorReader.java index a3d2a157073..6bdac611fdc 100644 --- a/document/src/main/java/com/yahoo/document/json/readers/TensorReader.java 
+++ b/document/src/main/java/com/yahoo/document/json/readers/TensorReader.java @@ -3,6 +3,10 @@ package com.yahoo.document.json.readers; import com.yahoo.document.datatypes.TensorFieldValue; import com.yahoo.document.json.TokenBuffer; +import com.yahoo.lang.MutableInteger; +import com.yahoo.slime.ArrayTraverser; +import com.yahoo.slime.Type; +import com.yahoo.tensor.IndexedTensor; import com.yahoo.tensor.MappedTensor; import com.yahoo.tensor.Tensor; @@ -11,54 +15,61 @@ import static com.yahoo.document.json.readers.JsonParserHelpers.*; /** * Reads the tensor format described at * http://docs.vespa.ai/documentation/reference/document-json-format.html#tensor + * + * @author geirst + * @author bratseth */ public class TensorReader { public static final String TENSOR_ADDRESS = "address"; public static final String TENSOR_DIMENSIONS = "dimensions"; public static final String TENSOR_CELLS = "cells"; + public static final String TENSOR_VALUES = "values"; public static final String TENSOR_VALUE = "value"; - public static void fillTensor(TokenBuffer buffer, TensorFieldValue tensorFieldValue) { + static void fillTensor(TokenBuffer buffer, TensorFieldValue tensorFieldValue) { // TODO: Switch implementation to com.yahoo.tensor.serialization.JsonFormat.decode - Tensor.Builder tensorBuilder = Tensor.Builder.of(tensorFieldValue.getDataType().getTensorType()); + Tensor.Builder builder = Tensor.Builder.of(tensorFieldValue.getDataType().getTensorType()); expectObjectStart(buffer.currentToken()); int initNesting = buffer.nesting(); - // read tensor cell fields and ignore everything else for (buffer.next(); buffer.nesting() >= initNesting; buffer.next()) { - if (TensorReader.TENSOR_CELLS.equals(buffer.currentName())) - readTensorCells(buffer, tensorBuilder); + if (TENSOR_CELLS.equals(buffer.currentName())) + readTensorCells(buffer, builder); + else if (TENSOR_VALUES.equals(buffer.currentName())) + readTensorValues(buffer, builder); + else if
(builder.type().dimensions().stream().anyMatch(d -> d.isIndexed())) // sparse can be empty + throw new IllegalArgumentException("Expected a tensor value to contain either 'cells' or 'values'"); } expectObjectEnd(buffer.currentToken()); - tensorFieldValue.assign(tensorBuilder.build()); + tensorFieldValue.assign(builder.build()); } - public static void readTensorCells(TokenBuffer buffer, Tensor.Builder tensorBuilder) { + static void readTensorCells(TokenBuffer buffer, Tensor.Builder builder) { expectArrayStart(buffer.currentToken()); int initNesting = buffer.nesting(); for (buffer.next(); buffer.nesting() >= initNesting; buffer.next()) - readTensorCell(buffer, tensorBuilder); + readTensorCell(buffer, builder); expectCompositeEnd(buffer.currentToken()); } - public static void readTensorCell(TokenBuffer buffer, Tensor.Builder tensorBuilder) { + private static void readTensorCell(TokenBuffer buffer, Tensor.Builder builder) { expectObjectStart(buffer.currentToken()); int initNesting = buffer.nesting(); double cellValue = 0.0; - Tensor.Builder.CellBuilder cellBuilder = tensorBuilder.cell(); + Tensor.Builder.CellBuilder cellBuilder = builder.cell(); for (buffer.next(); buffer.nesting() >= initNesting; buffer.next()) { String currentName = buffer.currentName(); if (TensorReader.TENSOR_ADDRESS.equals(currentName)) { readTensorAddress(buffer, cellBuilder); } else if (TensorReader.TENSOR_VALUE.equals(currentName)) { - cellValue = Double.valueOf(buffer.currentText()); + cellValue = readDouble(buffer); } } expectObjectEnd(buffer.currentToken()); cellBuilder.value(cellValue); } - public static void readTensorAddress(TokenBuffer buffer, MappedTensor.Builder.CellBuilder cellBuilder) { + private static void readTensorAddress(TokenBuffer buffer, MappedTensor.Builder.CellBuilder cellBuilder) { expectObjectStart(buffer.currentToken()); int initNesting = buffer.nesting(); for (buffer.next(); buffer.nesting() >= initNesting; buffer.next()) { @@ -68,4 +79,28 @@ public class TensorReader { 
} expectObjectEnd(buffer.currentToken()); } + + private static void readTensorValues(TokenBuffer buffer, Tensor.Builder builder) { + if ( ! (builder instanceof IndexedTensor.BoundBuilder)) + throw new IllegalArgumentException("The 'values' field can only be used with dense tensors. " + + "Use 'cells' instead"); + expectArrayStart(buffer.currentToken()); + + IndexedTensor.BoundBuilder indexedBuilder = (IndexedTensor.BoundBuilder)builder; + int index = 0; + int initNesting = buffer.nesting(); + for (buffer.next(); buffer.nesting() >= initNesting; buffer.next()) + indexedBuilder.cellByDirectIndex(index++, readDouble(buffer)); + expectCompositeEnd(buffer.currentToken()); + } + + private static double readDouble(TokenBuffer buffer) { + try { + return Double.valueOf(buffer.currentText()); + } + catch (NumberFormatException e) { + throw new IllegalArgumentException("Expected a number but got '" + buffer.currentText() + "'"); + } + } + } diff --git a/document/src/test/java/com/yahoo/document/json/JsonReaderTestCase.java b/document/src/test/java/com/yahoo/document/json/JsonReaderTestCase.java index f8ee23e86ba..69be397595e 100644 --- a/document/src/test/java/com/yahoo/document/json/JsonReaderTestCase.java +++ b/document/src/test/java/com/yahoo/document/json/JsonReaderTestCase.java @@ -52,6 +52,7 @@ import com.yahoo.tensor.IndexedTensor; import com.yahoo.tensor.MappedTensor; import com.yahoo.tensor.Tensor; import com.yahoo.tensor.TensorType; +import com.yahoo.tensor.serialization.JsonFormat; import com.yahoo.text.Utf8; import org.junit.After; import org.junit.Before; @@ -63,6 +64,7 @@ import org.mockito.internal.matchers.Contains; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; +import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.Base64; import java.util.Collections; @@ -1294,6 +1296,24 @@ public class JsonReaderTestCase { } @Test + public void testParsingOfDenseTensorOnDenseForm() { + Tensor.Builder
builder = Tensor.Builder.of(TensorType.fromSpec("tensor(x[2],y[3])")); + builder.cell().label("x", 0).label("y", 0).value(2.0); + builder.cell().label("x", 0).label("y", 1).value(3.0); + builder.cell().label("x", 0).label("y", 2).value(4.0); + builder.cell().label("x", 1).label("y", 0).value(5.0); + builder.cell().label("x", 1).label("y", 1).value(6.0); + builder.cell().label("x", 1).label("y", 2).value(7.0); + Tensor expected = builder.build(); + + Tensor tensor = assertTensorField(expected, + createPutWithTensor(inputJson("{", + " 'values': [2.0, 3.0, 4.0, 5.0, 6.0, 7.0]", + "}"), "dense_tensor"), "dense_tensor"); + assertTrue(tensor instanceof IndexedTensor); // this matters for performance + } + + @Test public void testParsingOfTensorWithSingleCellInDifferentJsonOrder() { assertSparseTensorField("{{x:a,y:b}:2.0}", createPutWithSparseTensor(inputJson("{", @@ -1689,11 +1709,14 @@ public class JsonReaderTestCase { return assertTensorField(expectedTensor, put, "sparse_tensor"); } private static Tensor assertTensorField(String expectedTensor, DocumentPut put, String tensorFieldName) { - final Document doc = put.getDocument(); + return assertTensorField(Tensor.from(expectedTensor), put, tensorFieldName); + } + private static Tensor assertTensorField(Tensor expectedTensor, DocumentPut put, String tensorFieldName) { + Document doc = put.getDocument(); assertEquals("testtensor", doc.getId().getDocType()); assertEquals(TENSOR_DOC_ID, doc.getId().toString()); TensorFieldValue fieldValue = (TensorFieldValue)doc.getFieldValue(doc.getField(tensorFieldName)); - assertEquals(Tensor.from(expectedTensor), fieldValue.getTensor().get()); + assertEquals(expectedTensor, fieldValue.getTensor().get()); return fieldValue.getTensor().get(); } diff --git a/documentapi/src/main/java/com/yahoo/documentapi/SyncSession.java b/documentapi/src/main/java/com/yahoo/documentapi/SyncSession.java index c254df3ba02..418c0374193 100755 --- 
a/documentapi/src/main/java/com/yahoo/documentapi/SyncSession.java +++ b/documentapi/src/main/java/com/yahoo/documentapi/SyncSession.java @@ -11,8 +11,8 @@ import com.yahoo.documentapi.messagebus.protocol.DocumentProtocol; import java.time.Duration; /** - * <p>A session for synchronous access to a document repository. This class - * provides simple document access where throughput is not a concern.</p> + * A session for synchronous access to a document repository. This class + * provides simple document access where throughput is not a concern. * * @author Simon Thoresen Hult * @author bjorncs @@ -20,16 +20,15 @@ import java.time.Duration; public interface SyncSession extends Session { /** - * <p>Puts a document. When this method returns, the document is safely - * received. This enables setting condition compared to using Document.</p> + * Puts a document. When this method returns, the document is safely + * received. This enables setting condition compared to using Document. * * @param documentPut The DocumentPut operation */ void put(DocumentPut documentPut); /** - * <p>Puts a document. When this method returns, the document is safely - * received.</p> + * Puts a document. When this method returns, the document is safely received. * * @param documentPut The DocumentPut operation * @param priority The priority with which to perform this operation. @@ -39,7 +38,7 @@ public interface SyncSession extends Session { } /** - * <p>Gets a document.</p> + * Gets a document. * * @param id The id of the document to get. * @return The known document having this id, or null if there is no @@ -65,7 +64,7 @@ public interface SyncSession extends Session { } /** - * <p>Gets a document with timeout.</p> + * Gets a document with timeout. * * @param id The id of the document to get. * @param timeout Timeout. If timeout is null, an unspecified default will be used. 
@@ -77,7 +76,7 @@ public interface SyncSession extends Session { Document get(DocumentId id, Duration timeout); /** - * <p>Gets a document with timeout. </p> + * Gets a document with timeout. * * @param id The id of the document to get. * @param fieldSet A comma-separated list of fields to retrieve @@ -93,12 +92,12 @@ public interface SyncSession extends Session { /** * <p>Removes a document if it is present and condition is fulfilled.</p> * @param documentRemove document to delete - * @return true If the document with this id was removed, false otherwise. + * @return true if the document with this id was removed, false otherwise. */ boolean remove(DocumentRemove documentRemove); /** - * <p>Removes a document if it is present.</p> + * Removes a document if it is present. * * @param documentRemove Document remove operation * @param priority The priority with which to perform this operation. @@ -109,7 +108,7 @@ public interface SyncSession extends Session { boolean remove(DocumentRemove documentRemove, DocumentProtocol.Priority priority); /** - * <p>Updates a document.</p> + * Updates a document. * * @param update The updates to perform. * @return True, if the document was found and updated. @@ -119,7 +118,7 @@ public interface SyncSession extends Session { boolean update(DocumentUpdate update); /** - * <p>Updates a document.</p> + * Updates a document. * * @param update The updates to perform. * @param priority The priority with which to perform this operation. 
diff --git a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/RemoveDocumentReply.java b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/RemoveDocumentReply.java index 167183acec6..d3c29e5a7be 100755 --- a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/RemoveDocumentReply.java +++ b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/RemoveDocumentReply.java @@ -18,7 +18,7 @@ public class RemoveDocumentReply extends WriteDocumentReply { /** * Returns whether or not the document was found and removed. * - * @return True if document was found. + * @return true if document was found. */ public boolean wasFound() { return found; @@ -27,7 +27,7 @@ public class RemoveDocumentReply extends WriteDocumentReply { /** * Set whether or not the document was found and removed. * - * @param found True if the document was found. + * @param found true if the document was found. */ public void setWasFound(boolean found) { this.found = found; diff --git a/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp b/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp index 714eb870b3e..9112a8b1712 100644 --- a/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp +++ b/eval/src/tests/eval/interpreted_function/interpreted_function_test.cpp @@ -100,16 +100,10 @@ TEST_FF("require that compiled evaluation passes all conformance tests", MyEvalT //----------------------------------------------------------------------------- -TEST("require that invalid function evaluates to a error") { +TEST("require that invalid function is tagged with error") { std::vector<vespalib::string> params({"x", "y", "z", "w"}); Function function = Function::parse(params, "x & y"); EXPECT_TRUE(function.has_error()); - InterpretedFunction ifun(SimpleTensorEngine::ref(), function, NodeTypes()); - InterpretedFunction::Context ctx(ifun); - SimpleParams my_params({1,2,3,4}); - const Value &result = 
ifun.eval(ctx, my_params); - EXPECT_TRUE(result.is_error()); - EXPECT_EQUAL(error_value, result.as_double()); } //----------------------------------------------------------------------------- diff --git a/eval/src/tests/eval/tensor_function/tensor_function_test.cpp b/eval/src/tests/eval/tensor_function/tensor_function_test.cpp index 741b756e46f..7bca3d14c28 100644 --- a/eval/src/tests/eval/tensor_function/tensor_function_test.cpp +++ b/eval/src/tests/eval/tensor_function/tensor_function_test.cpp @@ -15,13 +15,12 @@ using namespace vespalib::eval::tensor_function; struct EvalCtx { const TensorEngine &engine; Stash stash; - ErrorValue error; std::vector<Value::UP> tensors; std::vector<Value::CREF> params; InterpretedFunction::UP ifun; std::unique_ptr<InterpretedFunction::Context> ictx; EvalCtx(const TensorEngine &engine_in) - : engine(engine_in), stash(), error(), tensors(), params(), ifun(), ictx() {} + : engine(engine_in), stash(), tensors(), params(), ifun(), ictx() {} ~EvalCtx() {} size_t add_tensor(Value::UP tensor) { size_t id = params.size(); diff --git a/eval/src/tests/eval/value_cache/tensor_loader_test.cpp b/eval/src/tests/eval/value_cache/tensor_loader_test.cpp index 8180a7daef8..5dfde15a0ee 100644 --- a/eval/src/tests/eval/value_cache/tensor_loader_test.cpp +++ b/eval/src/tests/eval/value_cache/tensor_loader_test.cpp @@ -39,11 +39,10 @@ void verify_tensor(const TensorSpec &expect, ConstantValue::UP actual) { } void verify_invalid(ConstantValue::UP actual) { - EXPECT_EQUAL(actual->type(), ValueType::double_type()); - EXPECT_EQUAL(actual->value().as_double(), 0.0); + EXPECT_TRUE(actual->type().is_error()); } -TEST_F("require that invalid types loads an empty double", ConstantTensorLoader(SimpleTensorEngine::ref())) { +TEST_F("require that invalid types gives bad constant value", ConstantTensorLoader(SimpleTensorEngine::ref())) { TEST_DO(verify_invalid(f1.create(TEST_PATH("dense.json"), "invalid type spec"))); } diff --git 
a/eval/src/vespa/eval/eval/basic_nodes.cpp b/eval/src/vespa/eval/eval/basic_nodes.cpp index 85e00e76803..6138f9ac073 100644 --- a/eval/src/vespa/eval/eval/basic_nodes.cpp +++ b/eval/src/vespa/eval/eval/basic_nodes.cpp @@ -21,8 +21,8 @@ struct Frame { }; struct NoParams : LazyParams { - const Value &resolve(size_t, Stash &stash) const override { - return stash.create<ErrorValue>(); + const Value &resolve(size_t, Stash &) const override { + abort(); } }; diff --git a/eval/src/vespa/eval/eval/interpreted_function.cpp b/eval/src/vespa/eval/eval/interpreted_function.cpp index e362faadf46..208b2db4c3a 100644 --- a/eval/src/vespa/eval/eval/interpreted_function.cpp +++ b/eval/src/vespa/eval/eval/interpreted_function.cpp @@ -91,9 +91,7 @@ InterpretedFunction::eval(Context &ctx, const LazyParams ¶ms) const while (state.program_offset < _program.size()) { _program[state.program_offset++].perform(state); } - if (state.stack.size() != 1) { - state.stack.push_back(state.stash.create<ErrorValue>()); - } + assert(state.stack.size() == 1); return state.stack.back(); } diff --git a/eval/src/vespa/eval/eval/make_tensor_function.cpp b/eval/src/vespa/eval/eval/make_tensor_function.cpp index d84d9f53749..ebf065f32a1 100644 --- a/eval/src/vespa/eval/eval/make_tensor_function.cpp +++ b/eval/src/vespa/eval/eval/make_tensor_function.cpp @@ -142,8 +142,8 @@ struct TensorFunctionBuilder : public NodeVisitor, public NodeTraverser { void visit(const If &node) override { make_if(node); } - void visit(const Error &node) override { - make_const(node, ErrorValue::instance); + void visit(const Error &) override { + abort(); } void visit(const TensorMap &node) override { const auto &token = stash.create<CompileCache::Token::UP>(CompileCache::compile(node.lambda(), PassParams::SEPARATE)); diff --git a/eval/src/vespa/eval/eval/simple_tensor_engine.cpp b/eval/src/vespa/eval/eval/simple_tensor_engine.cpp index c8c9a82d267..25fc98fc00a 100644 --- a/eval/src/vespa/eval/eval/simple_tensor_engine.cpp +++ 
b/eval/src/vespa/eval/eval/simple_tensor_engine.cpp @@ -40,22 +40,14 @@ const Value &to_value(std::unique_ptr<SimpleTensor> tensor, Stash &stash) { if (tensor->type().is_tensor()) { return *stash.create<Value::UP>(std::move(tensor)); } - if (tensor->type().is_double()) { - return stash.create<DoubleValue>(tensor->as_double()); - } - assert(tensor->type().is_error()); - return ErrorValue::instance; + return stash.create<DoubleValue>(tensor->as_double()); } Value::UP to_value(std::unique_ptr<SimpleTensor> tensor) { if (tensor->type().is_tensor()) { return tensor; } - if (tensor->type().is_double()) { - return std::make_unique<DoubleValue>(tensor->as_double()); - } - assert(tensor->type().is_error()); - return std::make_unique<ErrorValue>(); + return std::make_unique<DoubleValue>(tensor->as_double()); } } // namespace vespalib::eval::<unnamed> diff --git a/eval/src/vespa/eval/eval/test/tensor_conformance.cpp b/eval/src/vespa/eval/eval/test/tensor_conformance.cpp index 8cbc7507592..7e512bb5bf1 100644 --- a/eval/src/vespa/eval/eval/test/tensor_conformance.cpp +++ b/eval/src/vespa/eval/eval/test/tensor_conformance.cpp @@ -298,7 +298,6 @@ struct TestContext { } void test_tensor_create_type() { - TEST_DO(verify_create_type("error")); TEST_DO(verify_create_type("double")); TEST_DO(verify_create_type("tensor(x{})")); TEST_DO(verify_create_type("tensor(x{},y{})")); diff --git a/eval/src/vespa/eval/eval/value.cpp b/eval/src/vespa/eval/eval/value.cpp index 4bfd758f9cd..3629a5ad698 100644 --- a/eval/src/vespa/eval/eval/value.cpp +++ b/eval/src/vespa/eval/eval/value.cpp @@ -6,9 +6,6 @@ namespace vespalib { namespace eval { -ValueType ErrorValue::_type = ValueType::error_type(); -const ErrorValue ErrorValue::instance; - ValueType DoubleValue::_type = ValueType::double_type(); } // namespace vespalib::eval diff --git a/eval/src/vespa/eval/eval/value.h b/eval/src/vespa/eval/eval/value.h index f14034968be..15df44efbac 100644 --- a/eval/src/vespa/eval/eval/value.h +++ 
b/eval/src/vespa/eval/eval/value.h @@ -20,7 +20,6 @@ constexpr double error_value = 31212.0; struct Value { typedef std::unique_ptr<Value> UP; typedef std::reference_wrapper<const Value> CREF; - virtual bool is_error() const { return false; } virtual bool is_double() const { return false; } virtual bool is_tensor() const { return false; } virtual double as_double() const { return 0.0; } @@ -30,17 +29,6 @@ struct Value { virtual ~Value() {} }; -class ErrorValue : public Value -{ -private: - static ValueType _type; -public: - static const ErrorValue instance; - bool is_error() const override { return true; } - double as_double() const override { return error_value; } - const ValueType &type() const override { return _type; } -}; - class DoubleValue : public Value { private: diff --git a/eval/src/vespa/eval/eval/value_cache/constant_tensor_loader.cpp b/eval/src/vespa/eval/eval/value_cache/constant_tensor_loader.cpp index afc8471bdb4..2005caa18ec 100644 --- a/eval/src/vespa/eval/eval/value_cache/constant_tensor_loader.cpp +++ b/eval/src/vespa/eval/eval/value_cache/constant_tensor_loader.cpp @@ -75,13 +75,17 @@ ConstantTensorLoader::create(const vespalib::string &path, const vespalib::strin ValueType value_type = ValueType::from_spec(type); if (value_type.is_error()) { LOG(warning, "invalid type specification: %s", type.c_str()); - return std::make_unique<SimpleConstantValue>(_engine.from_spec(TensorSpec("double"))); + return std::make_unique<BadConstantValue>(); } if (ends_with(path, ".tbf")) { vespalib::MappedFileInput file(path); vespalib::Memory content = file.get(); vespalib::nbostream stream(content.data, content.size); - return std::make_unique<SimpleConstantValue>(_engine.decode(stream)); + try { + return std::make_unique<SimpleConstantValue>(_engine.decode(stream)); + } catch (std::exception &) { + return std::make_unique<BadConstantValue>(); + } } Slime slime; decode_json(path, slime); @@ -99,7 +103,11 @@ ConstantTensorLoader::create(const vespalib::string 
&path, const vespalib::strin cells[i]["address"].traverse(extractor); spec.add(address, cells[i]["value"].asDouble()); } - return std::make_unique<SimpleConstantValue>(_engine.from_spec(spec)); + try { + return std::make_unique<SimpleConstantValue>(_engine.from_spec(spec)); + } catch (std::exception &) { + return std::make_unique<BadConstantValue>(); + } } } // namespace vespalib::eval diff --git a/eval/src/vespa/eval/eval/value_cache/constant_value.h b/eval/src/vespa/eval/eval/value_cache/constant_value.h index ba7fe6fcf3d..a288ad70b53 100644 --- a/eval/src/vespa/eval/eval/value_cache/constant_value.h +++ b/eval/src/vespa/eval/eval/value_cache/constant_value.h @@ -30,6 +30,15 @@ public: const Value &value() const override { return *_value; } }; +class BadConstantValue : public ConstantValue { +private: + const ValueType _type; +public: + BadConstantValue() : _type(ValueType::error_type()) {} + const ValueType &type() const override { return _type; } + const Value &value() const override { abort(); } +}; + /** * An abstract factory of constant values. 
The typical use-case for * this will be to load constant values from file with a cache on top diff --git a/eval/src/vespa/eval/tensor/default_tensor_engine.cpp b/eval/src/vespa/eval/tensor/default_tensor_engine.cpp index 2206cde49a9..a265ae5ae85 100644 --- a/eval/src/vespa/eval/tensor/default_tensor_engine.cpp +++ b/eval/src/vespa/eval/tensor/default_tensor_engine.cpp @@ -33,7 +33,6 @@ namespace vespalib::tensor { using eval::Aggr; using eval::Aggregator; using eval::DoubleValue; -using eval::ErrorValue; using eval::TensorFunction; using eval::TensorSpec; using eval::Value; @@ -83,7 +82,7 @@ const Value &to_default(const Value &value, Stash &stash) { const Value &to_value(std::unique_ptr<Tensor> tensor, Stash &stash) { if (!tensor) { - return ErrorValue::instance; + return stash.create<DoubleValue>(eval::error_value); } if (tensor->type().is_tensor()) { return *stash.create<Value::UP>(std::move(tensor)); @@ -93,7 +92,7 @@ const Value &to_value(std::unique_ptr<Tensor> tensor, Stash &stash) { Value::UP to_value(std::unique_ptr<Tensor> tensor) { if (!tensor) { - return std::make_unique<ErrorValue>(); + return std::make_unique<DoubleValue>(eval::error_value); } if (tensor->type().is_tensor()) { return tensor; @@ -171,7 +170,7 @@ DefaultTensorEngine::from_spec(const TensorSpec &spec) const { ValueType type = ValueType::from_spec(spec.type()); if (type.is_error()) { - return std::make_unique<ErrorValue>(); + bad_spec(spec); } else if (type.is_double()) { double value = spec.cells().empty() ? 
0.0 : spec.cells().begin()->second.value; return std::make_unique<DoubleValue>(value); @@ -273,9 +272,7 @@ DefaultTensorEngine::optimize(const TensorFunction &expr, Stash &stash) const const Value & DefaultTensorEngine::map(const Value &a, map_fun_t function, Stash &stash) const { - if (a.is_double()) { - return stash.create<DoubleValue>(function(a.as_double())); - } else if (auto tensor = a.as_tensor()) { + if (auto tensor = a.as_tensor()) { assert(&tensor->engine() == this); const tensor::Tensor &my_a = static_cast<const tensor::Tensor &>(*tensor); if (!tensor::Tensor::supported({my_a.type()})) { @@ -284,63 +281,49 @@ DefaultTensorEngine::map(const Value &a, map_fun_t function, Stash &stash) const CellFunctionFunAdapter cell_function(function); return to_value(my_a.apply(cell_function), stash); } else { - return ErrorValue::instance; + return stash.create<DoubleValue>(function(a.as_double())); } } const Value & DefaultTensorEngine::join(const Value &a, const Value &b, join_fun_t function, Stash &stash) const { - if (a.is_double()) { - if (b.is_double()) { - return stash.create<DoubleValue>(function(a.as_double(), b.as_double())); - } else if (auto tensor_b = b.as_tensor()) { + if (auto tensor_a = a.as_tensor()) { + assert(&tensor_a->engine() == this); + const tensor::Tensor &my_a = static_cast<const tensor::Tensor &>(*tensor_a); + if (auto tensor_b = b.as_tensor()) { assert(&tensor_b->engine() == this); const tensor::Tensor &my_b = static_cast<const tensor::Tensor &>(*tensor_b); - if (!tensor::Tensor::supported({my_b.type()})) { + if (!tensor::Tensor::supported({my_a.type(), my_b.type()})) { return fallback_join(a, b, function, stash); } - CellFunctionBindLeftAdapter cell_function(function, a.as_double()); - return to_value(my_b.apply(cell_function), stash); + return to_value(my_a.join(function, my_b), stash); } else { - return ErrorValue::instance; - } - } else if (auto tensor_a = a.as_tensor()) { - assert(&tensor_a->engine() == this); - const tensor::Tensor 
&my_a = static_cast<const tensor::Tensor &>(*tensor_a); - if (b.is_double()) { if (!tensor::Tensor::supported({my_a.type()})) { return fallback_join(a, b, function, stash); } CellFunctionBindRightAdapter cell_function(function, b.as_double()); return to_value(my_a.apply(cell_function), stash); - } else if (auto tensor_b = b.as_tensor()) { + } + } else { + if (auto tensor_b = b.as_tensor()) { assert(&tensor_b->engine() == this); const tensor::Tensor &my_b = static_cast<const tensor::Tensor &>(*tensor_b); - if (!tensor::Tensor::supported({my_a.type(), my_b.type()})) { + if (!tensor::Tensor::supported({my_b.type()})) { return fallback_join(a, b, function, stash); } - return to_value(my_a.join(function, my_b), stash); + CellFunctionBindLeftAdapter cell_function(function, a.as_double()); + return to_value(my_b.apply(cell_function), stash); } else { - return ErrorValue::instance; + return stash.create<DoubleValue>(function(a.as_double(), b.as_double())); } - } else { - return ErrorValue::instance; } } const Value & DefaultTensorEngine::reduce(const Value &a, Aggr aggr, const std::vector<vespalib::string> &dimensions, Stash &stash) const { - if (a.is_double()) { - if (dimensions.empty()) { - Aggregator &aggregator = Aggregator::create(aggr, stash); - aggregator.first(a.as_double()); - return stash.create<DoubleValue>(aggregator.result()); - } else { - return ErrorValue::instance; - } - } else if (auto tensor = a.as_tensor()) { + if (auto tensor = a.as_tensor()) { assert(&tensor->engine() == this); const tensor::Tensor &my_a = static_cast<const tensor::Tensor &>(*tensor); if (!tensor::Tensor::supported({my_a.type()})) { @@ -360,7 +343,13 @@ DefaultTensorEngine::reduce(const Value &a, Aggr aggr, const std::vector<vespali return fallback_reduce(a, aggr, dimensions, stash); } } else { - return ErrorValue::instance; + if (dimensions.empty()) { + Aggregator &aggregator = Aggregator::create(aggr, stash); + aggregator.first(a.as_double()); + return 
stash.create<DoubleValue>(aggregator.result()); + } else { + return stash.create<DoubleValue>(eval::error_value); + } } } diff --git a/logserver/src/main/java/com/yahoo/logserver/handlers/archive/LogWriter.java b/logserver/src/main/java/com/yahoo/logserver/handlers/archive/LogWriter.java index 47a9b04291d..83d6a4a0def 100644 --- a/logserver/src/main/java/com/yahoo/logserver/handlers/archive/LogWriter.java +++ b/logserver/src/main/java/com/yahoo/logserver/handlers/archive/LogWriter.java @@ -46,11 +46,7 @@ public class LogWriter { * </UL> */ private Writer nextWriter() throws IOException { - - if (writer != null) { - writer.close(); - } - + close(); int maxAttempts = 1000; while (maxAttempts-- > 0) { String name = prefix + "-" + generation++; @@ -119,15 +115,15 @@ public class LogWriter { } - public void flush() throws IOException { + public synchronized void flush() throws IOException { if (writer != null) { writer.flush(); } } - public void close() throws IOException { - flush(); + public synchronized void close() throws IOException { if (writer != null) { + writer.flush(); writer.close(); writer = null; } diff --git a/logserver/src/main/java/com/yahoo/logserver/handlers/archive/LogWriterLRUCache.java b/logserver/src/main/java/com/yahoo/logserver/handlers/archive/LogWriterLRUCache.java index 3d692297f1c..5c1da722f57 100644 --- a/logserver/src/main/java/com/yahoo/logserver/handlers/archive/LogWriterLRUCache.java +++ b/logserver/src/main/java/com/yahoo/logserver/handlers/archive/LogWriterLRUCache.java @@ -14,7 +14,7 @@ import java.util.Map; public class LogWriterLRUCache extends LinkedHashMap<Integer, LogWriter> { private static final Logger log = Logger.getLogger(LogWriterLRUCache.class.getName()); - final int maxEntries = 100; + final int maxEntries = 5; public LogWriterLRUCache(int initialCapacity, float loadFactor) { super(initialCapacity, loadFactor, true); diff --git a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/operations/Softmax.java 
b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/operations/Softmax.java new file mode 100644 index 00000000000..cdacbe1656a --- /dev/null +++ b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/operations/Softmax.java @@ -0,0 +1,40 @@ +// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package ai.vespa.rankingexpression.importer.operations; + +import ai.vespa.rankingexpression.importer.OrderedTensorType; +import com.yahoo.tensor.functions.TensorFunction; + +import java.util.List; + +/** + * Convert imported 'softmax' operation to the Vespa softmax ranking function. + * + * @author lesters + */ +public class Softmax extends IntermediateOperation { + + public Softmax(String modelName, String nodeName, List<IntermediateOperation> inputs) { + super(modelName, nodeName, inputs); + } + + @Override + protected OrderedTensorType lazyGetType() { + if ( ! allInputTypesPresent(1)) return null; + return inputs.get(0).type().get(); + } + + @Override + protected TensorFunction lazyGetFunction() { + if ( ! 
allInputFunctionsPresent(1)) return null; + + OrderedTensorType inputType = inputs.get(0).type().get(); + String dimension = inputType.dimensions().get(0).name(); + if (inputType.rank() == 2) { + dimension = inputType.dimensions().get(1).name(); // assumption: first dimension is batch dimension + } + + TensorFunction inputFunction = inputs.get(0).function().get(); + return new com.yahoo.tensor.functions.Softmax(inputFunction, dimension); + } + +} diff --git a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/tensorflow/GraphImporter.java b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/tensorflow/GraphImporter.java index 1abbd0063a1..357794faee2 100644 --- a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/tensorflow/GraphImporter.java +++ b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/tensorflow/GraphImporter.java @@ -2,6 +2,7 @@ package ai.vespa.rankingexpression.importer.tensorflow; +import ai.vespa.rankingexpression.importer.operations.Softmax; import ai.vespa.rankingexpression.importer.operations.Sum; import com.yahoo.searchlib.rankingexpression.evaluation.TensorValue; import ai.vespa.rankingexpression.importer.IntermediateGraph; @@ -112,6 +113,7 @@ class GraphImporter { case "elu": return new Map(modelName, nodeName, inputs, ScalarFunctions.elu()); case "relu": return new Map(modelName, nodeName, inputs, ScalarFunctions.relu()); case "selu": return new Map(modelName, nodeName, inputs, ScalarFunctions.selu()); + case "softmax": return new Softmax(modelName, nodeName, inputs); // state ops case "variable": return new Constant(modelName, nodeName, nodeType); diff --git a/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/BatchNormImportTestCase.java b/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/BatchNormImportTestCase.java index 1b8d06bf964..e75c7fd4da3 100644 --- 
a/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/BatchNormImportTestCase.java +++ b/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/BatchNormImportTestCase.java @@ -22,7 +22,7 @@ public class BatchNormImportTestCase { "src/test/models/tensorflow/batch_norm/saved"); ImportedModel.Signature signature = model.get().signature("serving_default"); - assertEquals("Has skipped outputs", + assertEquals("Should have no skipped outputs", 0, model.get().signature("serving_default").skippedOutputs().size()); diff --git a/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/DropoutImportTestCase.java b/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/DropoutImportTestCase.java index 5e5c81ddcf1..b9d767774be 100644 --- a/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/DropoutImportTestCase.java +++ b/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/DropoutImportTestCase.java @@ -29,7 +29,7 @@ public class DropoutImportTestCase { ImportedModel.Signature signature = model.get().signature("serving_default"); - Assert.assertEquals("Has skipped outputs", + Assert.assertEquals("Should have no skipped outputs", 0, model.get().signature("serving_default").skippedOutputs().size()); ImportedMlFunction function = signature.outputFunction("y", "y"); diff --git a/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/MnistImportTestCase.java b/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/MnistImportTestCase.java index 6b3e9207fad..c13ed84f701 100644 --- a/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/MnistImportTestCase.java +++ b/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/MnistImportTestCase.java @@ -19,7 +19,7 @@ public class MnistImportTestCase { public void testMnistImport() { 
TestableTensorFlowModel model = new TestableTensorFlowModel("test", "src/test/models/tensorflow/mnist/saved"); ImportedModel.Signature signature = model.get().signature("serving_default"); - Assert.assertEquals("Has skipped outputs", + Assert.assertEquals("Should have no skipped outputs", 0, model.get().signature("serving_default").skippedOutputs().size()); ImportedMlFunction output = signature.outputFunction("y", "y"); diff --git a/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/SoftmaxImportTestCase.java b/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/SoftmaxImportTestCase.java new file mode 100644 index 00000000000..525f915b252 --- /dev/null +++ b/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/SoftmaxImportTestCase.java @@ -0,0 +1,29 @@ +// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package ai.vespa.rankingexpression.importer.tensorflow; + +import ai.vespa.rankingexpression.importer.ImportedModel; +import ai.vespa.rankingexpression.importer.configmodelview.ImportedMlFunction; +import org.junit.Assert; +import org.junit.Test; + + +import static org.junit.Assert.assertNotNull; + +/** + * @author lesters + */ +public class SoftmaxImportTestCase { + + @Test + public void testSoftmaxImport() { + TestableTensorFlowModel model = new TestableTensorFlowModel("test", "src/test/models/tensorflow/softmax/saved", 1, 5); + ImportedModel.Signature signature = model.get().signature("serving_default"); + Assert.assertEquals("Should have no skipped outputs", + 0, model.get().signature("serving_default").skippedOutputs().size()); + + ImportedMlFunction output = signature.outputFunction("y", "y"); + assertNotNull(output); + model.assertEqualResult("input", "output"); + } + +} diff --git a/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/TestableTensorFlowModel.java 
b/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/TestableTensorFlowModel.java index 4ff0c96d369..9d2f8cf0692 100644 --- a/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/TestableTensorFlowModel.java +++ b/model-integration/src/test/java/ai/vespa/rankingexpression/importer/tensorflow/TestableTensorFlowModel.java @@ -33,14 +33,20 @@ public class TestableTensorFlowModel { private ImportedModel model; // Sizes of the input vector - private final int d0Size = 1; - private final int d1Size = 784; + private int d0Size = 1; + private int d1Size = 784; public TestableTensorFlowModel(String modelName, String modelDir) { tensorFlowModel = SavedModelBundle.load(modelDir, "serve"); model = new TensorFlowImporter().importModel(modelName, modelDir, tensorFlowModel); } + public TestableTensorFlowModel(String modelName, String modelDir, int d0Size, int d1Size) { + this(modelName, modelDir); + this.d0Size = d0Size; + this.d1Size = d1Size; + } + public ImportedModel get() { return model; } /** Compare that summing the tensors produce the same result to within some tolerance delta */ diff --git a/model-integration/src/test/java/ai/vespa/rankingexpression/importer/vespa/VespaImportTestCase.java b/model-integration/src/test/java/ai/vespa/rankingexpression/importer/vespa/VespaImportTestCase.java index c7210e6710a..faa603d0ab0 100644 --- a/model-integration/src/test/java/ai/vespa/rankingexpression/importer/vespa/VespaImportTestCase.java +++ b/model-integration/src/test/java/ai/vespa/rankingexpression/importer/vespa/VespaImportTestCase.java @@ -31,11 +31,11 @@ public class VespaImportTestCase { assertEquals("tensor(x[3])", model.inputs().get("input2").toString()); assertEquals(2, model.smallConstants().size()); - assertEquals("tensor(x[3]):{{x:0}:0.5,{x:1}:1.5,{x:2}:2.5}", model.smallConstants().get("constant1")); + assertEquals("tensor(x[3]):[0.5, 1.5, 2.5]", model.smallConstants().get("constant1")); 
assertEquals("tensor():{3.0}", model.smallConstants().get("constant2")); assertEquals(1, model.largeConstants().size()); - assertEquals("tensor(x[3]):{{x:0}:0.5,{x:1}:1.5,{x:2}:2.5}", model.largeConstants().get("constant1asLarge")); + assertEquals("tensor(x[3]):[0.5, 1.5, 2.5]", model.largeConstants().get("constant1asLarge")); assertEquals(2, model.expressions().size()); assertEquals("reduce(reduce(input1 * input2, sum, name) * constant1, max, x) * constant2", diff --git a/model-integration/src/test/models/tensorflow/softmax/saved/saved_model.pbtxt b/model-integration/src/test/models/tensorflow/softmax/saved/saved_model.pbtxt new file mode 100644 index 00000000000..11435ce3fa1 --- /dev/null +++ b/model-integration/src/test/models/tensorflow/softmax/saved/saved_model.pbtxt @@ -0,0 +1,1999 @@ +saved_model_schema_version: 1 +meta_graphs { + meta_info_def { + stripped_op_list { + op { + name: "Add" + input_arg { + name: "x" + type_attr: "T" + } + input_arg { + name: "y" + type_attr: "T" + } + output_arg { + name: "z" + type_attr: "T" + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_BFLOAT16 + type: DT_HALF + type: DT_FLOAT + type: DT_DOUBLE + type: DT_UINT8 + type: DT_INT8 + type: DT_INT16 + type: DT_INT32 + type: DT_INT64 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + type: DT_STRING + } + } + } + } + op { + name: "Assign" + input_arg { + name: "ref" + type_attr: "T" + is_ref: true + } + input_arg { + name: "value" + type_attr: "T" + } + output_arg { + name: "output_ref" + type_attr: "T" + is_ref: true + } + attr { + name: "T" + type: "type" + } + attr { + name: "validate_shape" + type: "bool" + default_value { + b: true + } + } + attr { + name: "use_locking" + type: "bool" + default_value { + b: true + } + } + allows_uninitialized_input: true + } + op { + name: "Const" + output_arg { + name: "output" + type_attr: "dtype" + } + attr { + name: "value" + type: "tensor" + } + attr { + name: "dtype" + type: "type" + } + } + op { + name: 
"Identity" + input_arg { + name: "input" + type_attr: "T" + } + output_arg { + name: "output" + type_attr: "T" + } + attr { + name: "T" + type: "type" + } + } + op { + name: "MatMul" + input_arg { + name: "a" + type_attr: "T" + } + input_arg { + name: "b" + type_attr: "T" + } + output_arg { + name: "product" + type_attr: "T" + } + attr { + name: "transpose_a" + type: "bool" + default_value { + b: false + } + } + attr { + name: "transpose_b" + type: "bool" + default_value { + b: false + } + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_BFLOAT16 + type: DT_HALF + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT32 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + } + } + } + } + op { + name: "MergeV2Checkpoints" + input_arg { + name: "checkpoint_prefixes" + type: DT_STRING + } + input_arg { + name: "destination_prefix" + type: DT_STRING + } + attr { + name: "delete_old_dirs" + type: "bool" + default_value { + b: true + } + } + is_stateful: true + } + op { + name: "Mul" + input_arg { + name: "x" + type_attr: "T" + } + input_arg { + name: "y" + type_attr: "T" + } + output_arg { + name: "z" + type_attr: "T" + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_BFLOAT16 + type: DT_HALF + type: DT_FLOAT + type: DT_DOUBLE + type: DT_UINT8 + type: DT_INT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT32 + type: DT_INT64 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + } + } + } + is_commutative: true + } + op { + name: "NoOp" + } + op { + name: "Pack" + input_arg { + name: "values" + type_attr: "T" + number_attr: "N" + } + output_arg { + name: "output" + type_attr: "T" + } + attr { + name: "N" + type: "int" + has_minimum: true + minimum: 1 + } + attr { + name: "T" + type: "type" + } + attr { + name: "axis" + type: "int" + default_value { + i: 0 + } + } + } + op { + name: "Placeholder" + output_arg { + name: "output" + type_attr: "dtype" + } + attr { + name: "dtype" + type: "type" + } + attr { + name: "shape" + type: "shape" + 
default_value { + shape { + unknown_rank: true + } + } + } + } + op { + name: "RandomUniform" + input_arg { + name: "shape" + type_attr: "T" + } + output_arg { + name: "output" + type_attr: "dtype" + } + attr { + name: "seed" + type: "int" + default_value { + i: 0 + } + } + attr { + name: "seed2" + type: "int" + default_value { + i: 0 + } + } + attr { + name: "dtype" + type: "type" + allowed_values { + list { + type: DT_HALF + type: DT_BFLOAT16 + type: DT_FLOAT + type: DT_DOUBLE + } + } + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_INT32 + type: DT_INT64 + } + } + } + is_stateful: true + } + op { + name: "Relu" + input_arg { + name: "features" + type_attr: "T" + } + output_arg { + name: "activations" + type_attr: "T" + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_FLOAT + type: DT_DOUBLE + type: DT_INT32 + type: DT_UINT8 + type: DT_INT16 + type: DT_INT8 + type: DT_INT64 + type: DT_BFLOAT16 + type: DT_UINT16 + type: DT_HALF + type: DT_UINT32 + type: DT_UINT64 + type: DT_QINT8 + } + } + } + } + op { + name: "RestoreV2" + input_arg { + name: "prefix" + type: DT_STRING + } + input_arg { + name: "tensor_names" + type: DT_STRING + } + input_arg { + name: "shape_and_slices" + type: DT_STRING + } + output_arg { + name: "tensors" + type_list_attr: "dtypes" + } + attr { + name: "dtypes" + type: "list(type)" + has_minimum: true + minimum: 1 + } + is_stateful: true + } + op { + name: "SaveV2" + input_arg { + name: "prefix" + type: DT_STRING + } + input_arg { + name: "tensor_names" + type: DT_STRING + } + input_arg { + name: "shape_and_slices" + type: DT_STRING + } + input_arg { + name: "tensors" + type_list_attr: "dtypes" + } + attr { + name: "dtypes" + type: "list(type)" + has_minimum: true + minimum: 1 + } + is_stateful: true + } + op { + name: "ShardedFilename" + input_arg { + name: "basename" + type: DT_STRING + } + input_arg { + name: "shard" + type: DT_INT32 + } + input_arg { + name: "num_shards" + type: 
DT_INT32 + } + output_arg { + name: "filename" + type: DT_STRING + } + } + op { + name: "Softmax" + input_arg { + name: "logits" + type_attr: "T" + } + output_arg { + name: "softmax" + type_attr: "T" + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_HALF + type: DT_BFLOAT16 + type: DT_FLOAT + type: DT_DOUBLE + } + } + } + } + op { + name: "StringJoin" + input_arg { + name: "inputs" + type: DT_STRING + number_attr: "N" + } + output_arg { + name: "output" + type: DT_STRING + } + attr { + name: "N" + type: "int" + has_minimum: true + minimum: 1 + } + attr { + name: "separator" + type: "string" + default_value { + s: "" + } + } + } + op { + name: "Sub" + input_arg { + name: "x" + type_attr: "T" + } + input_arg { + name: "y" + type_attr: "T" + } + output_arg { + name: "z" + type_attr: "T" + } + attr { + name: "T" + type: "type" + allowed_values { + list { + type: DT_BFLOAT16 + type: DT_HALF + type: DT_FLOAT + type: DT_DOUBLE + type: DT_UINT8 + type: DT_INT8 + type: DT_UINT16 + type: DT_INT16 + type: DT_INT32 + type: DT_INT64 + type: DT_COMPLEX64 + type: DT_COMPLEX128 + } + } + } + } + op { + name: "VariableV2" + output_arg { + name: "ref" + type_attr: "dtype" + is_ref: true + } + attr { + name: "shape" + type: "shape" + } + attr { + name: "dtype" + type: "type" + } + attr { + name: "container" + type: "string" + default_value { + s: "" + } + } + attr { + name: "shared_name" + type: "string" + default_value { + s: "" + } + } + is_stateful: true + } + } + tags: "serve" + tensorflow_version: "1.12.0" + tensorflow_git_version: "v1.12.0-rc2-3-ga6d8ffae09" + } + graph_def { + node { + name: "input" + op: "Placeholder" + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: -1 + } + dim { + size: 5 + } + } + } + } + } + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "shape" + value { + shape { + dim { + size: -1 + } + dim { + size: 5 + } + } + } + } + } + node { + name: "random_uniform/shape" + op: 
"Const" + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 2 + } + } + } + } + } + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 2 + } + } + tensor_content: "\005\000\000\000\003\000\000\000" + } + } + } + } + node { + name: "random_uniform/min" + op: "Const" + attr { + key: "_output_shapes" + value { + list { + shape { + } + } + } + } + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + } + float_val: 0.0 + } + } + } + } + node { + name: "random_uniform/max" + op: "Const" + attr { + key: "_output_shapes" + value { + list { + shape { + } + } + } + } + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + } + float_val: 1.0 + } + } + } + } + node { + name: "random_uniform/RandomUniform" + op: "RandomUniform" + input: "random_uniform/shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 5 + } + dim { + size: 3 + } + } + } + } + } + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "seed" + value { + i: 0 + } + } + attr { + key: "seed2" + value { + i: 0 + } + } + } + node { + name: "random_uniform/sub" + op: "Sub" + input: "random_uniform/max" + input: "random_uniform/min" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_output_shapes" + value { + list { + shape { + } + } + } + } + } + node { + name: "random_uniform/mul" + op: "Mul" + input: "random_uniform/RandomUniform" + input: "random_uniform/sub" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 5 + } + dim { + size: 3 + } + } + } + } + } + } + node { + name: "random_uniform" + op: "Add" + input: 
"random_uniform/mul" + input: "random_uniform/min" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 5 + } + dim { + size: 3 + } + } + } + } + } + } + node { + name: "weights" + op: "VariableV2" + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 5 + } + dim { + size: 3 + } + } + } + } + } + attr { + key: "container" + value { + s: "" + } + } + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "shape" + value { + shape { + dim { + size: 5 + } + dim { + size: 3 + } + } + } + } + attr { + key: "shared_name" + value { + s: "" + } + } + } + node { + name: "weights/Assign" + op: "Assign" + input: "weights" + input: "random_uniform" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@weights" + } + } + } + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 5 + } + dim { + size: 3 + } + } + } + } + } + attr { + key: "use_locking" + value { + b: true + } + } + attr { + key: "validate_shape" + value { + b: true + } + } + } + node { + name: "weights/read" + op: "Identity" + input: "weights" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@weights" + } + } + } + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 5 + } + dim { + size: 3 + } + } + } + } + } + } + node { + name: "random_uniform_1/shape" + op: "Const" + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 1 + } + } + } + } + } + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + dim { + size: 1 + } + } + int_val: 3 + } + } + } + } + node { + name: "random_uniform_1/min" + op: "Const" + attr { + key: "_output_shapes" + value { + list { + shape { + } + } + } + } + attr { + key: "dtype" + value { + type: 
DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + } + float_val: 0.0 + } + } + } + } + node { + name: "random_uniform_1/max" + op: "Const" + attr { + key: "_output_shapes" + value { + list { + shape { + } + } + } + } + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_FLOAT + tensor_shape { + } + float_val: 1.0 + } + } + } + } + node { + name: "random_uniform_1/RandomUniform" + op: "RandomUniform" + input: "random_uniform_1/shape" + attr { + key: "T" + value { + type: DT_INT32 + } + } + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 3 + } + } + } + } + } + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "seed" + value { + i: 0 + } + } + attr { + key: "seed2" + value { + i: 0 + } + } + } + node { + name: "random_uniform_1/sub" + op: "Sub" + input: "random_uniform_1/max" + input: "random_uniform_1/min" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_output_shapes" + value { + list { + shape { + } + } + } + } + } + node { + name: "random_uniform_1/mul" + op: "Mul" + input: "random_uniform_1/RandomUniform" + input: "random_uniform_1/sub" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 3 + } + } + } + } + } + } + node { + name: "random_uniform_1" + op: "Add" + input: "random_uniform_1/mul" + input: "random_uniform_1/min" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 3 + } + } + } + } + } + } + node { + name: "bias" + op: "VariableV2" + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 3 + } + } + } + } + } + attr { + key: "container" + value { + s: "" + } + } + attr { + key: "dtype" + value { + type: DT_FLOAT + } + } + attr { + key: "shape" + value { + shape { + dim { + size: 3 + } + } + 
} + } + attr { + key: "shared_name" + value { + s: "" + } + } + } + node { + name: "bias/Assign" + op: "Assign" + input: "bias" + input: "random_uniform_1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@bias" + } + } + } + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 3 + } + } + } + } + } + attr { + key: "use_locking" + value { + b: true + } + } + attr { + key: "validate_shape" + value { + b: true + } + } + } + node { + name: "bias/read" + op: "Identity" + input: "bias" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@bias" + } + } + } + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 3 + } + } + } + } + } + } + node { + name: "MatMul" + op: "MatMul" + input: "input" + input: "weights/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: -1 + } + dim { + size: 3 + } + } + } + } + } + attr { + key: "transpose_a" + value { + b: false + } + } + attr { + key: "transpose_b" + value { + b: false + } + } + } + node { + name: "add" + op: "Add" + input: "MatMul" + input: "bias/read" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: -1 + } + dim { + size: 3 + } + } + } + } + } + } + node { + name: "Relu" + op: "Relu" + input: "add" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: -1 + } + dim { + size: 3 + } + } + } + } + } + } + node { + name: "output" + op: "Softmax" + input: "Relu" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: -1 + } + dim { + size: 3 + } + } + } + } + } + } + node { + name: "init" + op: "NoOp" + input: "^bias/Assign" + input: 
"^weights/Assign" + } + node { + name: "save/Const" + op: "Const" + attr { + key: "_output_shapes" + value { + list { + shape { + } + } + } + } + attr { + key: "dtype" + value { + type: DT_STRING + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_STRING + tensor_shape { + } + string_val: "model" + } + } + } + } + node { + name: "save/StringJoin/inputs_1" + op: "Const" + attr { + key: "_output_shapes" + value { + list { + shape { + } + } + } + } + attr { + key: "dtype" + value { + type: DT_STRING + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_STRING + tensor_shape { + } + string_val: "_temp_6341ee658682497a95c4fd82a2c87cc6/part" + } + } + } + } + node { + name: "save/StringJoin" + op: "StringJoin" + input: "save/Const" + input: "save/StringJoin/inputs_1" + attr { + key: "N" + value { + i: 2 + } + } + attr { + key: "_output_shapes" + value { + list { + shape { + } + } + } + } + attr { + key: "separator" + value { + s: "" + } + } + } + node { + name: "save/num_shards" + op: "Const" + attr { + key: "_output_shapes" + value { + list { + shape { + } + } + } + } + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 1 + } + } + } + } + node { + name: "save/ShardedFilename/shard" + op: "Const" + device: "/device:CPU:0" + attr { + key: "_output_shapes" + value { + list { + shape { + } + } + } + } + attr { + key: "dtype" + value { + type: DT_INT32 + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_INT32 + tensor_shape { + } + int_val: 0 + } + } + } + } + node { + name: "save/ShardedFilename" + op: "ShardedFilename" + input: "save/StringJoin" + input: "save/ShardedFilename/shard" + input: "save/num_shards" + device: "/device:CPU:0" + attr { + key: "_output_shapes" + value { + list { + shape { + } + } + } + } + } + node { + name: "save/SaveV2/tensor_names" + op: "Const" + device: "/device:CPU:0" + attr { + key: "_output_shapes" + 
value { + list { + shape { + dim { + size: 2 + } + } + } + } + } + attr { + key: "dtype" + value { + type: DT_STRING + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_STRING + tensor_shape { + dim { + size: 2 + } + } + string_val: "bias" + string_val: "weights" + } + } + } + } + node { + name: "save/SaveV2/shape_and_slices" + op: "Const" + device: "/device:CPU:0" + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 2 + } + } + } + } + } + attr { + key: "dtype" + value { + type: DT_STRING + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_STRING + tensor_shape { + dim { + size: 2 + } + } + string_val: "" + string_val: "" + } + } + } + } + node { + name: "save/SaveV2" + op: "SaveV2" + input: "save/ShardedFilename" + input: "save/SaveV2/tensor_names" + input: "save/SaveV2/shape_and_slices" + input: "bias" + input: "weights" + device: "/device:CPU:0" + attr { + key: "dtypes" + value { + list { + type: DT_FLOAT + type: DT_FLOAT + } + } + } + } + node { + name: "save/control_dependency" + op: "Identity" + input: "save/ShardedFilename" + input: "^save/SaveV2" + device: "/device:CPU:0" + attr { + key: "T" + value { + type: DT_STRING + } + } + attr { + key: "_class" + value { + list { + s: "loc:@save/ShardedFilename" + } + } + } + attr { + key: "_output_shapes" + value { + list { + shape { + } + } + } + } + } + node { + name: "save/MergeV2Checkpoints/checkpoint_prefixes" + op: "Pack" + input: "save/ShardedFilename" + input: "^save/control_dependency" + device: "/device:CPU:0" + attr { + key: "N" + value { + i: 1 + } + } + attr { + key: "T" + value { + type: DT_STRING + } + } + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 1 + } + } + } + } + } + attr { + key: "axis" + value { + i: 0 + } + } + } + node { + name: "save/MergeV2Checkpoints" + op: "MergeV2Checkpoints" + input: "save/MergeV2Checkpoints/checkpoint_prefixes" + input: "save/Const" + device: "/device:CPU:0" + attr { + key: 
"delete_old_dirs" + value { + b: true + } + } + } + node { + name: "save/Identity" + op: "Identity" + input: "save/Const" + input: "^save/MergeV2Checkpoints" + input: "^save/control_dependency" + device: "/device:CPU:0" + attr { + key: "T" + value { + type: DT_STRING + } + } + attr { + key: "_output_shapes" + value { + list { + shape { + } + } + } + } + } + node { + name: "save/RestoreV2/tensor_names" + op: "Const" + device: "/device:CPU:0" + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 2 + } + } + } + } + } + attr { + key: "dtype" + value { + type: DT_STRING + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_STRING + tensor_shape { + dim { + size: 2 + } + } + string_val: "bias" + string_val: "weights" + } + } + } + } + node { + name: "save/RestoreV2/shape_and_slices" + op: "Const" + device: "/device:CPU:0" + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 2 + } + } + } + } + } + attr { + key: "dtype" + value { + type: DT_STRING + } + } + attr { + key: "value" + value { + tensor { + dtype: DT_STRING + tensor_shape { + dim { + size: 2 + } + } + string_val: "" + string_val: "" + } + } + } + } + node { + name: "save/RestoreV2" + op: "RestoreV2" + input: "save/Const" + input: "save/RestoreV2/tensor_names" + input: "save/RestoreV2/shape_and_slices" + device: "/device:CPU:0" + attr { + key: "_output_shapes" + value { + list { + shape { + unknown_rank: true + } + shape { + unknown_rank: true + } + } + } + } + attr { + key: "dtypes" + value { + list { + type: DT_FLOAT + type: DT_FLOAT + } + } + } + } + node { + name: "save/Assign" + op: "Assign" + input: "bias" + input: "save/RestoreV2" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@bias" + } + } + } + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 3 + } + } + } + } + } + attr { + key: "use_locking" + value { + b: true + } + } + attr { + key: "validate_shape" + 
value { + b: true + } + } + } + node { + name: "save/Assign_1" + op: "Assign" + input: "weights" + input: "save/RestoreV2:1" + attr { + key: "T" + value { + type: DT_FLOAT + } + } + attr { + key: "_class" + value { + list { + s: "loc:@weights" + } + } + } + attr { + key: "_output_shapes" + value { + list { + shape { + dim { + size: 5 + } + dim { + size: 3 + } + } + } + } + } + attr { + key: "use_locking" + value { + b: true + } + } + attr { + key: "validate_shape" + value { + b: true + } + } + } + node { + name: "save/restore_shard" + op: "NoOp" + input: "^save/Assign" + input: "^save/Assign_1" + } + node { + name: "save/restore_all" + op: "NoOp" + input: "^save/restore_shard" + } + versions { + producer: 27 + } + } + saver_def { + filename_tensor_name: "save/Const:0" + save_tensor_name: "save/Identity:0" + restore_op_name: "save/restore_all" + max_to_keep: 5 + sharded: true + keep_checkpoint_every_n_hours: 10000.0 + version: V2 + } + collection_def { + key: "trainable_variables" + value { + bytes_list { + value: "\n\tweights:0\022\016weights/Assign\032\016weights/read:02\020random_uniform:08\001" + value: "\n\006bias:0\022\013bias/Assign\032\013bias/read:02\022random_uniform_1:08\001" + } + } + } + collection_def { + key: "variables" + value { + bytes_list { + value: "\n\tweights:0\022\016weights/Assign\032\016weights/read:02\020random_uniform:08\001" + value: "\n\006bias:0\022\013bias/Assign\032\013bias/read:02\022random_uniform_1:08\001" + } + } + } + signature_def { + key: "serving_default" + value { + inputs { + key: "x" + value { + name: "input:0" + dtype: DT_FLOAT + tensor_shape { + dim { + size: -1 + } + dim { + size: 5 + } + } + } + } + outputs { + key: "y" + value { + name: "output:0" + dtype: DT_FLOAT + tensor_shape { + dim { + size: -1 + } + dim { + size: 3 + } + } + } + } + method_name: "tensorflow/serving/predict" + } + } +} diff --git a/model-integration/src/test/models/tensorflow/softmax/saved/variables/variables.data-00000-of-00001 
b/model-integration/src/test/models/tensorflow/softmax/saved/variables/variables.data-00000-of-00001 Binary files differnew file mode 100644 index 00000000000..a9edaf376d0 --- /dev/null +++ b/model-integration/src/test/models/tensorflow/softmax/saved/variables/variables.data-00000-of-00001 diff --git a/model-integration/src/test/models/tensorflow/softmax/saved/variables/variables.index b/model-integration/src/test/models/tensorflow/softmax/saved/variables/variables.index Binary files differnew file mode 100644 index 00000000000..0ae49491ce6 --- /dev/null +++ b/model-integration/src/test/models/tensorflow/softmax/saved/variables/variables.index diff --git a/model-integration/src/test/models/tensorflow/softmax/softmax.py b/model-integration/src/test/models/tensorflow/softmax/softmax.py new file mode 100644 index 00000000000..aab9956f914 --- /dev/null +++ b/model-integration/src/test/models/tensorflow/softmax/softmax.py @@ -0,0 +1,29 @@ +# Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +import numpy as np +import tensorflow as tf + +# Creates simple random neural network that has softmax on output. No training. 
+ +n_inputs = 5 +n_outputs = 3 + +input = tf.placeholder(tf.float32, shape=(None, n_inputs), name="input") +W = tf.Variable(tf.random.uniform([n_inputs, n_outputs]), name="weights") +b = tf.Variable(tf.random.uniform([n_outputs]), name="bias") +Z = tf.matmul(input, W) + b +hidden_layer = tf.nn.relu(Z) +output_layer = tf.nn.softmax(hidden_layer, name="output") + +init = tf.global_variables_initializer() + +with tf.Session() as sess: + init.run() + export_path = "saved" + builder = tf.saved_model.builder.SavedModelBuilder(export_path) + signature = tf.saved_model.signature_def_utils.predict_signature_def(inputs = {'x':input}, outputs = {'y':output_layer}) + builder.add_meta_graph_and_variables(sess, + [tf.saved_model.tag_constants.SERVING], + signature_def_map={'serving_default':signature}) + builder.save(as_text=True) + diff --git a/node-admin/src/main/application/services.xml b/node-admin/src/main/application/services.xml index db00c686c99..d5a4dce7c5a 100644 --- a/node-admin/src/main/application/services.xml +++ b/node-admin/src/main/application/services.xml @@ -5,7 +5,7 @@ <!-- Please update container test when changing this file --> <accesslog type="vespa" fileNamePattern="logs/vespa/node-admin/access.log.%Y%m%d%H%M%S" symlinkName="access.log" /> <component id="docker-api" class="com.yahoo.vespa.hosted.dockerapi.DockerImpl" bundle="docker-api"/> - <component id="metrics-wrapper" class="com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper" bundle="docker-api"/> + <component id="metrics" class="com.yahoo.vespa.hosted.dockerapi.metrics.Metrics" bundle="docker-api"/> <preprocess:include file="variant.xml" required="false"/> </container> diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/ConfigServerApiImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/ConfigServerApiImpl.java index 0e6000c651b..73c86fc8de1 100644 --- 
a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/ConfigServerApiImpl.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/ConfigServerApiImpl.java @@ -7,7 +7,6 @@ import com.yahoo.config.provision.HostName; import com.yahoo.vespa.athenz.identity.ServiceIdentitySslSocketFactory; import com.yahoo.vespa.athenz.identity.SiaIdentityProvider; import com.yahoo.vespa.hosted.node.admin.component.ConfigServerInfo; -import com.yahoo.vespa.hosted.node.admin.util.PrefixLogger; import org.apache.http.HttpHeaders; import org.apache.http.client.config.RequestConfig; import org.apache.http.client.methods.CloseableHttpResponse; @@ -38,6 +37,7 @@ import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Optional; +import java.util.logging.Logger; /** * Retries request on config server a few times before giving up. Assumes that all requests should be sent with @@ -47,7 +47,7 @@ import java.util.Optional; * @author bjorncs */ public class ConfigServerApiImpl implements ConfigServerApi { - private static final PrefixLogger NODE_ADMIN_LOGGER = PrefixLogger.getNodeAdminLogger(ConfigServerApiImpl.class); + private static final Logger logger = Logger.getLogger(ConfigServerApiImpl.class.getName()); private final ObjectMapper mapper = new ObjectMapper(); @@ -106,7 +106,7 @@ public class ConfigServerApiImpl implements ConfigServerApi { try { return mapper.readValue(response.getEntity().getContent(), wantedReturnType); } catch (IOException e) { - throw new RuntimeException("Failed parse response from config server", e); + throw new UncheckedIOException("Failed parse response from config server", e); } } catch (HttpException e) { if (!e.isRetryable()) throw e; @@ -117,15 +117,15 @@ public class ConfigServerApiImpl implements ConfigServerApi { // Failure to communicate with a config server is not abnormal during upgrades if (e.getMessage().contains("(Connection refused)")) { - 
NODE_ADMIN_LOGGER.info("Connection refused to " + configServer + " (upgrading?), will try next"); + logger.info("Connection refused to " + configServer + " (upgrading?), will try next"); } else { - NODE_ADMIN_LOGGER.warning("Failed to communicate with " + configServer + ", will try next: " + e.getMessage()); + logger.warning("Failed to communicate with " + configServer + ", will try next: " + e.getMessage()); } } } - throw new RuntimeException("All requests against the config servers (" - + configServers + ") failed, last as follows:", lastException); + throw HttpException.handleException( + "All requests against the config servers (" + configServers + ") failed, last as follows:", lastException); } @Override diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/HttpException.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/HttpException.java index 256fe38ec68..3825107bfa6 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/HttpException.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/HttpException.java @@ -1,13 +1,19 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.hosted.node.admin.configserver; +import com.yahoo.vespa.hosted.node.admin.nodeadmin.ConvergenceException; +import org.apache.http.NoHttpResponseException; + import javax.ws.rs.core.Response; +import java.io.EOFException; +import java.net.SocketException; +import java.net.SocketTimeoutException; /** * @author hakonhall */ @SuppressWarnings("serial") -public class HttpException extends RuntimeException { +public class HttpException extends ConvergenceException { private final boolean isRetryable; @@ -21,7 +27,12 @@ public class HttpException extends RuntimeException { this.isRetryable = isRetryable; } - public boolean isRetryable() { + private HttpException(String message) { + super(message); + this.isRetryable = false; + } + + boolean isRetryable() { return isRetryable; } @@ -55,6 +66,22 @@ public class HttpException extends RuntimeException { throw new HttpException(status, message, true); } + /** + * Returns {@link HttpException} if the given Throwable is of a known and well understood error or + * a RuntimeException with the given exception as cause otherwise. 
+ */ + public static RuntimeException handleException(String prefix, Throwable t) { + for (; t != null; t = t.getCause()) { + if (t instanceof SocketException || + t instanceof SocketTimeoutException || + t instanceof NoHttpResponseException || + t instanceof EOFException) + return new HttpException(prefix + t.getMessage()); + } + + return new RuntimeException(prefix, t); + } + public static class NotFoundException extends HttpException { public NotFoundException(String message) { super(Response.Status.NOT_FOUND, message, false); diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeMembership.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeMembership.java index bb16e2bae63..22633f67463 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeMembership.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeMembership.java @@ -19,19 +19,19 @@ public class NodeMembership { this.retired = retired; } - public String getClusterType() { + public String clusterType() { return clusterType; } - public String getClusterId() { + public String clusterId() { return clusterId; } - public String getGroup() { + public String group() { return group; } - public int getIndex() { + public int index() { return index; } diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeOwner.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeOwner.java index c1900316bb9..c41e050d534 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeOwner.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeOwner.java @@ -17,15 +17,15 @@ public class NodeOwner { this.instance = instance; } - public String getTenant() { + public 
String tenant() { return tenant; } - public String getApplication() { + public String application() { return application; } - public String getInstance() { + public String instance() { return instance; } diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeRepositoryException.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeRepositoryException.java index 6094518c3fc..10e61d373d2 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeRepositoryException.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeRepositoryException.java @@ -1,7 +1,9 @@ // Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.node.admin.configserver.noderepository; -public class NodeRepositoryException extends RuntimeException { +import com.yahoo.vespa.hosted.node.admin.nodeadmin.ConvergenceException; + +public class NodeRepositoryException extends ConvergenceException { public NodeRepositoryException(String message) { super(message); } diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java index d402e75ff7b..6fb6d44bd6f 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java @@ -17,7 +17,7 @@ import java.util.Set; public class NodeSpec { private final String hostname; private final NodeState state; - private final NodeType nodeType; + private final NodeType type; private final String flavor; private final String canonicalFlavor; @@ -25,7 +25,7 @@ public class NodeSpec { private final 
Optional<DockerImage> currentDockerImage; private final Optional<Version> wantedVespaVersion; - private final Optional<Version> vespaVersion; + private final Optional<Version> currentVespaVersion; private final Optional<Version> wantedOsVersion; private final Optional<Version> currentOsVersion; @@ -46,9 +46,9 @@ public class NodeSpec { private final Optional<NodeOwner> owner; private final Optional<NodeMembership> membership; - private final double minCpuCores; - private final double minMainMemoryAvailableGb; - private final double minDiskAvailableGb; + private final double vcpus; + private final double memoryGb; + private final double diskGb; private final boolean fastDisk; private final double bandwidth; @@ -64,11 +64,11 @@ public class NodeSpec { Optional<DockerImage> wantedDockerImage, Optional<DockerImage> currentDockerImage, NodeState state, - NodeType nodeType, + NodeType type, String flavor, String canonicalFlavor, Optional<Version> wantedVespaVersion, - Optional<Version> vespaVersion, + Optional<Version> currentVespaVersion, Optional<Version> wantedOsVersion, Optional<Version> currentOsVersion, Optional<Boolean> allowedToBeDown, @@ -82,25 +82,32 @@ public class NodeSpec { Optional<Instant> wantedFirmwareCheck, Optional<Instant> currentFirmwareCheck, Optional<String> modelName, - double minCpuCores, - double minMainMemoryAvailableGb, - double minDiskAvailableGb, + double vcpus, + double memoryGb, + double diskGb, boolean fastDisk, double bandwidth, Set<String> ipAddresses, Set<String> additionalIpAddresses, NodeReports reports, Optional<String> parentHostname) { + if (state == NodeState.active) { + Objects.requireNonNull(wantedVespaVersion, "Unknown vespa version for active node"); + Objects.requireNonNull(wantedDockerImage, "Unknown docker image for active node"); + Objects.requireNonNull(wantedRestartGeneration, "Unknown restartGeneration for active node"); + Objects.requireNonNull(currentRestartGeneration, "Unknown currentRestartGeneration for active 
node"); + } + this.hostname = Objects.requireNonNull(hostname); this.wantedDockerImage = Objects.requireNonNull(wantedDockerImage); this.currentDockerImage = Objects.requireNonNull(currentDockerImage); this.state = Objects.requireNonNull(state); - this.nodeType = Objects.requireNonNull(nodeType); + this.type = Objects.requireNonNull(type); this.flavor = Objects.requireNonNull(flavor); this.canonicalFlavor = canonicalFlavor; this.modelName = modelName; this.wantedVespaVersion = Objects.requireNonNull(wantedVespaVersion); - this.vespaVersion = Objects.requireNonNull(vespaVersion); + this.currentVespaVersion = Objects.requireNonNull(currentVespaVersion); this.wantedOsVersion = Objects.requireNonNull(wantedOsVersion); this.currentOsVersion = Objects.requireNonNull(currentOsVersion); this.allowedToBeDown = Objects.requireNonNull(allowedToBeDown); @@ -113,9 +120,9 @@ public class NodeSpec { this.currentRebootGeneration = currentRebootGeneration; this.wantedFirmwareCheck = Objects.requireNonNull(wantedFirmwareCheck); this.currentFirmwareCheck = Objects.requireNonNull(currentFirmwareCheck); - this.minCpuCores = minCpuCores; - this.minMainMemoryAvailableGb = minMainMemoryAvailableGb; - this.minDiskAvailableGb = minDiskAvailableGb; + this.vcpus = vcpus; + this.memoryGb = memoryGb; + this.diskGb = diskGb; this.fastDisk = fastDisk; this.bandwidth = bandwidth; this.ipAddresses = Objects.requireNonNull(ipAddresses); @@ -124,125 +131,125 @@ public class NodeSpec { this.parentHostname = Objects.requireNonNull(parentHostname); } - public String getHostname() { + public String hostname() { return hostname; } - public NodeState getState() { + public NodeState state() { return state; } - public NodeType getNodeType() { - return nodeType; + public NodeType type() { + return type; } - public String getFlavor() { + public String flavor() { return flavor; } - public String getCanonicalFlavor() { + public String canonicalFlavor() { return canonicalFlavor; } - public Optional<DockerImage> 
getWantedDockerImage() { + public Optional<DockerImage> wantedDockerImage() { return wantedDockerImage; } - public Optional<DockerImage> getCurrentDockerImage() { + public Optional<DockerImage> currentDockerImage() { return currentDockerImage; } - public Optional<Version> getWantedVespaVersion() { + public Optional<Version> wantedVespaVersion() { return wantedVespaVersion; } - public Optional<Version> getVespaVersion() { - return vespaVersion; + public Optional<Version> currentVespaVersion() { + return currentVespaVersion; } - public Optional<Version> getCurrentOsVersion() { + public Optional<Version> currentOsVersion() { return currentOsVersion; } - public Optional<Version> getWantedOsVersion() { + public Optional<Version> wantedOsVersion() { return wantedOsVersion; } - public Optional<Long> getWantedRestartGeneration() { + public Optional<Long> wantedRestartGeneration() { return wantedRestartGeneration; } - public Optional<Long> getCurrentRestartGeneration() { + public Optional<Long> currentRestartGeneration() { return currentRestartGeneration; } - public long getWantedRebootGeneration() { + public long wantedRebootGeneration() { return wantedRebootGeneration; } - public long getCurrentRebootGeneration() { + public long currentRebootGeneration() { return currentRebootGeneration; } - public Optional<Instant> getWantedFirmwareCheck() { + public Optional<Instant> wantedFirmwareCheck() { return wantedFirmwareCheck; } - public Optional<Instant> getCurrentFirmwareCheck() { + public Optional<Instant> currentFirmwareCheck() { return currentFirmwareCheck; } - public Optional<String> getModelName() { + public Optional<String> modelName() { return modelName; } - public Optional<Boolean> getAllowedToBeDown() { + public Optional<Boolean> allowedToBeDown() { return allowedToBeDown; } - public Optional<Boolean> getWantToDeprovision() { + public Optional<Boolean> wantToDeprovision() { return wantToDeprovision; } - public Optional<NodeOwner> getOwner() { + public 
Optional<NodeOwner> owner() { return owner; } - public Optional<NodeMembership> getMembership() { + public Optional<NodeMembership> membership() { return membership; } - public double getMinCpuCores() { - return minCpuCores; + public double vcpus() { + return vcpus; } - public double getMinMainMemoryAvailableGb() { - return minMainMemoryAvailableGb; + public double memoryGb() { + return memoryGb; } - public double getMinDiskAvailableGb() { - return minDiskAvailableGb; + public double diskGb() { + return diskGb; } public boolean isFastDisk() { return fastDisk; } - public double getBandwidth() { + public double bandwidth() { return bandwidth; } - public Set<String> getIpAddresses() { + public Set<String> ipAddresses() { return ipAddresses; } - public Set<String> getAdditionalIpAddresses() { + public Set<String> additionalIpAddresses() { return additionalIpAddresses; } - public NodeReports getReports() { return reports; } + public NodeReports reports() { return reports; } - public Optional<String> getParentHostname() { + public Optional<String> parentHostname() { return parentHostname; } @@ -257,11 +264,11 @@ public class NodeSpec { Objects.equals(wantedDockerImage, that.wantedDockerImage) && Objects.equals(currentDockerImage, that.currentDockerImage) && Objects.equals(state, that.state) && - Objects.equals(nodeType, that.nodeType) && + Objects.equals(type, that.type) && Objects.equals(flavor, that.flavor) && Objects.equals(canonicalFlavor, that.canonicalFlavor) && Objects.equals(wantedVespaVersion, that.wantedVespaVersion) && - Objects.equals(vespaVersion, that.vespaVersion) && + Objects.equals(currentVespaVersion, that.currentVespaVersion) && Objects.equals(wantedOsVersion, that.wantedOsVersion) && Objects.equals(currentOsVersion, that.currentOsVersion) && Objects.equals(allowedToBeDown, that.allowedToBeDown) && @@ -274,9 +281,9 @@ public class NodeSpec { Objects.equals(currentRebootGeneration, that.currentRebootGeneration) && Objects.equals(wantedFirmwareCheck, 
that.wantedFirmwareCheck) && Objects.equals(currentFirmwareCheck, that.currentFirmwareCheck) && - Objects.equals(minCpuCores, that.minCpuCores) && - Objects.equals(minMainMemoryAvailableGb, that.minMainMemoryAvailableGb) && - Objects.equals(minDiskAvailableGb, that.minDiskAvailableGb) && + Objects.equals(vcpus, that.vcpus) && + Objects.equals(memoryGb, that.memoryGb) && + Objects.equals(diskGb, that.diskGb) && Objects.equals(fastDisk, that.fastDisk) && Objects.equals(bandwidth, that.bandwidth) && Objects.equals(ipAddresses, that.ipAddresses) && @@ -292,11 +299,11 @@ public class NodeSpec { wantedDockerImage, currentDockerImage, state, - nodeType, + type, flavor, canonicalFlavor, wantedVespaVersion, - vespaVersion, + currentVespaVersion, wantedOsVersion, currentOsVersion, allowedToBeDown, @@ -309,9 +316,9 @@ public class NodeSpec { currentRebootGeneration, wantedFirmwareCheck, currentFirmwareCheck, - minCpuCores, - minMainMemoryAvailableGb, - minDiskAvailableGb, + vcpus, + memoryGb, + diskGb, fastDisk, bandwidth, ipAddresses, @@ -327,26 +334,26 @@ public class NodeSpec { + " wantedDockerImage=" + wantedDockerImage + " currentDockerImage=" + currentDockerImage + " state=" + state - + " nodeType=" + nodeType + + " type=" + type + " flavor=" + flavor + " canonicalFlavor=" + canonicalFlavor + " wantedVespaVersion=" + wantedVespaVersion - + " vespaVersion=" + vespaVersion + + " currentVespaVersion=" + currentVespaVersion + " wantedOsVersion=" + wantedOsVersion + " currentOsVersion=" + currentOsVersion + " allowedToBeDown=" + allowedToBeDown + " wantToDeprovision=" + wantToDeprovision + " owner=" + owner + " membership=" + membership - + " minCpuCores=" + minCpuCores + + " vcpus=" + vcpus + " wantedRestartGeneration=" + wantedRestartGeneration + " currentRestartGeneration=" + currentRestartGeneration + " wantedRebootGeneration=" + wantedRebootGeneration + " currentRebootGeneration=" + currentRebootGeneration + " wantedFirmwareCheck=" + wantedFirmwareCheck + " 
currentFirmwareCheck=" + currentFirmwareCheck - + " minMainMemoryAvailableGb=" + minMainMemoryAvailableGb - + " minDiskAvailableGb=" + minDiskAvailableGb + + " memoryGb=" + memoryGb + + " diskGb=" + diskGb + " fastDisk=" + fastDisk + " bandwidth=" + bandwidth + " ipAddresses=" + ipAddresses @@ -358,14 +365,14 @@ public class NodeSpec { public static class Builder { private String hostname; - private Optional<DockerImage> wantedDockerImage = Optional.empty(); - private Optional<DockerImage> currentDockerImage = Optional.empty(); private NodeState state; - private NodeType nodeType; + private NodeType type; private String flavor; private String canonicalFlavor; + private Optional<DockerImage> wantedDockerImage = Optional.empty(); + private Optional<DockerImage> currentDockerImage = Optional.empty(); private Optional<Version> wantedVespaVersion = Optional.empty(); - private Optional<Version> vespaVersion = Optional.empty(); + private Optional<Version> currentVespaVersion = Optional.empty(); private Optional<Version> wantedOsVersion = Optional.empty(); private Optional<Version> currentOsVersion = Optional.empty(); private Optional<Boolean> allowedToBeDown = Optional.empty(); @@ -379,10 +386,10 @@ public class NodeSpec { private Optional<Instant> wantedFirmwareCheck = Optional.empty(); private Optional<Instant> currentFirmwareCheck = Optional.empty(); private Optional<String> modelName = Optional.empty(); - private double minCpuCores; - private double minMainMemoryAvailableGb; - private double minDiskAvailableGb; - private boolean fastDisk = false; + private double vcpus; + private double memoryGb; + private double diskGb; + private boolean fastDisk; private double bandwidth; private Set<String> ipAddresses = Set.of(); private Set<String> additionalIpAddresses = Set.of(); @@ -394,12 +401,12 @@ public class NodeSpec { public Builder(NodeSpec node) { hostname(node.hostname); state(node.state); - nodeType(node.nodeType); + type(node.type); flavor(node.flavor); 
canonicalFlavor(node.canonicalFlavor); - minCpuCores(node.minCpuCores); - minMainMemoryAvailableGb(node.minMainMemoryAvailableGb); - minDiskAvailableGb(node.minDiskAvailableGb); + vcpus(node.vcpus); + memoryGb(node.memoryGb); + diskGb(node.diskGb); fastDisk(node.fastDisk); bandwidth(node.bandwidth); ipAddresses(node.ipAddresses); @@ -411,7 +418,7 @@ public class NodeSpec { node.wantedDockerImage.ifPresent(this::wantedDockerImage); node.currentDockerImage.ifPresent(this::currentDockerImage); node.wantedVespaVersion.ifPresent(this::wantedVespaVersion); - node.vespaVersion.ifPresent(this::vespaVersion); + node.currentVespaVersion.ifPresent(this::currentVespaVersion); node.wantedOsVersion.ifPresent(this::wantedOsVersion); node.currentOsVersion.ifPresent(this::currentOsVersion); node.allowedToBeDown.ifPresent(this::allowedToBeDown); @@ -445,8 +452,8 @@ public class NodeSpec { return this; } - public Builder nodeType(NodeType nodeType) { - this.nodeType = nodeType; + public Builder type(NodeType nodeType) { + this.type = nodeType; return this; } @@ -465,8 +472,8 @@ public class NodeSpec { return this; } - public Builder vespaVersion(Version vespaVersion) { - this.vespaVersion = Optional.of(vespaVersion); + public Builder currentVespaVersion(Version vespaVersion) { + this.currentVespaVersion = Optional.of(vespaVersion); return this; } @@ -530,18 +537,18 @@ public class NodeSpec { return this; } - public Builder minCpuCores(double minCpuCores) { - this.minCpuCores = minCpuCores; + public Builder vcpus(double minCpuCores) { + this.vcpus = minCpuCores; return this; } - public Builder minMainMemoryAvailableGb(double minMainMemoryAvailableGb) { - this.minMainMemoryAvailableGb = minMainMemoryAvailableGb; + public Builder memoryGb(double minMainMemoryAvailableGb) { + this.memoryGb = minMainMemoryAvailableGb; return this; } - public Builder minDiskAvailableGb(double minDiskAvailableGb) { - this.minDiskAvailableGb = minDiskAvailableGb; + public Builder diskGb(double 
minDiskAvailableGb) { + this.diskGb = minDiskAvailableGb; return this; } @@ -596,127 +603,127 @@ public class NodeSpec { return this; } - public String getHostname() { + public String hostname() { return hostname; } - public Optional<DockerImage> getWantedDockerImage() { + public Optional<DockerImage> wantedDockerImage() { return wantedDockerImage; } - public Optional<DockerImage> getCurrentDockerImage() { + public Optional<DockerImage> currentDockerImage() { return currentDockerImage; } - public NodeState getState() { + public NodeState state() { return state; } - public NodeType getNodeType() { - return nodeType; + public NodeType type() { + return type; } - public String getFlavor() { + public String flavor() { return flavor; } - public String getCanonicalFlavor() { + public String canonicalFlavor() { return canonicalFlavor; } - public Optional<Version> getWantedVespaVersion() { + public Optional<Version> wantedVespaVersion() { return wantedVespaVersion; } - public Optional<Version> getVespaVersion() { - return vespaVersion; + public Optional<Version> currentVespaVersion() { + return currentVespaVersion; } - public Optional<Version> getWantedOsVersion() { + public Optional<Version> wantedOsVersion() { return wantedOsVersion; } - public Optional<Version> getCurrentOsVersion() { + public Optional<Version> currentOsVersion() { return currentOsVersion; } - public Optional<Boolean> getAllowedToBeDown() { + public Optional<Boolean> allowedToBeDown() { return allowedToBeDown; } - public Optional<Boolean> getWantToDeprovision() { + public Optional<Boolean> wantToDeprovision() { return wantToDeprovision; } - public Optional<NodeOwner> getOwner() { + public Optional<NodeOwner> owner() { return owner; } - public Optional<NodeMembership> getMembership() { + public Optional<NodeMembership> membership() { return membership; } - public Optional<Long> getWantedRestartGeneration() { + public Optional<Long> wantedRestartGeneration() { return wantedRestartGeneration; } - public 
Optional<Long> getCurrentRestartGeneration() { + public Optional<Long> currentRestartGeneration() { return currentRestartGeneration; } - public long getWantedRebootGeneration() { + public long wantedRebootGeneration() { return wantedRebootGeneration; } - public long getCurrentRebootGeneration() { + public long currentRebootGeneration() { return currentRebootGeneration; } - public double getMinCpuCores() { - return minCpuCores; + public double vcpus() { + return vcpus; } - public double getMinMainMemoryAvailableGb() { - return minMainMemoryAvailableGb; + public double memoryGb() { + return memoryGb; } - public double getMinDiskAvailableGb() { - return minDiskAvailableGb; + public double diskGb() { + return diskGb; } public boolean isFastDisk() { return fastDisk; } - public double getBandwidth() { + public double bandwidth() { return bandwidth; } - public Set<String> getIpAddresses() { + public Set<String> ipAddresses() { return ipAddresses; } - public Set<String> getAdditionalIpAddresses() { + public Set<String> additionalIpAddresses() { return additionalIpAddresses; } - public NodeReports getReports() { + public NodeReports reports() { return reports; } - public Optional<String> getParentHostname() { + public Optional<String> parentHostname() { return parentHostname; } public NodeSpec build() { - return new NodeSpec(hostname, wantedDockerImage, currentDockerImage, state, nodeType, + return new NodeSpec(hostname, wantedDockerImage, currentDockerImage, state, type, flavor, canonicalFlavor, - wantedVespaVersion, vespaVersion, wantedOsVersion, currentOsVersion, allowedToBeDown, wantToDeprovision, + wantedVespaVersion, currentVespaVersion, wantedOsVersion, currentOsVersion, allowedToBeDown, wantToDeprovision, owner, membership, wantedRestartGeneration, currentRestartGeneration, wantedRebootGeneration, currentRebootGeneration, wantedFirmwareCheck, currentFirmwareCheck, modelName, - minCpuCores, minMainMemoryAvailableGb, minDiskAvailableGb, + vcpus, memoryGb, diskGb, 
fastDisk, bandwidth, ipAddresses, additionalIpAddresses, reports, parentHostname); } diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java index ca52eca13d2..fe19b81614d 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java @@ -12,10 +12,8 @@ import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.bindings.Ge import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.bindings.GetNodesResponse; import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.bindings.NodeMessageResponse; import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.bindings.NodeRepositoryNode; -import com.yahoo.vespa.hosted.node.admin.util.PrefixLogger; import java.time.Instant; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; @@ -23,6 +21,7 @@ import java.util.Optional; import java.util.Set; import java.util.TreeMap; import java.util.function.Function; +import java.util.logging.Logger; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -30,7 +29,7 @@ import java.util.stream.Stream; * @author stiankri, dybis */ public class RealNodeRepository implements NodeRepository { - private static final PrefixLogger NODE_ADMIN_LOGGER = PrefixLogger.getNodeAdminLogger(RealNodeRepository.class); + private static final Logger logger = Logger.getLogger(RealNodeRepository.class.getName()); private final ConfigServerApi configServerApi; @@ -46,7 +45,7 @@ public class RealNodeRepository implements NodeRepository { NodeMessageResponse response = configServerApi.post("/nodes/v2/node", nodesToPost, NodeMessageResponse.class); if 
(Strings.isNullOrEmpty(response.errorCode)) return; - throw new NodeRepositoryException("Failed to add nodes to node-repo: " + response.message + " " + response.errorCode); + throw new NodeRepositoryException("Failed to add nodes: " + response.message + " " + response.errorCode); } @Override @@ -80,43 +79,37 @@ public class RealNodeRepository implements NodeRepository { */ @Override public Map<String, Acl> getAcls(String hostName) { - try { - String path = String.format("/nodes/v2/acl/%s?children=true", hostName); - GetAclResponse response = configServerApi.get(path, GetAclResponse.class); - - // Group ports by container hostname that trusts them - Map<String, Set<Integer>> trustedPorts = response.trustedPorts.stream() - .collect(Collectors.groupingBy( - GetAclResponse.Port::getTrustedBy, - Collectors.mapping(port -> port.port, Collectors.toSet()))); - - // Group node ip-addresses by container hostname that trusts them - Map<String, Set<Acl.Node>> trustedNodes = response.trustedNodes.stream() - .collect(Collectors.groupingBy( - GetAclResponse.Node::getTrustedBy, - Collectors.mapping( - node -> new Acl.Node(node.hostname, node.ipAddress), - Collectors.toSet()))); - - // Group trusted networks by container hostname that trusts them - Map<String, Set<String>> trustedNetworks = response.trustedNetworks.stream() - .collect(Collectors.groupingBy(GetAclResponse.Network::getTrustedBy, - Collectors.mapping(node -> node.network, Collectors.toSet()))); - - - // For each hostname create an ACL - return Stream.of(trustedNodes.keySet(), trustedPorts.keySet(), trustedNetworks.keySet()) - .flatMap(Set::stream) - .distinct() - .collect(Collectors.toMap( - Function.identity(), - hostname -> new Acl(trustedPorts.get(hostname), trustedNodes.get(hostname), - trustedNetworks.get(hostname)))); - } catch (HttpException.NotFoundException e) { - NODE_ADMIN_LOGGER.warning("Failed to fetch ACLs for " + hostName + " No ACL will be applied"); - } - - return Collections.emptyMap(); + String path 
= String.format("/nodes/v2/acl/%s?children=true", hostName); + GetAclResponse response = configServerApi.get(path, GetAclResponse.class); + + // Group ports by container hostname that trusts them + Map<String, Set<Integer>> trustedPorts = response.trustedPorts.stream() + .collect(Collectors.groupingBy( + GetAclResponse.Port::getTrustedBy, + Collectors.mapping(port -> port.port, Collectors.toSet()))); + + // Group node ip-addresses by container hostname that trusts them + Map<String, Set<Acl.Node>> trustedNodes = response.trustedNodes.stream() + .collect(Collectors.groupingBy( + GetAclResponse.Node::getTrustedBy, + Collectors.mapping( + node -> new Acl.Node(node.hostname, node.ipAddress), + Collectors.toSet()))); + + // Group trusted networks by container hostname that trusts them + Map<String, Set<String>> trustedNetworks = response.trustedNetworks.stream() + .collect(Collectors.groupingBy(GetAclResponse.Network::getTrustedBy, + Collectors.mapping(node -> node.network, Collectors.toSet()))); + + + // For each hostname create an ACL + return Stream.of(trustedNodes.keySet(), trustedPorts.keySet(), trustedNetworks.keySet()) + .flatMap(Set::stream) + .distinct() + .collect(Collectors.toMap( + Function.identity(), + hostname -> new Acl(trustedPorts.get(hostname), trustedNodes.get(hostname), + trustedNetworks.get(hostname)))); } @Override @@ -127,7 +120,7 @@ public class RealNodeRepository implements NodeRepository { NodeMessageResponse.class); if (Strings.isNullOrEmpty(response.errorCode)) return; - throw new NodeRepositoryException("Unexpected message " + response.message + " " + response.errorCode); + throw new NodeRepositoryException("Failed to update node attributes: " + response.message + " " + response.errorCode); } @Override @@ -137,10 +130,10 @@ public class RealNodeRepository implements NodeRepository { "/nodes/v2/state/" + state + "/" + hostName, Optional.empty(), /* body */ NodeMessageResponse.class); - NODE_ADMIN_LOGGER.info(response.message); + 
logger.info(response.message); if (Strings.isNullOrEmpty(response.errorCode)) return; - throw new NodeRepositoryException("Unexpected message " + response.message + " " + response.errorCode); + throw new NodeRepositoryException("Failed to set node state: " + response.message + " " + response.errorCode); } private static NodeSpec createNodeSpec(NodeRepositoryNode node) { @@ -149,30 +142,13 @@ public class RealNodeRepository implements NodeRepository { Objects.requireNonNull(node.state, "Unknown node state"); NodeState nodeState = NodeState.valueOf(node.state); - if (nodeState == NodeState.active) { - Objects.requireNonNull(node.wantedVespaVersion, "Unknown vespa version for active node"); - Objects.requireNonNull(node.wantedDockerImage, "Unknown docker image for active node"); - Objects.requireNonNull(node.restartGeneration, "Unknown restartGeneration for active node"); - Objects.requireNonNull(node.currentRestartGeneration, "Unknown currentRestartGeneration for active node"); - } - - String hostName = Objects.requireNonNull(node.hostname, "hostname is null"); - - NodeOwner owner = null; - if (node.owner != null) { - owner = new NodeOwner(node.owner.tenant, node.owner.application, node.owner.instance); - } - - NodeMembership membership = null; - if (node.membership != null) { - membership = new NodeMembership(node.membership.clusterType, node.membership.clusterId, - node.membership.group, node.membership.index, node.membership.retired); - } - NodeReports reports = NodeReports.fromMap(node.reports == null ? 
Collections.emptyMap() : node.reports); + Optional<NodeMembership> membership = Optional.ofNullable(node.membership) + .map(m -> new NodeMembership(m.clusterType, m.clusterId, m.group, m.index, m.retired)); + NodeReports reports = NodeReports.fromMap(Optional.ofNullable(node.reports).orElseGet(Map::of)); return new NodeSpec( - hostName, + node.hostname, Optional.ofNullable(node.wantedDockerImage).map(DockerImage::fromString), Optional.ofNullable(node.currentDockerImage).map(DockerImage::fromString), nodeState, @@ -185,8 +161,8 @@ public class RealNodeRepository implements NodeRepository { Optional.ofNullable(node.currentOsVersion).map(Version::fromString), Optional.ofNullable(node.allowedToBeDown), Optional.ofNullable(node.wantToDeprovision), - Optional.ofNullable(owner), - Optional.ofNullable(membership), + Optional.ofNullable(node.owner).map(o -> new NodeOwner(o.tenant, o.application, o.instance)), + membership, Optional.ofNullable(node.restartGeneration), Optional.ofNullable(node.currentRestartGeneration), node.rebootGeneration, diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/orchestrator/OrchestratorException.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/orchestrator/OrchestratorException.java index fe19da0c41c..8575bf7f655 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/orchestrator/OrchestratorException.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/orchestrator/OrchestratorException.java @@ -1,8 +1,10 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.hosted.node.admin.configserver.orchestrator; +import com.yahoo.vespa.hosted.node.admin.nodeadmin.ConvergenceException; + @SuppressWarnings("serial") -public class OrchestratorException extends RuntimeException { +public class OrchestratorException extends ConvergenceException { public OrchestratorException(String message) { super(message); } diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/orchestrator/OrchestratorImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/orchestrator/OrchestratorImpl.java index 64a67aa612a..6124e1bdc0e 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/orchestrator/OrchestratorImpl.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/orchestrator/OrchestratorImpl.java @@ -40,8 +40,7 @@ public class OrchestratorImpl implements Orchestrator { } catch (HttpException.NotFoundException n) { throw new OrchestratorNotFoundException("Failed to suspend " + hostName + ", host not found"); } catch (HttpException e) { - throw new OrchestratorException("Failed to suspend " + hostName + ": " + - e.toString()); + throw new OrchestratorException("Failed to suspend " + hostName + ": " + e.toString()); } catch (RuntimeException e) { throw new RuntimeException("Got error on suspend", e); } @@ -60,9 +59,8 @@ public class OrchestratorImpl implements Orchestrator { parentHostName, params); batchOperationResult = configServerApi.put(url, Optional.empty(), BatchOperationResult.class); } catch (HttpException e) { - throw new OrchestratorException("Failed to batch suspend for " + - parentHostName + ": " + e.toString()); - } catch (Exception e) { + throw new OrchestratorException("Failed to batch suspend for " + parentHostName + ": " + e.toString()); + } catch (RuntimeException e) { throw new RuntimeException("Got error on batch suspend for " + parentHostName + ", with nodes " + hostNames, e); } @@ -80,9 +78,8 
@@ public class OrchestratorImpl implements Orchestrator { } catch (HttpException.NotFoundException n) { throw new OrchestratorNotFoundException("Failed to resume " + hostName + ", host not found"); } catch (HttpException e) { - throw new OrchestratorException("Failed to suspend " + hostName + ": " + - e.toString()); - } catch (Exception e) { + throw new OrchestratorException("Failed to suspend " + hostName + ": " + e.toString()); + } catch (RuntimeException e) { throw new RuntimeException("Got error on resume", e); } diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/state/StateImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/state/StateImpl.java index efeb3039379..2fe8d4b4792 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/state/StateImpl.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/state/StateImpl.java @@ -2,6 +2,7 @@ package com.yahoo.vespa.hosted.node.admin.configserver.state; import com.yahoo.vespa.hosted.node.admin.configserver.ConfigServerApi; +import com.yahoo.vespa.hosted.node.admin.configserver.HttpException; import com.yahoo.vespa.hosted.node.admin.configserver.state.bindings.HealthResponse; /** @@ -16,26 +17,12 @@ public class StateImpl implements State { @Override public HealthCode getHealth() { - HealthResponse response; try { - response = configServerApi.get("/state/v1/health", HealthResponse.class); - } catch (RuntimeException e) { - if (causedByConnectionRefused(e)) { - return HealthCode.DOWN; - } - - throw e; + HealthResponse response = configServerApi.get("/state/v1/health", HealthResponse.class); + return HealthCode.fromString(response.status.code); + } catch (HttpException e) { + return HealthCode.DOWN; } - return HealthCode.fromString(response.status.code); } - private static boolean causedByConnectionRefused(Throwable throwable) { - for (Throwable cause = throwable; cause != null; cause = 
cause.getCause()) { - if (cause instanceof java.net.ConnectException) { - return true; - } - } - - return false; - } } diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImpl.java index 954ba25895a..1a993b2687c 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImpl.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImpl.java @@ -64,13 +64,13 @@ public class DockerOperationsImpl implements DockerOperations { context.log(logger, "Creating container"); // IPv6 - Assume always valid - Inet6Address ipV6Address = ipAddresses.getIPv6Address(context.node().getHostname()).orElseThrow( - () -> new RuntimeException("Unable to find a valid IPv6 address for " + context.node().getHostname() + + Inet6Address ipV6Address = ipAddresses.getIPv6Address(context.node().hostname()).orElseThrow( + () -> new RuntimeException("Unable to find a valid IPv6 address for " + context.node().hostname() + ". 
Missing an AAAA DNS entry?")); Docker.CreateContainerCommand command = docker.createContainerCommand( - context.node().getWantedDockerImage().get(), context.containerName()) - .withHostName(context.node().getHostname()) + context.node().wantedDockerImage().get(), context.containerName()) + .withHostName(context.node().hostname()) .withResources(containerResources) .withManagedBy(MANAGER_NAME) .withUlimit("nofile", 262_144, 262_144) @@ -88,7 +88,7 @@ public class DockerOperationsImpl implements DockerOperations { .withAddCapability("SYS_ADMIN") // Needed for perf .withAddCapability("SYS_NICE"); // Needed for set_mempolicy to work - if (context.node().getMembership().map(NodeMembership::getClusterType).map("content"::equalsIgnoreCase).orElse(false)) { + if (context.node().membership().map(NodeMembership::clusterType).map("content"::equalsIgnoreCase).orElse(false)) { command.withSecurityOpts("seccomp=unconfined"); } @@ -101,20 +101,20 @@ public class DockerOperationsImpl implements DockerOperations { command.withIpAddress(ipV6Local); // IPv4 - Only present for some containers - Optional<InetAddress> ipV4Local = ipAddresses.getIPv4Address(context.node().getHostname()) + Optional<InetAddress> ipV4Local = ipAddresses.getIPv4Address(context.node().hostname()) .map(ipV4Address -> { InetAddress ipV4Prefix = InetAddresses.forString(IPV4_NPT_PREFIX); return IPAddresses.prefixTranslate(ipV4Address, ipV4Prefix, 2); }); ipV4Local.ifPresent(command::withIpAddress); - addEtcHosts(containerData, context.node().getHostname(), ipV4Local, ipV6Local); + addEtcHosts(containerData, context.node().hostname(), ipV4Local, ipV6Local); } addMounts(context, command); // TODO: Enforce disk constraints - long minMainMemoryAvailableMb = (long) (context.node().getMinMainMemoryAvailableGb() * 1024); + long minMainMemoryAvailableMb = (long) (context.node().memoryGb() * 1024); if (minMainMemoryAvailableMb > 0) { // VESPA_TOTAL_MEMORY_MB is used to make any jdisc container think the machine // only 
has this much physical memory (overrides total memory reported by `free -m`). diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java index 4972a306377..26e4dcda88e 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java @@ -158,23 +158,23 @@ public class StorageMaintainer { private Map<String, Object> generateTags(NodeAgentContext context) { Map<String, String> tags = new LinkedHashMap<>(); tags.put("namespace", "Vespa"); - tags.put("role", nodeTypeToRole(context.node().getNodeType())); + tags.put("role", nodeTypeToRole(context.node().type())); tags.put("zone", context.zone().getId().value()); - context.node().getVespaVersion().ifPresent(version -> tags.put("vespaVersion", version.toFullString())); + context.node().currentVespaVersion().ifPresent(version -> tags.put("vespaVersion", version.toFullString())); if (! isConfigserverLike(context.nodeType())) { - tags.put("state", context.node().getState().toString()); - context.node().getParentHostname().ifPresent(parent -> tags.put("parentHostname", parent)); - context.node().getOwner().ifPresent(owner -> { - tags.put("tenantName", owner.getTenant()); - tags.put("app", owner.getApplication() + "." + owner.getInstance()); - tags.put("applicationName", owner.getApplication()); - tags.put("instanceName", owner.getInstance()); - tags.put("applicationId", owner.getTenant() + "." + owner.getApplication() + "." + owner.getInstance()); + tags.put("state", context.node().state().toString()); + context.node().parentHostname().ifPresent(parent -> tags.put("parentHostname", parent)); + context.node().owner().ifPresent(owner -> { + tags.put("tenantName", owner.tenant()); + tags.put("app", owner.application() + "." 
+ owner.instance()); + tags.put("applicationName", owner.application()); + tags.put("instanceName", owner.instance()); + tags.put("applicationId", owner.tenant() + "." + owner.application() + "." + owner.instance()); }); - context.node().getMembership().ifPresent(membership -> { - tags.put("clustertype", membership.getClusterType()); - tags.put("clusterid", membership.getClusterId()); + context.node().membership().ifPresent(membership -> { + tags.put("clustertype", membership.clusterType()); + tags.put("clusterid", membership.clusterId()); }); } @@ -260,20 +260,20 @@ public class StorageMaintainer { private Map<String, Object> getCoredumpNodeAttributes(NodeAgentContext context, Optional<Container> container) { Map<String, String> attributes = new HashMap<>(); - attributes.put("hostname", context.node().getHostname()); + attributes.put("hostname", context.node().hostname()); attributes.put("region", context.zone().getRegionName().value()); attributes.put("environment", context.zone().getEnvironment().value()); - attributes.put("flavor", context.node().getFlavor()); + attributes.put("flavor", context.node().flavor()); attributes.put("kernel_version", System.getProperty("os.version")); attributes.put("cpu_microcode_version", getMicrocodeVersion()); container.map(c -> c.image).ifPresent(image -> attributes.put("docker_image", image.asString())); - context.node().getParentHostname().ifPresent(parent -> attributes.put("parent_hostname", parent)); - context.node().getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version.toFullString())); - context.node().getOwner().ifPresent(owner -> { - attributes.put("tenant", owner.getTenant()); - attributes.put("application", owner.getApplication()); - attributes.put("instance", owner.getInstance()); + context.node().parentHostname().ifPresent(parent -> attributes.put("parent_hostname", parent)); + context.node().currentVespaVersion().ifPresent(version -> attributes.put("vespa_version", version.toFullString())); 
+ context.node().owner().ifPresent(owner -> { + attributes.put("tenant", owner.tenant()); + attributes.put("application", owner.application()); + attributes.put("instance", owner.instance()); }); return Collections.unmodifiableMap(attributes); } diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdmin.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdmin.java index 00ec985ba0c..7de2aae77c8 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdmin.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdmin.java @@ -16,11 +16,8 @@ public interface NodeAdmin { /** Start/stop NodeAgents and schedule next NodeAgent ticks with the given NodeAgentContexts */ void refreshContainersToRun(Set<NodeAgentContext> nodeAgentContexts); - /** Gather node agent and its docker container metrics and forward them to the {@code MetricReceiverWrapper} */ - void updateNodeAgentMetrics(); - - /** Gather node admin metrics and forward them to the {@code MetricReceiverWrapper} */ - void updateNodeAdminMetrics(); + /** Update node admin metrics */ + void updateMetrics(); /** * Attempts to freeze/unfreeze all NodeAgents and itself. To freeze a NodeAgent means that diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java index 0d520241ac8..cb10eac9e6c 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java @@ -1,10 +1,10 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.hosted.node.admin.nodeadmin; -import com.yahoo.vespa.hosted.dockerapi.metrics.CounterWrapper; +import com.yahoo.vespa.hosted.dockerapi.metrics.Counter; import com.yahoo.vespa.hosted.dockerapi.metrics.Dimensions; -import com.yahoo.vespa.hosted.dockerapi.metrics.GaugeWrapper; -import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper; +import com.yahoo.vespa.hosted.dockerapi.metrics.Gauge; +import com.yahoo.vespa.hosted.dockerapi.metrics.Metrics; import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgent; import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentContext; import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentContextManager; @@ -39,28 +39,26 @@ public class NodeAdminImpl implements NodeAdmin { private boolean previousWantFrozen; private boolean isFrozen; private Instant startOfFreezeConvergence; - private final Map<String, NodeAgentWithScheduler> nodeAgentWithSchedulerByHostname = new ConcurrentHashMap<>(); - private final GaugeWrapper numberOfContainersInLoadImageState; - private final GaugeWrapper jvmHeapUsed; - private final GaugeWrapper jvmHeapFree; - private final GaugeWrapper jvmHeapTotal; - private final CounterWrapper numberOfUnhandledExceptionsInNodeAgent; + private final Gauge jvmHeapUsed; + private final Gauge jvmHeapFree; + private final Gauge jvmHeapTotal; + private final Counter numberOfUnhandledExceptions; - public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, MetricReceiverWrapper metricReceiver, Clock clock) { + public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, Metrics metrics, Clock clock) { this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext), - metricReceiver, clock, NODE_AGENT_FREEZE_TIMEOUT, NODE_AGENT_SPREAD); + metrics, clock, NODE_AGENT_FREEZE_TIMEOUT, NODE_AGENT_SPREAD); } - public NodeAdminImpl(NodeAgentFactory nodeAgentFactory, MetricReceiverWrapper metricReceiver, + public NodeAdminImpl(NodeAgentFactory 
nodeAgentFactory, Metrics metrics, Clock clock, Duration freezeTimeout, Duration spread) { this((NodeAgentWithSchedulerFactory) nodeAgentContext -> create(clock, nodeAgentFactory, nodeAgentContext), - metricReceiver, clock, freezeTimeout, spread); + metrics, clock, freezeTimeout, spread); } NodeAdminImpl(NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory, - MetricReceiverWrapper metricReceiver, Clock clock, Duration freezeTimeout, Duration spread) { + Metrics metrics, Clock clock, Duration freezeTimeout, Duration spread) { this.nodeAgentWithSchedulerFactory = nodeAgentWithSchedulerFactory; this.clock = clock; @@ -70,13 +68,12 @@ public class NodeAdminImpl implements NodeAdmin { this.isFrozen = true; this.startOfFreezeConvergence = clock.instant(); - Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build(); - this.numberOfContainersInLoadImageState = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.image.loading"); - this.numberOfUnhandledExceptionsInNodeAgent = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.unhandled_exceptions"); + this.numberOfUnhandledExceptions = metrics.declareCounter("unhandled_exceptions", + new Dimensions(Map.of("src", "node-agents"))); - this.jvmHeapUsed = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST, new Dimensions.Builder().build(), "mem.heap.used"); - this.jvmHeapFree = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST, new Dimensions.Builder().build(), "mem.heap.free"); - this.jvmHeapTotal = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_HOST, new Dimensions.Builder().build(), "mem.heap.total"); + this.jvmHeapUsed = metrics.declareGauge("mem.heap.used"); + this.jvmHeapFree = metrics.declareGauge("mem.heap.free"); + this.jvmHeapTotal = metrics.declareGauge("mem.heap.total"); } @Override @@ -105,22 +102,12 @@ public class NodeAdminImpl implements NodeAdmin { } @Override - 
public void updateNodeAgentMetrics() { - int numberContainersWaitingImage = 0; - int numberOfNewUnhandledExceptions = 0; - + public void updateMetrics() { for (NodeAgentWithScheduler nodeAgentWithScheduler : nodeAgentWithSchedulerByHostname.values()) { - if (nodeAgentWithScheduler.isDownloadingImage()) numberContainersWaitingImage++; - numberOfNewUnhandledExceptions += nodeAgentWithScheduler.getAndResetNumberOfUnhandledExceptions(); + numberOfUnhandledExceptions.add(nodeAgentWithScheduler.getAndResetNumberOfUnhandledExceptions()); nodeAgentWithScheduler.updateContainerNodeMetrics(); } - numberOfContainersInLoadImageState.sample(numberContainersWaitingImage); - numberOfUnhandledExceptionsInNodeAgent.add(numberOfNewUnhandledExceptions); - } - - @Override - public void updateNodeAdminMetrics() { Runtime runtime = Runtime.getRuntime(); long freeMemory = runtime.freeMemory(); long totalMemory = runtime.totalMemory(); @@ -208,7 +195,6 @@ public class NodeAdminImpl implements NodeAdmin { @Override public void stopForHostSuspension() { nodeAgent.stopForHostSuspension(); } @Override public void stopForRemoval() { nodeAgent.stopForRemoval(); } @Override public void updateContainerNodeMetrics() { nodeAgent.updateContainerNodeMetrics(); } - @Override public boolean isDownloadingImage() { return nodeAgent.isDownloadingImage(); } @Override public int getAndResetNumberOfUnhandledExceptions() { return nodeAgent.getAndResetNumberOfUnhandledExceptions(); } @Override public void scheduleTickWith(NodeAgentContext context, Instant at) { nodeAgentScheduler.scheduleTickWith(context, at); } diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java index 2cd15a3ebe4..4a76e0e0a5b 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java +++ 
b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java @@ -80,8 +80,7 @@ public class NodeAdminStateUpdater { metricsScheduler.scheduleAtFixedRate(() -> { try { if (suspendedStates.contains(currentState)) return; - nodeAdmin.updateNodeAgentMetrics(); - nodeAdmin.updateNodeAdminMetrics(); + nodeAdmin.updateMetrics(); } catch (Throwable e) { log.log(Level.WARNING, "Metric fetcher scheduler failed", e); } @@ -126,7 +125,7 @@ public class NodeAdminStateUpdater { throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? "frozen" : "unfrozen")); } - boolean hostIsActiveInNR = nodeRepository.getNode(hostHostname).getState() == NodeState.active; + boolean hostIsActiveInNR = nodeRepository.getNode(hostHostname).state() == NodeState.active; switch (wantedState) { case RESUMED: if (hostIsActiveInNR) orchestrator.resume(hostHostname); @@ -165,7 +164,7 @@ public class NodeAdminStateUpdater { void adjustNodeAgentsToRunFromNodeRepository() { try { Map<String, NodeSpec> nodeSpecByHostname = nodeRepository.getNodes(hostHostname).stream() - .collect(Collectors.toMap(NodeSpec::getHostname, Function.identity())); + .collect(Collectors.toMap(NodeSpec::hostname, Function.identity())); Map<String, Acl> aclByHostname = Optional.of(cachedAclSupplier.get()) .filter(acls -> acls.keySet().containsAll(nodeSpecByHostname.keySet())) .orElseGet(cachedAclSupplier::invalidateAndGet); @@ -184,8 +183,8 @@ public class NodeAdminStateUpdater { private List<String> getNodesInActiveState() { return nodeRepository.getNodes(hostHostname) .stream() - .filter(node -> node.getState() == NodeState.active) - .map(NodeSpec::getHostname) + .filter(node -> node.state() == NodeState.active) + .map(NodeSpec::hostname) .collect(Collectors.toList()); } diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgent.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgent.java index d62cb8e45d9..de5ee1b69a4 
100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgent.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgent.java @@ -33,11 +33,6 @@ public interface NodeAgent { void updateContainerNodeMetrics(); /** - * Returns true if NodeAgent is waiting for an image download to finish - */ - boolean isDownloadingImage(); - - /** * Returns and resets number of unhandled exceptions */ int getAndResetNumberOfUnhandledExceptions(); diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContext.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContext.java index a7cdd7e655d..f1fd97f6e4c 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContext.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContext.java @@ -26,11 +26,11 @@ public interface NodeAgentContext extends TaskContext { /** @return hostname of the docker container this context applies to */ default HostName hostname() { - return HostName.from(node().getHostname()); + return HostName.from(node().hostname()); } default NodeType nodeType() { - return node().getNodeType(); + return node().type(); } AthenzIdentity identity(); diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContextImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContextImpl.java index 8435fe34770..ef8ea60bee3 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContextImpl.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContextImpl.java @@ -1,9 +1,7 @@ package com.yahoo.vespa.hosted.node.admin.nodeagent; import com.yahoo.config.provision.CloudName; -import com.yahoo.config.provision.Environment; import com.yahoo.config.provision.NodeType; -import 
com.yahoo.config.provision.RegionName; import com.yahoo.config.provision.SystemName; import com.yahoo.config.provision.zone.ZoneApi; import com.yahoo.config.provision.zone.ZoneId; @@ -47,7 +45,7 @@ public class NodeAgentContextImpl implements NodeAgentContext { String vespaUser, String vespaUserOnHost) { this.node = Objects.requireNonNull(node); this.acl = Objects.requireNonNull(acl); - this.containerName = ContainerName.fromHostname(node.getHostname()); + this.containerName = ContainerName.fromHostname(node.hostname()); this.identity = Objects.requireNonNull(identity); this.dockerNetworking = Objects.requireNonNull(dockerNetworking); this.zone = Objects.requireNonNull(zone); @@ -181,12 +179,12 @@ public class NodeAgentContextImpl implements NodeAgentContext { this.nodeSpecBuilder .hostname(hostname) .state(NodeState.active) - .nodeType(NodeType.tenant) + .type(NodeType.tenant) .flavor("d-2-8-50"); } public Builder nodeType(NodeType nodeType) { - this.nodeSpecBuilder.nodeType(nodeType); + this.nodeSpecBuilder.type(nodeType); return this; } diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java index 8c38b1bbd84..977f1016ed8 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java @@ -18,7 +18,7 @@ import com.yahoo.vespa.hosted.dockerapi.exception.DockerException; import com.yahoo.vespa.hosted.dockerapi.exception.DockerExecTimeoutException; import com.yahoo.vespa.hosted.dockerapi.metrics.DimensionMetrics; import com.yahoo.vespa.hosted.dockerapi.metrics.Dimensions; -import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper; +import com.yahoo.vespa.hosted.dockerapi.metrics.Metrics; import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeAttributes; import 
com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeOwner; import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeRepository; @@ -40,7 +40,6 @@ import java.util.Objects; import java.util.Optional; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; -import java.util.logging.Level; import java.util.logging.Logger; import static com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentImpl.ContainerState.ABSENT; @@ -73,8 +72,6 @@ public class NodeAgentImpl implements NodeAgent { private final DoubleFlag containerCpuCap; private int numberOfUnhandledException = 0; - private DockerImage imageBeingDownloaded = null; - private long currentRebootGeneration = 0; private Optional<Long> currentRestartGeneration = Optional.empty(); @@ -121,7 +118,7 @@ public class NodeAgentImpl implements NodeAgent { this.healthChecker = healthChecker; this.containerCpuCap = Flags.CONTAINER_CPU_CAP.bindTo(flagSource) - .with(FetchVector.Dimension.HOSTNAME, contextSupplier.currentContext().node().getHostname()); + .with(FetchVector.Dimension.HOSTNAME, contextSupplier.currentContext().node().hostname()); this.loopThread = new Thread(() -> { while (!terminated.get()) { @@ -175,20 +172,20 @@ public class NodeAgentImpl implements NodeAgent { final NodeAttributes currentNodeAttributes = new NodeAttributes(); final NodeAttributes newNodeAttributes = new NodeAttributes(); - if (context.node().getWantedRestartGeneration().isPresent() && - !Objects.equals(context.node().getCurrentRestartGeneration(), currentRestartGeneration)) { - currentNodeAttributes.withRestartGeneration(context.node().getCurrentRestartGeneration()); + if (context.node().wantedRestartGeneration().isPresent() && + !Objects.equals(context.node().currentRestartGeneration(), currentRestartGeneration)) { + currentNodeAttributes.withRestartGeneration(context.node().currentRestartGeneration()); newNodeAttributes.withRestartGeneration(currentRestartGeneration); } - if 
(!Objects.equals(context.node().getCurrentRebootGeneration(), currentRebootGeneration)) { - currentNodeAttributes.withRebootGeneration(context.node().getCurrentRebootGeneration()); + if (!Objects.equals(context.node().currentRebootGeneration(), currentRebootGeneration)) { + currentNodeAttributes.withRebootGeneration(context.node().currentRebootGeneration()); newNodeAttributes.withRebootGeneration(currentRebootGeneration); } - Optional<DockerImage> actualDockerImage = context.node().getWantedDockerImage().filter(n -> containerState == UNKNOWN); - if (!Objects.equals(context.node().getCurrentDockerImage(), actualDockerImage)) { - DockerImage currentImage = context.node().getCurrentDockerImage().orElse(DockerImage.EMPTY); + Optional<DockerImage> actualDockerImage = context.node().wantedDockerImage().filter(n -> containerState == UNKNOWN); + if (!Objects.equals(context.node().currentDockerImage(), actualDockerImage)) { + DockerImage currentImage = context.node().currentDockerImage().orElse(DockerImage.EMPTY); DockerImage newImage = actualDockerImage.orElse(DockerImage.EMPTY); currentNodeAttributes.withDockerImage(currentImage); @@ -231,7 +228,7 @@ public class NodeAgentImpl implements NodeAgent { shouldRestartServices(context.node()).ifPresent(restartReason -> { context.log(logger, "Will restart services: " + restartReason); restartServices(context, existingContainer.get()); - currentRestartGeneration = context.node().getWantedRestartGeneration(); + currentRestartGeneration = context.node().wantedRestartGeneration(); }); } @@ -239,18 +236,18 @@ public class NodeAgentImpl implements NodeAgent { } private Optional<String> shouldRestartServices(NodeSpec node) { - if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty(); + if (!node.wantedRestartGeneration().isPresent()) return Optional.empty(); // Restart generation is only optional because it does not exist for unallocated nodes - if (currentRestartGeneration.get() < 
node.getWantedRestartGeneration().get()) { + if (currentRestartGeneration.get() < node.wantedRestartGeneration().get()) { return Optional.of("Restart requested - wanted restart generation has been bumped: " - + currentRestartGeneration.get() + " -> " + node.getWantedRestartGeneration().get()); + + currentRestartGeneration.get() + " -> " + node.wantedRestartGeneration().get()); } return Optional.empty(); } private void restartServices(NodeAgentContext context, Container existingContainer) { - if (existingContainer.state.isRunning() && context.node().getState() == NodeState.active) { + if (existingContainer.state.isRunning() && context.node().state() == NodeState.active) { context.log(logger, "Restarting services"); // Since we are restarting the services we need to suspend the node. orchestratorSuspendNode(context); @@ -293,22 +290,22 @@ public class NodeAgentImpl implements NodeAgent { } private Optional<String> shouldRemoveContainer(NodeAgentContext context, Container existingContainer) { - final NodeState nodeState = context.node().getState(); + final NodeState nodeState = context.node().state(); if (nodeState == NodeState.dirty || nodeState == NodeState.provisioned) { return Optional.of("Node in state " + nodeState + ", container should no longer be running"); } - if (context.node().getWantedDockerImage().isPresent() && - !context.node().getWantedDockerImage().get().equals(existingContainer.image)) { + if (context.node().wantedDockerImage().isPresent() && + !context.node().wantedDockerImage().get().equals(existingContainer.image)) { return Optional.of("The node is supposed to run a new Docker image: " - + existingContainer.image.asString() + " -> " + context.node().getWantedDockerImage().get().asString()); + + existingContainer.image.asString() + " -> " + context.node().wantedDockerImage().get().asString()); } if (!existingContainer.state.isRunning()) { return Optional.of("Container no longer running"); } - if (currentRebootGeneration < 
context.node().getWantedRebootGeneration()) { + if (currentRebootGeneration < context.node().wantedRebootGeneration()) { return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d", - currentRebootGeneration, context.node().getWantedRebootGeneration())); + currentRebootGeneration, context.node().wantedRebootGeneration())); } // Even though memory can be easily changed with docker update, we need to restart the container @@ -333,7 +330,7 @@ public class NodeAgentImpl implements NodeAgent { } try { - if (context.node().getState() != NodeState.dirty) { + if (context.node().state() != NodeState.dirty) { suspend(); } stopServices(); @@ -344,7 +341,7 @@ public class NodeAgentImpl implements NodeAgent { storageMaintainer.handleCoreDumpsForContainer(context, Optional.of(existingContainer)); dockerOperations.removeContainer(context, existingContainer); - currentRebootGeneration = context.node().getWantedRebootGeneration(); + currentRebootGeneration = context.node().wantedRebootGeneration(); containerState = ABSENT; context.log(logger, "Container successfully removed, new containerState is " + containerState); } @@ -364,13 +361,13 @@ public class NodeAgentImpl implements NodeAgent { private ContainerResources getContainerResources(NodeAgentContext context) { double cpuCap = noCpuCap(context.zone()) ? 
0 : - context.node().getOwner() + context.node().owner() .map(NodeOwner::asApplicationId) .map(appId -> containerCpuCap.with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm())) .orElse(containerCpuCap) - .value() * context.node().getMinCpuCores(); + .value() * context.node().vcpus(); - return ContainerResources.from(cpuCap, context.node().getMinCpuCores(), context.node().getMinMainMemoryAvailableGb()); + return ContainerResources.from(cpuCap, context.node().vcpus(), context.node().memoryGb()); } private boolean noCpuCap(ZoneApi zone) { @@ -378,20 +375,16 @@ public class NodeAgentImpl implements NodeAgent { || (zone.getSystemName().isCd() && zone.getEnvironment() != Environment.prod); } - private void scheduleDownLoadIfNeeded(NodeSpec node, Optional<Container> container) { - if (node.getWantedDockerImage().equals(container.map(c -> c.image))) return; + private boolean downloadImageIfNeeded(NodeSpec node, Optional<Container> container) { + if (node.wantedDockerImage().equals(container.map(c -> c.image))) return false; - if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) { - imageBeingDownloaded = node.getWantedDockerImage().get(); - } else if (imageBeingDownloaded != null) { // Image was downloading, but now it's ready - imageBeingDownloaded = null; - } + return node.wantedDockerImage().map(dockerOperations::pullImageAsyncIfNeeded).orElse(false); } public void converge(NodeAgentContext context) { try { doConverge(context); - } catch (OrchestratorException | ConvergenceException e) { + } catch (ConvergenceException e) { context.log(logger, e.getMessage()); } catch (ContainerNotFoundException e) { containerState = ABSENT; @@ -413,14 +406,14 @@ public class NodeAgentImpl implements NodeAgent { logChangesToNodeSpec(context, lastNode, node); // Current reboot generation uninitialized or incremented from outside to cancel reboot - if (currentRebootGeneration < node.getCurrentRebootGeneration()) - currentRebootGeneration = 
node.getCurrentRebootGeneration(); + if (currentRebootGeneration < node.currentRebootGeneration()) + currentRebootGeneration = node.currentRebootGeneration(); // Either we have changed allocation status (restart gen. only available to allocated nodes), or // restart generation has been incremented from outside to cancel restart - if (currentRestartGeneration.isPresent() != node.getCurrentRestartGeneration().isPresent() || - currentRestartGeneration.map(current -> current < node.getCurrentRestartGeneration().get()).orElse(false)) - currentRestartGeneration = node.getCurrentRestartGeneration(); + if (currentRestartGeneration.isPresent() != node.currentRestartGeneration().isPresent() || + currentRestartGeneration.map(current -> current < node.currentRestartGeneration().get()).orElse(false)) + currentRestartGeneration = node.currentRestartGeneration(); // Every time the node spec changes, we should clear the metrics for this container as the dimensions // will change and we will be reporting duplicate metrics. 
@@ -431,11 +424,12 @@ public class NodeAgentImpl implements NodeAgent { lastNode = node; } - switch (node.getState()) { + switch (node.state()) { case ready: case reserved: case parked: case failed: + case inactive: removeContainerIfNeededUpdateContainerState(context, container); updateNodeRepoWithCurrentAttributes(context); break; @@ -443,13 +437,12 @@ public class NodeAgentImpl implements NodeAgent { storageMaintainer.handleCoreDumpsForContainer(context, container); storageMaintainer.getDiskUsageFor(context) - .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb()) + .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.diskGb()) .filter(diskUtil -> diskUtil >= 0.8) .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context)); - scheduleDownLoadIfNeeded(node, container); - if (isDownloadingImage()) { - context.log(logger, "Waiting for image to download " + imageBeingDownloaded.asString()); + if (downloadImageIfNeeded(node, container)) { + context.log(logger, "Waiting for image to download " + context.node().wantedDockerImage().get().asString()); return; } container = removeContainerIfNeededUpdateContainerState(context, container); @@ -481,29 +474,25 @@ public class NodeAgentImpl implements NodeAgent { context.log(logger, "Call resume against Orchestrator"); orchestrator.resume(context.hostname().value()); break; - case inactive: - removeContainerIfNeededUpdateContainerState(context, container); - updateNodeRepoWithCurrentAttributes(context); - break; case provisioned: nodeRepository.setNodeState(context.hostname().value(), NodeState.dirty); break; case dirty: removeContainerIfNeededUpdateContainerState(context, container); - context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready"); + context.log(logger, "State is " + node.state() + ", will delete application storage and mark node as ready"); credentialsMaintainer.ifPresent(maintainer -> 
maintainer.clearCredentials(context)); storageMaintainer.archiveNodeStorage(context); updateNodeRepoWithCurrentAttributes(context); nodeRepository.setNodeState(context.hostname().value(), NodeState.ready); break; default: - throw new RuntimeException("UNKNOWN STATE " + node.getState().name()); + throw new ConvergenceException("UNKNOWN STATE " + node.state().name()); } } private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) { StringBuilder builder = new StringBuilder(); - appendIfDifferent(builder, "state", lastNode, node, NodeSpec::getState); + appendIfDifferent(builder, "state", lastNode, node, NodeSpec::state); if (builder.length() > 0) { context.log(logger, LogLevel.INFO, "Changes to node: " + builder.toString()); } @@ -536,14 +525,14 @@ public class NodeAgentImpl implements NodeAgent { Dimensions.Builder dimensionsBuilder = new Dimensions.Builder() .add("host", context.hostname().value()) .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType())) - .add("state", node.getState().toString()); - node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent)); - node.getAllowedToBeDown().ifPresent(allowed -> + .add("state", node.state().toString()); + node.parentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent)); + node.allowedToBeDown().ifPresent(allowed -> dimensionsBuilder.add("orchestratorState", allowed ? 
"ALLOWED_TO_BE_DOWN" : "NO_REMARKS")); Dimensions dimensions = dimensionsBuilder.build(); ContainerStats stats = containerStats.get(); - final String APP = MetricReceiverWrapper.APPLICATION_NODE; + final String APP = Metrics.APPLICATION_NODE; final int totalNumCpuCores = stats.getCpuStats().getOnlineCpus(); final long cpuContainerKernelTime = stats.getCpuStats().getUsageInKernelMode(); final long cpuContainerTotalTime = stats.getCpuStats().getTotalUsage(); @@ -551,13 +540,13 @@ public class NodeAgentImpl implements NodeAgent { final long memoryTotalBytes = stats.getMemoryStats().getLimit(); final long memoryTotalBytesUsage = stats.getMemoryStats().getUsage(); final long memoryTotalBytesCache = stats.getMemoryStats().getCache(); - final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB); + final long diskTotalBytes = (long) (node.diskGb() * BYTES_IN_GB); final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context); lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime); // Ratio of CPU cores allocated to this container to total number of CPU cores on this host - final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores; + final double allocatedCpuRatio = node.vcpus() / totalNumCpuCores; double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio; double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio; @@ -575,7 +564,7 @@ public class NodeAgentImpl implements NodeAgent { .withMetric("mem_total.util", 100 * memoryTotalUsageRatio) .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated) .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated) - .withMetric("cpu.vcpus", node.getMinCpuCores()) + .withMetric("cpu.vcpus", node.vcpus()) .withMetric("disk.limit", diskTotalBytes); diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed)); @@ -604,27 +593,15 
@@ public class NodeAgentImpl implements NodeAgent { for (DimensionMetrics dimensionMetrics : metrics) { params.append(dimensionMetrics.toSecretAgentReport()); } - } catch (JsonProcessingException e) { - // TODO: wrap everything into one try-block (to avoid 'return') when old metrics proxy is discontinued - context.log(logger, LogLevel.WARNING, "Failed to wrap metrics in secret agent report", e); - return; - } - String wrappedMetrics = "s:" + params.toString(); - - // Push metrics to the metrics proxy in each container - runPushMetricsCommand(context, wrappedMetrics, true); - runPushMetricsCommand(context, wrappedMetrics, false); - } + String wrappedMetrics = "s:" + params.toString(); - // TODO: Clean up and inline method when old metrics proxy has been discontinued. - private void runPushMetricsCommand(NodeAgentContext context, String wrappedMetrics, boolean newMetricsProxy) { - int port = newMetricsProxy ? 19095 : 19091; - String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:" + port, "setExtraMetrics", wrappedMetrics}; - try { + // Push metrics to the metrics proxy in each container. + // TODO Remove port selection logic when all hosted apps have upgraded to Vespa 7. + int port = context.node().currentVespaVersion().map(version -> version.getMajor() == 6).orElse(false) ? 19091 : 19095; + String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:" + port, "setExtraMetrics", wrappedMetrics}; dockerOperations.executeCommandInContainerAsRoot(context, 5L, command); - } catch (DockerExecTimeoutException e) { - Level level = newMetricsProxy ? 
LogLevel.DEBUG : LogLevel.WARNING; - context.log(logger, level, "Failed to push metrics to container", e); + } catch (JsonProcessingException | DockerExecTimeoutException e) { + context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e); } } @@ -637,11 +614,6 @@ public class NodeAgentImpl implements NodeAgent { } @Override - public boolean isDownloadingImage() { - return imageBeingDownloaded != null; - } - - @Override public int getAndResetNumberOfUnhandledExceptions() { int temp = numberOfUnhandledException; numberOfUnhandledException = 0; @@ -694,7 +666,7 @@ public class NodeAgentImpl implements NodeAgent { // to allow the node admin to make decisions that depend on the docker image. Or, each docker image // needs to contain routines for drain and suspend. For many images, these can just be dummy routines. private void orchestratorSuspendNode(NodeAgentContext context) { - if (context.node().getState() != NodeState.active) return; + if (context.node().state() != NodeState.active) return; context.log(logger, "Ask Orchestrator for permission to suspend node"); try { diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/util/PrefixLogger.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/util/PrefixLogger.java deleted file mode 100644 index f4d85a19f6d..00000000000 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/util/PrefixLogger.java +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.vespa.hosted.node.admin.util; - -import com.yahoo.log.LogLevel; -import com.yahoo.vespa.hosted.dockerapi.ContainerName; - -import java.util.logging.Level; -import java.util.logging.Logger; - -/** - * @author freva - */ -public class PrefixLogger { - private final String prefix; - private final Logger logger; - - private <T> PrefixLogger(Class<T> clazz, String prefix) { - this.logger = Logger.getLogger(clazz.getName()); - this.prefix = prefix + ": "; - } - - public static <T> PrefixLogger getNodeAdminLogger(Class<T> clazz) { - return new PrefixLogger(clazz, "NodeAdmin"); - } - - public static <T> PrefixLogger getNodeAgentLogger(Class<T> clazz, ContainerName containerName) { - return new PrefixLogger(clazz, "NodeAgent-" + containerName.asString()); - } - - private void log(Level level, String message, Throwable thrown) { - logger.log(level, prefix + message, thrown); - } - - private void log(Level level, String message) { - logger.log(level, prefix + message); - } - - - public void debug(String message) { - log(LogLevel.DEBUG, message); - } - - public void info(String message) { - log(LogLevel.INFO, message); - } - - public void info(String message, Throwable thrown) { - log(LogLevel.INFO, message, thrown); - } - - public void error(String message) { - log(LogLevel.ERROR, message); - } - - public void error(String message, Throwable thrown) { - log(LogLevel.ERROR, message, thrown); - } - - public void warning(String message) { - log(LogLevel.WARNING, message); - } - - public void warning(String message, Throwable thrown) { - log(LogLevel.WARNING, message, thrown); - } -} diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepositoryTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepositoryTest.java index fb443ed14c4..0938eb23b49 100644 --- 
a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepositoryTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepositoryTest.java @@ -105,14 +105,14 @@ public class RealNodeRepositoryTest { List<NodeSpec> containersToRun = nodeRepositoryApi.getNodes(dockerHostHostname); assertThat(containersToRun.size(), is(1)); NodeSpec node = containersToRun.get(0); - assertThat(node.getHostname(), is("host4.yahoo.com")); - assertThat(node.getWantedDockerImage().get(), is(DockerImage.fromString("docker-registry.domain.tld:8080/dist/vespa:6.42.0"))); - assertThat(node.getState(), is(NodeState.active)); - assertThat(node.getWantedRestartGeneration().get(), is(0L)); - assertThat(node.getCurrentRestartGeneration().get(), is(0L)); - assertEquals(1, node.getMinCpuCores(), delta); - assertEquals(1, node.getMinMainMemoryAvailableGb(), delta); - assertEquals(100, node.getMinDiskAvailableGb(), delta); + assertThat(node.hostname(), is("host4.yahoo.com")); + assertThat(node.wantedDockerImage().get(), is(DockerImage.fromString("docker-registry.domain.tld:8080/dist/vespa:6.42.0"))); + assertThat(node.state(), is(NodeState.active)); + assertThat(node.wantedRestartGeneration().get(), is(0L)); + assertThat(node.currentRestartGeneration().get(), is(0L)); + assertEquals(1, node.vcpus(), delta); + assertEquals(1, node.memoryGb(), delta); + assertEquals(100, node.diskGb(), delta); } @Test @@ -120,7 +120,7 @@ public class RealNodeRepositoryTest { String hostname = "host4.yahoo.com"; Optional<NodeSpec> node = nodeRepositoryApi.getOptionalNode(hostname); assertTrue(node.isPresent()); - assertEquals(hostname, node.get().getHostname()); + assertEquals(hostname, node.get().hostname()); } @Test @@ -176,8 +176,8 @@ public class RealNodeRepositoryTest { NodeSpec hostSpecInNodeRepo = nodeRepositoryApi.getOptionalNode("host123.domain.tld") .orElseThrow(RuntimeException::new); - 
assertEquals(host.nodeFlavor, hostSpecInNodeRepo.getFlavor()); - assertEquals(host.nodeType, hostSpecInNodeRepo.getNodeType()); + assertEquals(host.nodeFlavor, hostSpecInNodeRepo.flavor()); + assertEquals(host.nodeType, hostSpecInNodeRepo.type()); assertTrue(nodeRepositoryApi.getOptionalNode("host123-1.domain.tld").isPresent()); } diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/state/StateImplTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/state/StateImplTest.java index 2604aa05367..14755ebf9cc 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/state/StateImplTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/state/StateImplTest.java @@ -2,6 +2,7 @@ package com.yahoo.vespa.hosted.node.admin.configserver.state; import com.yahoo.vespa.hosted.node.admin.configserver.ConfigServerApi; +import com.yahoo.vespa.hosted.node.admin.configserver.HttpException; import com.yahoo.vespa.hosted.node.admin.configserver.state.bindings.HealthResponse; import org.junit.Test; @@ -28,7 +29,7 @@ public class StateImplTest { @Test public void connectException() { - RuntimeException exception = new RuntimeException(new ConnectException("connection refused")); + RuntimeException exception = HttpException.handleException("Error: ", new ConnectException("connection refused")); when(api.get(any(), any())).thenThrow(exception); HealthCode code = state.getHealth(); diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerFailTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerFailTest.java index f3e334fff73..aacb2cafd30 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerFailTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerFailTest.java @@ -29,13 +29,13 @@ public class DockerFailTest { 
.wantedDockerImage(dockerImage) .currentDockerImage(dockerImage) .state(NodeState.active) - .nodeType(NodeType.tenant) + .type(NodeType.tenant) .flavor("docker") .wantedRestartGeneration(1L) .currentRestartGeneration(1L) - .minCpuCores(1) - .minMainMemoryAvailableGb(1) - .minDiskAvailableGb(1) + .vcpus(1) + .memoryGb(1) + .diskGb(1) .build()); tester.inOrder(tester.docker).createContainerCommand(eq(dockerImage), eq(containerName)); diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerTester.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerTester.java index f9524d32c81..22b3949755f 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerTester.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerTester.java @@ -4,11 +4,10 @@ package com.yahoo.vespa.hosted.node.admin.integrationTests; import com.yahoo.collections.Pair; import com.yahoo.config.provision.HostName; import com.yahoo.config.provision.NodeType; -import com.yahoo.metrics.simple.MetricReceiver; import com.yahoo.system.ProcessExecuter; import com.yahoo.vespa.flags.InMemoryFlagSource; import com.yahoo.vespa.hosted.dockerapi.Docker; -import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper; +import com.yahoo.vespa.hosted.dockerapi.metrics.Metrics; import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeSpec; import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeState; import com.yahoo.vespa.hosted.node.admin.configserver.orchestrator.Orchestrator; @@ -82,7 +81,7 @@ public class DockerTester implements AutoCloseable { NodeSpec hostSpec = new NodeSpec.Builder() .hostname(HOST_HOSTNAME.value()) .state(NodeState.active) - .nodeType(NodeType.host) + .type(NodeType.host) .flavor("default") .wantedRestartGeneration(1L) .currentRestartGeneration(1L) @@ -92,11 +91,11 @@ public class DockerTester implements 
AutoCloseable { FileSystem fileSystem = TestFileSystem.create(); DockerOperations dockerOperations = new DockerOperationsImpl(docker, processExecuter, ipAddresses); - MetricReceiverWrapper mr = new MetricReceiverWrapper(MetricReceiver.nullImplementation); + Metrics metrics = new Metrics(); NodeAgentFactory nodeAgentFactory = contextSupplier -> new NodeAgentImpl( contextSupplier, nodeRepository, orchestrator, dockerOperations, storageMaintainer, flagSource, Optional.empty(), Optional.empty(), Optional.empty()); - nodeAdmin = new NodeAdminImpl(nodeAgentFactory, mr, Clock.systemUTC(), Duration.ofMillis(10), Duration.ZERO); + nodeAdmin = new NodeAdminImpl(nodeAgentFactory, metrics, Clock.systemUTC(), Duration.ofMillis(10), Duration.ZERO); NodeAgentContextFactory nodeAgentContextFactory = (nodeSpec, acl) -> new NodeAgentContextImpl.Builder(nodeSpec).acl(acl).fileSystem(fileSystem).build(); nodeAdminStateUpdater = new NodeAdminStateUpdater(nodeAgentContextFactory, nodeRepository, orchestrator, diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/MultiDockerTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/MultiDockerTest.java index 27b11c3c1ba..8163f90e31f 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/MultiDockerTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/MultiDockerTest.java @@ -28,15 +28,15 @@ public class MultiDockerTest { tester.addChildNodeRepositoryNode( new NodeSpec.Builder(nodeSpec2) .state(NodeState.dirty) - .minCpuCores(1) - .minMainMemoryAvailableGb(1) - .minDiskAvailableGb(1) + .vcpus(1) + .memoryGb(1) + .diskGb(1) .build()); tester.inOrder(tester.docker).deleteContainer(eq(new ContainerName("host2"))); tester.inOrder(tester.storageMaintainer).archiveNodeStorage( argThat(context -> context.containerName().equals(new ContainerName("host2")))); - 
tester.inOrder(tester.nodeRepository).setNodeState(eq(nodeSpec2.getHostname()), eq(NodeState.ready)); + tester.inOrder(tester.nodeRepository).setNodeState(eq(nodeSpec2.hostname()), eq(NodeState.ready)); addAndWaitForNode(tester, "host3.test.yahoo.com", DockerImage.fromString("image1")); } @@ -47,13 +47,13 @@ public class MultiDockerTest { .hostname(hostName) .wantedDockerImage(dockerImage) .state(NodeState.active) - .nodeType(NodeType.tenant) + .type(NodeType.tenant) .flavor("docker") .wantedRestartGeneration(1L) .currentRestartGeneration(1L) - .minCpuCores(2) - .minMainMemoryAvailableGb(4) - .minDiskAvailableGb(1) + .vcpus(2) + .memoryGb(4) + .diskGb(1) .build(); tester.addChildNodeRepositoryNode(nodeSpec); diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/NodeRepoMock.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/NodeRepoMock.java index ebf9d72ff1b..625166a10d2 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/NodeRepoMock.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/NodeRepoMock.java @@ -32,7 +32,7 @@ public class NodeRepoMock implements NodeRepository { public List<NodeSpec> getNodes(String baseHostName) { synchronized (monitor) { return nodeRepositoryNodesByHostname.values().stream() - .filter(node -> baseHostName.equals(node.getParentHostname().orElse(null))) + .filter(node -> baseHostName.equals(node.parentHostname().orElse(null))) .collect(Collectors.toList()); } } @@ -69,7 +69,7 @@ public class NodeRepoMock implements NodeRepository { void updateNodeRepositoryNode(NodeSpec nodeSpec) { synchronized (monitor) { - nodeRepositoryNodesByHostname.put(nodeSpec.getHostname(), nodeSpec); + nodeRepositoryNodesByHostname.put(nodeSpec.hostname(), nodeSpec); } } } diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RebootTest.java 
b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RebootTest.java index 674c562cd88..4a232a5b2bd 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RebootTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RebootTest.java @@ -52,9 +52,9 @@ public class RebootTest { .hostname(hostname) .wantedDockerImage(dockerImage) .state(NodeState.active) - .nodeType(NodeType.tenant) + .type(NodeType.tenant) .flavor("docker") - .vespaVersion(Version.fromString("6.50.0")) + .currentVespaVersion(Version.fromString("6.50.0")) .wantedRestartGeneration(1L) .currentRestartGeneration(1L) .build(); diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RestartTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RestartTest.java index 82e5eca042c..bfc54cac045 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RestartTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RestartTest.java @@ -30,7 +30,7 @@ public class RestartTest { .hostname(hostname) .state(NodeState.active) .wantedDockerImage(dockerImage) - .nodeType(NodeType.tenant) + .type(NodeType.tenant) .flavor("docker") .wantedRestartGeneration(1) .currentRestartGeneration(1) diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainerTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainerTest.java index 36169a2b283..57b18606def 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainerTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainerTest.java @@ -157,12 +157,12 @@ public class StorageMaintainerTest { NodeSpec nodeSpec = new NodeSpec.Builder() .hostname("host123-5.test.domain.tld") - .nodeType(nodeType) + 
.type(nodeType) .state(NodeState.active) .parentHostname("host123.test.domain.tld") .owner(new NodeOwner("tenant", "application", "instance")) .membership(new NodeMembership("clusterType", "clusterId", null, 0, false)) - .vespaVersion(Version.fromString("6.305.12")) + .currentVespaVersion(Version.fromString("6.305.12")) .flavor("d-2-8-50") .canonicalFlavor("d-2-8-50") .build(); diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImplTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImplTest.java index ce2e9f6d7a2..ca9b05a3ff6 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImplTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImplTest.java @@ -2,13 +2,11 @@ package com.yahoo.vespa.hosted.node.admin.nodeadmin; import com.yahoo.config.provision.NodeType; -import com.yahoo.metrics.simple.MetricReceiver; import com.yahoo.test.ManualClock; -import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper; +import com.yahoo.vespa.hosted.dockerapi.metrics.Metrics; import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeSpec; import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeState; import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentContext; -import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentContextFactory; import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentContextImpl; import org.junit.Test; import org.mockito.InOrder; @@ -40,11 +38,10 @@ import static org.mockito.Mockito.when; public class NodeAdminImplTest { private final NodeAgentWithSchedulerFactory nodeAgentWithSchedulerFactory = mock(NodeAgentWithSchedulerFactory.class); - private final NodeAgentContextFactory nodeAgentContextFactory = mock(NodeAgentContextFactory.class); private final ManualClock clock = new ManualClock(); private final NodeAdminImpl nodeAdmin = new 
NodeAdminImpl(nodeAgentWithSchedulerFactory, - new MetricReceiverWrapper(MetricReceiver.nullImplementation), clock, Duration.ZERO, Duration.ZERO); + new Metrics(), clock, Duration.ZERO, Duration.ZERO); @Test public void nodeAgentsAreProperlyLifeCycleManaged() { @@ -163,7 +160,7 @@ public class NodeAdminImplTest { NodeSpec nodeSpec = new NodeSpec.Builder() .hostname(hostname) .state(NodeState.active) - .nodeType(NodeType.tenant) + .type(NodeType.tenant) .flavor("default") .build(); diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java index b8894bbf814..bb18e261301 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java @@ -59,7 +59,7 @@ public class NodeAdminStateUpdaterTest { public void state_convergence() { mockNodeRepo(NodeState.active, 4); List<String> activeHostnames = nodeRepository.getNodes(hostHostname.value()).stream() - .map(NodeSpec::getHostname) + .map(NodeSpec::hostname) .collect(Collectors.toList()); List<String> suspendHostnames = new ArrayList<>(activeHostnames); suspendHostnames.add(hostHostname.value()); @@ -170,7 +170,7 @@ public class NodeAdminStateUpdaterTest { // When doing batch suspend, only suspend the containers if the host is not active List<String> activeHostnames = nodeRepository.getNodes(hostHostname.value()).stream() - .map(NodeSpec::getHostname) + .map(NodeSpec::hostname) .collect(Collectors.toList()); updater.converge(SUSPENDED); verify(orchestrator, times(1)).suspend(eq(hostHostname.value()), eq(activeHostnames)); @@ -206,9 +206,9 @@ public class NodeAdminStateUpdaterTest { updater.adjustNodeAgentsToRunFromNodeRepository(); updater.adjustNodeAgentsToRunFromNodeRepository(); - verify(nodeAgentContextFactory, 
times(3)).create(argThat(spec -> spec.getHostname().equals("host1.yahoo.com")), eq(acl)); - verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.getHostname().equals("host2.yahoo.com")), eq(acl)); - verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.getHostname().equals("host3.yahoo.com")), eq(acl)); + verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.hostname().equals("host1.yahoo.com")), eq(acl)); + verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.hostname().equals("host2.yahoo.com")), eq(acl)); + verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.hostname().equals("host3.yahoo.com")), eq(acl)); verify(nodeRepository, times(3)).getNodes(eq(hostHostname.value())); verify(nodeRepository, times(1)).getAcls(eq(hostHostname.value())); } @@ -224,9 +224,9 @@ public class NodeAdminStateUpdaterTest { updater.adjustNodeAgentsToRunFromNodeRepository(); updater.adjustNodeAgentsToRunFromNodeRepository(); - verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.getHostname().equals("host1.yahoo.com")), eq(acl)); - verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.getHostname().equals("host2.yahoo.com")), eq(acl)); - verify(nodeAgentContextFactory, times(1)).create(argThat(spec -> spec.getHostname().equals("host3.yahoo.com")), eq(Acl.EMPTY)); + verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.hostname().equals("host1.yahoo.com")), eq(acl)); + verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.hostname().equals("host2.yahoo.com")), eq(acl)); + verify(nodeAgentContextFactory, times(1)).create(argThat(spec -> spec.hostname().equals("host3.yahoo.com")), eq(Acl.EMPTY)); verify(nodeRepository, times(3)).getNodes(eq(hostHostname.value())); verify(nodeRepository, times(2)).getAcls(eq(hostHostname.value())); // During the first tick, the cache is invalidated and retried } @@ -241,8 +241,8 @@ public class 
NodeAdminStateUpdaterTest { updater.adjustNodeAgentsToRunFromNodeRepository(); updater.adjustNodeAgentsToRunFromNodeRepository(); - verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.getHostname().equals("host1.yahoo.com")), eq(acl)); - verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.getHostname().equals("host2.yahoo.com")), eq(acl)); + verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.hostname().equals("host1.yahoo.com")), eq(acl)); + verify(nodeAgentContextFactory, times(3)).create(argThat(spec -> spec.hostname().equals("host2.yahoo.com")), eq(acl)); verify(nodeRepository, times(3)).getNodes(eq(hostHostname.value())); verify(nodeRepository, times(1)).getAcls(eq(hostHostname.value())); } @@ -261,11 +261,11 @@ public class NodeAdminStateUpdaterTest { .mapToObj(i -> new NodeSpec.Builder() .hostname("host" + i + ".yahoo.com") .state(NodeState.active) - .nodeType(NodeType.tenant) + .type(NodeType.tenant) .flavor("docker") - .minCpuCores(1) - .minMainMemoryAvailableGb(1) - .minDiskAvailableGb(1) + .vcpus(1) + .memoryGb(1) + .diskGb(1) .build()) .collect(Collectors.toList()); @@ -274,11 +274,11 @@ public class NodeAdminStateUpdaterTest { when(nodeRepository.getNode(eq(hostHostname.value()))).thenReturn(new NodeSpec.Builder() .hostname(hostHostname.value()) .state(hostState) - .nodeType(NodeType.tenant) + .type(NodeType.tenant) .flavor("default") - .minCpuCores(1) - .minMainMemoryAvailableGb(1) - .minDiskAvailableGb(1) + .vcpus(1) + .memoryGb(1) + .diskGb(1) .build()); } diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java index c43750684f6..b4db8ff40d5 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java @@ -5,7 +5,6 @@ 
import com.yahoo.component.Version; import com.yahoo.config.provision.DockerImage; import com.yahoo.config.provision.NodeType; import com.yahoo.io.IOUtils; -import com.yahoo.metrics.simple.MetricReceiver; import com.yahoo.vespa.flags.Flags; import com.yahoo.vespa.flags.InMemoryFlagSource; import com.yahoo.vespa.hosted.dockerapi.Container; @@ -13,7 +12,7 @@ import com.yahoo.vespa.hosted.dockerapi.ContainerName; import com.yahoo.vespa.hosted.dockerapi.ContainerResources; import com.yahoo.vespa.hosted.dockerapi.ContainerStats; import com.yahoo.vespa.hosted.dockerapi.exception.DockerException; -import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper; +import com.yahoo.vespa.hosted.dockerapi.metrics.Metrics; import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeAttributes; import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeMembership; import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeOwner; @@ -34,10 +33,8 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.Collections; -import java.util.Map; +import java.util.List; import java.util.Optional; -import java.util.Set; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; @@ -67,11 +64,11 @@ public class NodeAgentImplTest { private final String hostName = "host1.test.yahoo.com"; private final NodeSpec.Builder nodeBuilder = new NodeSpec.Builder() .hostname(hostName) - .nodeType(NodeType.tenant) + .type(NodeType.tenant) .flavor("docker") - .minCpuCores(MIN_CPU_CORES) - .minMainMemoryAvailableGb(MIN_MAIN_MEMORY_AVAILABLE_GB) - .minDiskAvailableGb(MIN_DISK_AVAILABLE_GB); + .vcpus(MIN_CPU_CORES) + .memoryGb(MIN_MAIN_MEMORY_AVAILABLE_GB) + .diskGb(MIN_DISK_AVAILABLE_GB); private final NodeAgentContextSupplier contextSupplier = mock(NodeAgentContextSupplier.class); private final DockerImage dockerImage = 
DockerImage.fromString("dockerImage"); @@ -79,7 +76,7 @@ public class NodeAgentImplTest { private final NodeRepository nodeRepository = mock(NodeRepository.class); private final Orchestrator orchestrator = mock(Orchestrator.class); private final StorageMaintainer storageMaintainer = mock(StorageMaintainer.class); - private final MetricReceiverWrapper metricReceiver = new MetricReceiverWrapper(MetricReceiver.nullImplementation); + private final Metrics metrics = new Metrics(); private final AclMaintainer aclMaintainer = mock(AclMaintainer.class); private final HealthChecker healthChecker = mock(HealthChecker.class); private final CredentialsMaintainer credentialsMaintainer = mock(CredentialsMaintainer.class); @@ -93,7 +90,7 @@ public class NodeAgentImplTest { .currentDockerImage(dockerImage) .state(NodeState.active) .wantedVespaVersion(vespaVersion) - .vespaVersion(vespaVersion) + .currentVespaVersion(vespaVersion) .build(); NodeAgentContext context = createContext(node); @@ -122,7 +119,7 @@ public class NodeAgentImplTest { .currentDockerImage(dockerImage) .state(NodeState.active) .wantedVespaVersion(vespaVersion) - .vespaVersion(vespaVersion) + .currentVespaVersion(vespaVersion) .build(); NodeAgentContext context = createContext(node); @@ -143,7 +140,7 @@ public class NodeAgentImplTest { .currentDockerImage(dockerImage) .state(NodeState.active) .wantedVespaVersion(vespaVersion) - .vespaVersion(vespaVersion) + .currentVespaVersion(vespaVersion) .build(); NodeAgentContext context = createContext(node); @@ -217,7 +214,7 @@ public class NodeAgentImplTest { .currentDockerImage(dockerImage) .state(NodeState.active) .wantedVespaVersion(vespaVersion) - .vespaVersion(vespaVersion) + .currentVespaVersion(vespaVersion) .build(); NodeAgentContext context = createContext(node); @@ -244,7 +241,7 @@ public class NodeAgentImplTest { .currentDockerImage(dockerImage) .state(NodeState.active) .wantedVespaVersion(vespaVersion) - .vespaVersion(vespaVersion); + 
.currentVespaVersion(vespaVersion); NodeAgentContext firstContext = createContext(specBuilder.build()); NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true); @@ -253,9 +250,9 @@ public class NodeAgentImplTest { when(storageMaintainer.getDiskUsageFor(any())).thenReturn(Optional.of(201326592000L)); nodeAgent.doConverge(firstContext); - NodeAgentContext secondContext = createContext(specBuilder.minDiskAvailableGb(200).build()); + NodeAgentContext secondContext = createContext(specBuilder.diskGb(200).build()); nodeAgent.doConverge(secondContext); - NodeAgentContext thirdContext = createContext(specBuilder.minCpuCores(4).build()); + NodeAgentContext thirdContext = createContext(specBuilder.vcpus(4).build()); nodeAgent.doConverge(thirdContext); ContainerResources resourcesAfterThird = ContainerResources.from(0, 4, 16); mockGetContainer(dockerImage, resourcesAfterThird, true); @@ -291,7 +288,7 @@ public class NodeAgentImplTest { .currentDockerImage(dockerImage) .state(NodeState.active) .wantedVespaVersion(vespaVersion) - .vespaVersion(vespaVersion); + .currentVespaVersion(vespaVersion); NodeAgentContext firstContext = createContext(specBuilder.build()); NodeAgentImpl nodeAgent = makeNodeAgent(dockerImage, true); @@ -300,7 +297,7 @@ public class NodeAgentImplTest { when(storageMaintainer.getDiskUsageFor(any())).thenReturn(Optional.of(201326592000L)); nodeAgent.doConverge(firstContext); - NodeAgentContext secondContext = createContext(specBuilder.minMainMemoryAvailableGb(20).build()); + NodeAgentContext secondContext = createContext(specBuilder.memoryGb(20).build()); nodeAgent.doConverge(secondContext); ContainerResources resourcesAfterThird = ContainerResources.from(0, 2, 20); mockGetContainer(dockerImage, resourcesAfterThird, true); @@ -328,7 +325,7 @@ public class NodeAgentImplTest { .currentDockerImage(dockerImage) .state(NodeState.active) .wantedVespaVersion(vespaVersion) - .vespaVersion(vespaVersion) + .currentVespaVersion(vespaVersion) 
.wantedRestartGeneration(wantedRestartGeneration) .currentRestartGeneration(currentRestartGeneration) .build(); @@ -360,7 +357,7 @@ public class NodeAgentImplTest { .currentDockerImage(dockerImage) .state(NodeState.active) .wantedVespaVersion(vespaVersion) - .vespaVersion(vespaVersion) + .currentVespaVersion(vespaVersion) .wantedRebootGeneration(wantedRebootGeneration) .currentRebootGeneration(currentRebootGeneration) .build(); @@ -403,7 +400,7 @@ public class NodeAgentImplTest { .currentDockerImage(dockerImage) .state(NodeState.failed) .wantedVespaVersion(vespaVersion) - .vespaVersion(vespaVersion) + .currentVespaVersion(vespaVersion) .build(); NodeAgentContext context = createContext(node); @@ -449,7 +446,7 @@ public class NodeAgentImplTest { .currentDockerImage(dockerImage) .state(NodeState.inactive) .wantedVespaVersion(vespaVersion) - .vespaVersion(vespaVersion) + .currentVespaVersion(vespaVersion) .build(); NodeAgentContext context = createContext(node); @@ -551,7 +548,7 @@ public class NodeAgentImplTest { .currentDockerImage(dockerImage) .wantedDockerImage(dockerImage) .state(NodeState.active) - .vespaVersion(vespaVersion) + .currentVespaVersion(vespaVersion) .build(); NodeAgentContext context = createContext(node); @@ -573,7 +570,7 @@ public class NodeAgentImplTest { .wantedDockerImage(dockerImage) .currentDockerImage(dockerImage) .state(NodeState.active) - .vespaVersion(vespaVersion) + .currentVespaVersion(vespaVersion) .build(); NodeAgentContext context = createContext(node); @@ -654,10 +651,10 @@ public class NodeAgentImplTest { .wantedDockerImage(dockerImage) .currentDockerImage(dockerImage) .state(NodeState.active) - .vespaVersion(vespaVersion) + .currentVespaVersion(vespaVersion) .owner(owner) .membership(membership) - .minMainMemoryAvailableGb(2) + .memoryGb(2) .allowedToBeDown(true) .parentHostname("parent.host.name.yahoo.com") .build(); @@ -688,9 +685,6 @@ public class NodeAgentImplTest { .replaceAll("\"timestamp\":\\d+", "\"timestamp\":0") 
.replaceAll("([0-9]+\\.[0-9]{1,3})([0-9]*)", "$1"); // Only keep the first 3 decimals - // TODO: Remove when old metrics proxy is discontinued. - calledCommand[3] = calledCommand[3].replaceFirst("19091", "19095"); - assertEquals(context, calledContainerName); assertEquals(5L, calledTimeout); assertArrayEquals(expectedCommand, calledCommand); @@ -713,14 +707,13 @@ public class NodeAgentImplTest { nodeAgent.updateContainerNodeMetrics(); - Set<Map<String, Object>> actualMetrics = metricReceiver.getDefaultMetricsRaw(); - assertEquals(Collections.emptySet(), actualMetrics); + assertEquals(List.of(), metrics.getDefaultMetrics()); } @Test public void testRunningConfigServer() { final NodeSpec node = nodeBuilder - .nodeType(NodeType.config) + .type(NodeType.config) .wantedDockerImage(dockerImage) .state(NodeState.active) .wantedVespaVersion(vespaVersion) diff --git a/node-admin/src/test/resources/expected.container.system.metrics.txt b/node-admin/src/test/resources/expected.container.system.metrics.txt index c44d72b395e..ec750798c98 100644 --- a/node-admin/src/test/resources/expected.container.system.metrics.txt +++ b/node-admin/src/test/resources/expected.container.system.metrics.txt @@ -1,83 +1,80 @@ s: { - "routing": { - "yamas": { - "namespaces": - ["Vespa"] - } - }, "application": "vespa.node", + "dimensions": { + "host": "host1.test.yahoo.com", + "orchestratorState":"ALLOWED_TO_BE_DOWN", + "parentHostname": "parent.host.name.yahoo.com", + "role": "tenants", + "state": "active" + }, "metrics": { - "mem.limit": 4294967296, - "mem.used": 1073741824, - "mem_total.util": 40.808, - "mem_total.used": 1752707072, - "disk.used": 39625000000, "cpu.sys.util": 3.402, - "disk.util": 15.85, - "cpu.vcpus": 2.0, "cpu.util": 5.4, + "cpu.vcpus": 2.0, + "disk.limit": 250000000000, + "disk.used": 39625000000, + "disk.util": 15.85, + "mem.limit": 4294967296, + "mem.used": 1073741824, "mem.util": 25.0, - "disk.limit": 250000000000 + "mem_total.used": 1752707072, + "mem_total.util": 40.808 
}, - "dimensions": { - "host": "host1.test.yahoo.com", - "orchestratorState":"ALLOWED_TO_BE_DOWN", - "role": "tenants", - "state": "active", - "parentHostname": "parent.host.name.yahoo.com" + "routing": { + "yamas": { + "namespaces": ["Vespa"] + } }, "timestamp": 0 } { - "routing": { - "yamas": { - "namespaces": - ["Vespa"] - } - }, "application": "vespa.node", + "dimensions": { + "host": "host1.test.yahoo.com", + "interface": "eth0", + "orchestratorState":"ALLOWED_TO_BE_DOWN", + "parentHostname": "parent.host.name.yahoo.com", + "role": "tenants", + "state": "active" + }, "metrics": { - "net.out.bytes": 20303455, + "net.in.bytes": 19499270, "net.in.dropped": 4, + "net.in.errors": 55, + "net.out.bytes": 20303455, "net.out.dropped": 13, - "net.in.bytes": 19499270, - "net.out.errors": 3, - "net.in.errors": 55 + "net.out.errors": 3 }, - "dimensions": { - "role": "tenants", - "host": "host1.test.yahoo.com", - "orchestratorState":"ALLOWED_TO_BE_DOWN", - "state": "active", - "interface": "eth0", - "parentHostname": "parent.host.name.yahoo.com" + "routing": { + "yamas": { + "namespaces": ["Vespa"] + } }, "timestamp": 0 } { - "routing": { - "yamas": { - "namespaces": - ["Vespa"] - } - }, "application": "vespa.node", + "dimensions": { + "host": "host1.test.yahoo.com", + "interface": "eth1", + "orchestratorState":"ALLOWED_TO_BE_DOWN", + "parentHostname": "parent.host.name.yahoo.com", + "role": "tenants", + "state": "active" + }, "metrics": { - "net.out.bytes": 54246745, + "net.in.bytes": 3245766, "net.in.dropped": 0, + "net.in.errors": 0, + "net.out.bytes": 54246745, "net.out.dropped": 0, - "net.in.bytes": 3245766, - "net.out.errors": 0, - "net.in.errors": 0 + "net.out.errors": 0 }, - "dimensions": { - "role": "tenants", - "host": "host1.test.yahoo.com", - "orchestratorState":"ALLOWED_TO_BE_DOWN", - "state": "active", - "interface": "eth1", - "parentHostname": "parent.host.name.yahoo.com" + "routing": { + "yamas": { + "namespaces": ["Vespa"] + } }, "timestamp": 0 }
\ No newline at end of file diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java index bedfbc5bdc1..9b78f558a7a 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java @@ -399,10 +399,7 @@ public class NodeRepository extends AbstractComponent { public void deactivate(ApplicationId application, NestedTransaction transaction) { try (Mutex lock = lock(application)) { - db.writeTo(Node.State.inactive, - db.getNodes(application, Node.State.reserved, Node.State.active), - Agent.application, Optional.empty(), transaction - ); + deactivate(db.getNodes(application, Node.State.reserved, Node.State.active), transaction); } } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancer.java index 58c576d3f44..369366a1f08 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancer.java @@ -3,6 +3,7 @@ package com.yahoo.vespa.hosted.provision.lb; import com.yahoo.vespa.hosted.provision.maintenance.LoadBalancerExpirer; +import java.time.Instant; import java.util.Objects; /** @@ -14,12 +15,14 @@ public class LoadBalancer { private final LoadBalancerId id; private final LoadBalancerInstance instance; - private final boolean inactive; + private final State state; + private final Instant changedAt; - public LoadBalancer(LoadBalancerId id, LoadBalancerInstance instance, boolean inactive) { + public LoadBalancer(LoadBalancerId id, LoadBalancerInstance instance, State state, Instant changedAt) { this.id = Objects.requireNonNull(id, "id must be non-null"); this.instance = 
Objects.requireNonNull(instance, "instance must be non-null"); - this.inactive = inactive; + this.state = Objects.requireNonNull(state, "state must be non-null"); + this.changedAt = Objects.requireNonNull(changedAt, "changedAt must be non-null"); } /** An identifier for this load balancer. The ID is unique inside the zone */ @@ -32,17 +35,48 @@ public class LoadBalancer { return instance; } - /** - * Returns whether this load balancer is inactive. Inactive load balancers are eventually removed by - * {@link LoadBalancerExpirer}. Inactive load balancers may be reactivated if a deleted cluster is redeployed. - */ - public boolean inactive() { - return inactive; + /** The current state of this */ + public State state() { + return state; } - /** Return a copy of this that is set inactive */ - public LoadBalancer deactivate() { - return new LoadBalancer(id, instance, true); + /** Returns when this was last changed */ + public Instant changedAt() { + return changedAt; + } + + /** Returns a copy of this with state set to given state */ + public LoadBalancer with(State state, Instant changedAt) { + if (changedAt.isBefore(this.changedAt)) { + throw new IllegalArgumentException("Invalid changeAt: '" + changedAt + "' is before existing value '" + + this.changedAt + "'"); + } + if (this.state == State.active && state == State.reserved) { + throw new IllegalArgumentException("Invalid state transition: " + this.state + " -> " + state); + } + return new LoadBalancer(id, instance, state, changedAt); + } + + /** Returns a copy of this with instance set to given instance */ + public LoadBalancer with(LoadBalancerInstance instance) { + return new LoadBalancer(id, instance, state, changedAt); + } + + public enum State { + + /** This load balancer has been provisioned and reserved for an application */ + reserved, + + /** + * The load balancer has been deactivated and is ready to be removed. Inactive load balancers are eventually + * removed by {@link LoadBalancerExpirer}. 
Inactive load balancers may be reactivated if a deleted cluster is + * redeployed. + */ + inactive, + + /** The load balancer is in active use by an application */ + active, + } } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerList.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerList.java index ba7a83169ad..c0bb53ddfe4 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerList.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerList.java @@ -29,7 +29,7 @@ public class LoadBalancerList { /** Returns the subset of load balancers that are inactive */ public LoadBalancerList inactive() { - return of(loadBalancers.stream().filter(LoadBalancer::inactive)); + return of(loadBalancers.stream().filter(lb -> lb.state() == LoadBalancer.State.inactive)); } public List<LoadBalancer> asList() { diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java index 371ed4d2496..61ca19a4cb9 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java @@ -484,7 +484,7 @@ public class CuratorDatabaseClient { } private Optional<LoadBalancer> readLoadBalancer(LoadBalancerId id) { - return read(loadBalancerPath(id), LoadBalancerSerializer::fromJson); + return read(loadBalancerPath(id), (data) -> LoadBalancerSerializer.fromJson(data, clock.instant())); } public void writeLoadBalancer(LoadBalancer loadBalancer) { diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializer.java 
b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializer.java index a4b915a6128..d04dd2b5c18 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializer.java @@ -15,9 +15,9 @@ import com.yahoo.vespa.hosted.provision.lb.Real; import java.io.IOException; import java.io.UncheckedIOException; +import java.time.Instant; import java.util.LinkedHashSet; import java.util.Optional; -import java.util.Set; import java.util.function.Function; /** @@ -27,14 +27,22 @@ import java.util.function.Function; */ public class LoadBalancerSerializer { + // WARNING: Since there are multiple servers in a ZooKeeper cluster and they upgrade one by one + // (and rewrite all nodes on startup), changes to the serialized format must be made + // such that what is serialized on version N+1 can be read by version N: + // - ADDING FIELDS: Always ok + // - REMOVING FIELDS: Stop reading the field first. Stop writing it on a later version. + // - CHANGING THE FORMAT OF A FIELD: Don't do it bro. 
+ private static final String idField = "id"; private static final String hostnameField = "hostname"; + private static final String stateField = "state"; + private static final String changedAtField = "changedAt"; private static final String dnsZoneField = "dnsZone"; private static final String inactiveField = "inactive"; private static final String portsField = "ports"; private static final String networksField = "networks"; private static final String realsField = "reals"; - private static final String nameField = "name"; private static final String ipAddressField = "ipAddress"; private static final String portField = "port"; @@ -44,6 +52,8 @@ public class LoadBalancerSerializer { root.setString(idField, loadBalancer.id().serializedForm()); root.setString(hostnameField, loadBalancer.instance().hostname().toString()); + root.setString(stateField, asString(loadBalancer.state())); + root.setLong(changedAtField, loadBalancer.changedAt().toEpochMilli()); loadBalancer.instance().dnsZone().ifPresent(dnsZone -> root.setString(dnsZoneField, dnsZone.id())); Cursor portArray = root.setArray(portsField); loadBalancer.instance().ports().forEach(portArray::addLong); @@ -56,8 +66,6 @@ public class LoadBalancerSerializer { realObject.setString(ipAddressField, real.ipAddress()); realObject.setLong(portField, real.port()); }); - root.setBool(inactiveField, loadBalancer.inactive()); - try { return SlimeUtils.toJsonBytes(slime); } catch (IOException e) { @@ -65,10 +73,10 @@ public class LoadBalancerSerializer { } } - public static LoadBalancer fromJson(byte[] data) { + public static LoadBalancer fromJson(byte[] data, Instant defaultChangedAt) { Cursor object = SlimeUtils.jsonToSlime(data).get(); - Set<Real> reals = new LinkedHashSet<>(); + var reals = new LinkedHashSet<Real>(); object.field(realsField).traverse((ArrayTraverser) (i, realObject) -> { reals.add(new Real(HostName.from(realObject.field(hostnameField).asString()), realObject.field(ipAddressField).asString(), @@ -76,25 
+84,61 @@ public class LoadBalancerSerializer { }); - Set<Integer> ports = new LinkedHashSet<>(); + var ports = new LinkedHashSet<Integer>(); object.field(portsField).traverse((ArrayTraverser) (i, port) -> ports.add((int) port.asLong())); - Set<String> networks = new LinkedHashSet<>(); + var networks = new LinkedHashSet<String>(); object.field(networksField).traverse((ArrayTraverser) (i, network) -> networks.add(network.asString())); return new LoadBalancer(LoadBalancerId.fromSerializedForm(object.field(idField).asString()), new LoadBalancerInstance( HostName.from(object.field(hostnameField).asString()), - optionalField(object.field(dnsZoneField), DnsZone::new), + optionalString(object.field(dnsZoneField), DnsZone::new), ports, networks, reals ), - object.field(inactiveField).asBool()); + stateFromSlime(object), + instantFromSlime(object.field(changedAtField), defaultChangedAt)); + } + + private static Instant instantFromSlime(Cursor field, Instant defaultValue) { + return optionalValue(field, (value) -> Instant.ofEpochMilli(value.asLong())).orElse(defaultValue); } - private static <T> Optional<T> optionalField(Inspector field, Function<String, T> fieldMapper) { - return Optional.of(field).filter(Inspector::valid).map(Inspector::asString).map(fieldMapper); + private static LoadBalancer.State stateFromSlime(Inspector object) { + var inactiveValue = optionalValue(object.field(inactiveField), Inspector::asBool); + if (inactiveValue.isPresent()) { // TODO(mpolden): Remove reading of "inactive" field after June 2019 + return inactiveValue.get() ? 
LoadBalancer.State.inactive : LoadBalancer.State.active; + } else { + return stateFromString(object.field(stateField).asString()); + } + } + + private static <T> Optional<T> optionalValue(Inspector field, Function<Inspector, T> fieldMapper) { + return Optional.of(field).filter(Inspector::valid).map(fieldMapper); + } + + private static <T> Optional<T> optionalString(Inspector field, Function<String, T> fieldMapper) { + return optionalValue(field, Inspector::asString).map(fieldMapper); + } + + private static String asString(LoadBalancer.State state) { + switch (state) { + case active: return "active"; + case inactive: return "inactive"; + case reserved: return "reserved"; + default: throw new IllegalArgumentException("No serialization defined for state enum '" + state + "'"); + } + } + + private static LoadBalancer.State stateFromString(String state) { + switch (state) { + case "active": return LoadBalancer.State.active; + case "inactive": return LoadBalancer.State.inactive; + case "reserved": return LoadBalancer.State.reserved; + default: throw new IllegalArgumentException("No serialization defined for state string '" + state + "'"); + } } } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java index d38a6e5031c..424889caf72 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java @@ -44,12 +44,12 @@ import java.util.function.UnaryOperator; */ public class NodeSerializer { - // WARNING: Since there are multiple config servers in a cluster and they upgrade one by one - // (and rewrite all nodes on startup), - // changes to the serialized format must be made such that what is serialized on version N+1 - // can be read by version N: + // WARNING: Since there are multiple 
servers in a ZooKeeper cluster and they upgrade one by one + // (and rewrite all nodes on startup), changes to the serialized format must be made + // such that what is serialized on version N+1 can be read by version N: // - ADDING FIELDS: Always ok // - REMOVING FIELDS: Stop reading the field first. Stop writing it on a later version. + // - CHANGING THE FORMAT OF A FIELD: Don't do it bro. /** The configured node flavors */ private final NodeFlavors flavors; diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java index 4626a600d2c..1e83c2c9176 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java @@ -2,6 +2,8 @@ package com.yahoo.vespa.hosted.provision.provisioning; import com.yahoo.config.provision.ApplicationId; +import com.yahoo.config.provision.ClusterMembership; +import com.yahoo.config.provision.ClusterSpec; import com.yahoo.config.provision.HostSpec; import com.yahoo.config.provision.ParentHostUnavailableException; import com.yahoo.transaction.Mutex; @@ -22,16 +24,26 @@ import java.util.function.Function; import java.util.stream.Collectors; /** - * Performs activation of nodes for an application + * Performs activation of resources for an application. E.g. nodes or load balancers. 
* * @author bratseth */ class Activator { private final NodeRepository nodeRepository; + private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner; - public Activator(NodeRepository nodeRepository) { + public Activator(NodeRepository nodeRepository, Optional<LoadBalancerProvisioner> loadBalancerProvisioner) { this.nodeRepository = nodeRepository; + this.loadBalancerProvisioner = loadBalancerProvisioner; + } + + /** Activate required resources for given application */ + public void activate(ApplicationId application, Collection<HostSpec> hosts, NestedTransaction transaction) { + try (Mutex lock = nodeRepository.lock(application)) { + activateNodes(application, hosts, transaction, lock); + activateLoadBalancers(application, hosts, lock); + } } /** @@ -46,36 +58,50 @@ class Activator { * @param transaction Transaction with operations to commit together with any operations done within the repository. * @param application the application to allocate nodes for * @param hosts the hosts to make the set of active nodes of this + * @param applicationLock application lock that must be held when calling this */ - public void activate(ApplicationId application, Collection<HostSpec> hosts, NestedTransaction transaction) { - try (Mutex lock = nodeRepository.lock(application)) { - Set<String> hostnames = hosts.stream().map(HostSpec::hostname).collect(Collectors.toSet()); - NodeList allNodes = nodeRepository.list(); - NodeList applicationNodes = allNodes.owner(application); - - List<Node> reserved = applicationNodes.state(Node.State.reserved).asList(); - List<Node> reservedToActivate = retainHostsInList(hostnames, reserved); - List<Node> active = applicationNodes.state(Node.State.active).asList(); - List<Node> continuedActive = retainHostsInList(hostnames, active); - List<Node> allActive = new ArrayList<>(continuedActive); - allActive.addAll(reservedToActivate); - if ( ! 
containsAll(hostnames, allActive)) - throw new IllegalArgumentException("Activation of " + application + " failed. " + - "Could not find all requested hosts." + - "\nRequested: " + hosts + - "\nReserved: " + toHostNames(reserved) + - "\nActive: " + toHostNames(active) + - "\nThis might happen if the time from reserving host to activation takes " + - "longer time than reservation expiry (the hosts will then no longer be reserved)"); - - validateParentHosts(application, allNodes, reservedToActivate); - - List<Node> activeToRemove = removeHostsFromList(hostnames, active); - activeToRemove = activeToRemove.stream().map(Node::unretire).collect(Collectors.toList()); // only active nodes can be retired - nodeRepository.deactivate(activeToRemove, transaction); - nodeRepository.activate(updateFrom(hosts, continuedActive), transaction); // update active with any changes - nodeRepository.activate(updatePortsFrom(hosts, reservedToActivate), transaction); - } + private void activateNodes(ApplicationId application, Collection<HostSpec> hosts, NestedTransaction transaction, + @SuppressWarnings("unused") Mutex applicationLock) { + Set<String> hostnames = hosts.stream().map(HostSpec::hostname).collect(Collectors.toSet()); + NodeList allNodes = nodeRepository.list(); + NodeList applicationNodes = allNodes.owner(application); + + List<Node> reserved = applicationNodes.state(Node.State.reserved).asList(); + List<Node> reservedToActivate = retainHostsInList(hostnames, reserved); + List<Node> active = applicationNodes.state(Node.State.active).asList(); + List<Node> continuedActive = retainHostsInList(hostnames, active); + List<Node> allActive = new ArrayList<>(continuedActive); + allActive.addAll(reservedToActivate); + if (!containsAll(hostnames, allActive)) + throw new IllegalArgumentException("Activation of " + application + " failed. " + + "Could not find all requested hosts." 
+ + "\nRequested: " + hosts + + "\nReserved: " + toHostNames(reserved) + + "\nActive: " + toHostNames(active) + + "\nThis might happen if the time from reserving host to activation takes " + + "longer time than reservation expiry (the hosts will then no longer be reserved)"); + + validateParentHosts(application, allNodes, reservedToActivate); + + List<Node> activeToRemove = removeHostsFromList(hostnames, active); + activeToRemove = activeToRemove.stream().map(Node::unretire).collect(Collectors.toList()); // only active nodes can be retired + nodeRepository.deactivate(activeToRemove, transaction); + nodeRepository.activate(updateFrom(hosts, continuedActive), transaction); // update active with any changes + nodeRepository.activate(updatePortsFrom(hosts, reservedToActivate), transaction); + } + + /** Activate load balancers */ + private void activateLoadBalancers(ApplicationId application, Collection<HostSpec> hosts, + @SuppressWarnings("unused") Mutex applicationLock) { + loadBalancerProvisioner.ifPresent(provisioner -> provisioner.activate(application, clustersOf(hosts))); + } + + private static List<ClusterSpec> clustersOf(Collection<HostSpec> hosts) { + return hosts.stream() + .map(HostSpec::membership) + .flatMap(Optional::stream) + .map(ClusterMembership::cluster) + .collect(Collectors.toUnmodifiableList()); } private static void validateParentHosts(ApplicationId application, NodeList nodes, List<Node> potentialChildren) { diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorConfigBuilder.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorConfigBuilder.java index 77872fc1435..fbf97ba25d9 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorConfigBuilder.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/FlavorConfigBuilder.java @@ -1,9 +1,9 @@ // Copyright 2017 Yahoo Holdings. 
Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.provision.provisioning; -import com.yahoo.config.provisioning.FlavorsConfig; import com.yahoo.config.provision.Flavor; import com.yahoo.config.provision.NodeFlavors; +import com.yahoo.config.provisioning.FlavorsConfig; /** * Simplifies creation of a node-repository config containing flavors. @@ -22,7 +22,6 @@ public class FlavorConfigBuilder { public FlavorsConfig.Flavor.Builder addFlavor(String flavorName, double cpu, double mem, double disk, Flavor.Type type) { FlavorsConfig.Flavor.Builder flavor = new FlavorsConfig.Flavor.Builder(); flavor.name(flavorName); - flavor.description("Flavor-name-is-" + flavorName); flavor.minDiskAvailableGb(disk); flavor.minCpuCores(cpu); flavor.minMainMemoryAvailableGb(mem); diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java index 372dca84a53..6e688a08c84 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java @@ -18,8 +18,6 @@ import com.yahoo.vespa.hosted.provision.lb.Real; import com.yahoo.vespa.hosted.provision.node.IP; import com.yahoo.vespa.hosted.provision.persistence.CuratorDatabaseClient; -import java.util.Collections; -import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; @@ -27,10 +25,14 @@ import java.util.Set; import java.util.stream.Collectors; /** - * Provides provisioning of load balancers for applications. + * Provisions and configures application load balancers. 
* * @author mpolden */ +// Load balancer state transitions: +// 1) (new) -> reserved -> active +// 2) active | reserved -> inactive +// 3) inactive -> active | (removed) public class LoadBalancerProvisioner { private final NodeRepository nodeRepository; @@ -44,43 +46,72 @@ public class LoadBalancerProvisioner { } /** - * Provision load balancer(s) for given application. + * Prepare a load balancer for given application and cluster. * - * If the application has multiple container clusters, one load balancer will be provisioned for each cluster. + * If a load balancer for the cluster already exists, it will be reconfigured based on the currently allocated + * nodes. It's state will remain unchanged. + * + * If no load balancer exists, a new one will be provisioned in {@link LoadBalancer.State#reserved}. + * + * Calling this for irrelevant node or cluster types is a no-op. */ - public Map<LoadBalancerId, LoadBalancer> provision(ApplicationId application) { - try (Mutex applicationLock = nodeRepository.lock(application)) { - try (Mutex loadBalancersLock = db.lockLoadBalancers()) { - Map<LoadBalancerId, LoadBalancer> loadBalancers = new LinkedHashMap<>(); - for (Map.Entry<ClusterSpec, List<Node>> kv : activeContainers(application).entrySet()) { - LoadBalancerId id = new LoadBalancerId(application, kv.getKey().id()); - LoadBalancerInstance instance = create(application, kv.getKey().id(), kv.getValue()); - // Load balancer is always re-activated here to avoid reallocation if an application/cluster is - // deleted and then redeployed. 
- LoadBalancer loadBalancer = new LoadBalancer(id, instance, false); - loadBalancers.put(loadBalancer.id(), loadBalancer); - db.writeLoadBalancer(loadBalancer); - } - return Collections.unmodifiableMap(loadBalancers); - } + public void prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) { + if (requestedNodes.type() != NodeType.tenant) return; // Nothing to provision for this node type + if (cluster.type() != ClusterSpec.Type.container) return; // Nothing to provision for this cluster type + provision(application, cluster.id(), false); + } + + /** + * Activate load balancer for given application and cluster. + * + * If a load balancer for the cluster already exists, it will be reconfigured based on the currently allocated + * nodes and the load balancer itself will be moved to {@link LoadBalancer.State#active}. + * + * Calling this when no load balancer has been prepared for given cluster is a no-op. + */ + public void activate(ApplicationId application, List<ClusterSpec> clusters) { + for (var clusterId : containerClusterIdsOf(clusters)) { + // Provision again to ensure that load balancer instance re-configured with correct nodes + provision(application, clusterId, true); } } /** * Deactivate all load balancers assigned to given application. This is a no-op if an application does not have any - * load balancer(s) + * load balancer(s). 
*/ public void deactivate(ApplicationId application, NestedTransaction transaction) { try (Mutex applicationLock = nodeRepository.lock(application)) { try (Mutex loadBalancersLock = db.lockLoadBalancers()) { - List<LoadBalancer> deactivatedLoadBalancers = nodeRepository.loadBalancers().owner(application).asList().stream() - .map(LoadBalancer::deactivate) - .collect(Collectors.toList()); + var now = nodeRepository.clock().instant(); + var deactivatedLoadBalancers = nodeRepository.loadBalancers().owner(application).asList().stream() + .map(lb -> lb.with(LoadBalancer.State.inactive, now)) + .collect(Collectors.toList()); db.writeLoadBalancers(deactivatedLoadBalancers, transaction); } } } + /** Idempotently provision a load balancer for given application and cluster */ + private void provision(ApplicationId application, ClusterSpec.Id clusterId, boolean activate) { + try (var applicationLock = nodeRepository.lock(application)) { + try (var loadBalancersLock = db.lockLoadBalancers()) { + var id = new LoadBalancerId(application, clusterId); + var now = nodeRepository.clock().instant(); + var instance = create(application, clusterId, allocatedContainers(application, clusterId)); + var loadBalancer = db.readLoadBalancers().get(id); + if (loadBalancer == null) { + if (activate) return; // Nothing to activate as this load balancer was never prepared + loadBalancer = new LoadBalancer(id, instance, LoadBalancer.State.reserved, now); + } else { + var newState = activate ? 
LoadBalancer.State.active : loadBalancer.state(); + loadBalancer = loadBalancer.with(instance).with(newState, now); + } + db.writeLoadBalancer(loadBalancer); + } + } + } + private LoadBalancerInstance create(ApplicationId application, ClusterSpec.Id cluster, List<Node> nodes) { Map<HostName, Set<String>> hostnameToIpAdresses = nodes.stream() .collect(Collectors.toMap(node -> HostName.from(node.hostname()), @@ -92,15 +123,14 @@ public class LoadBalancerProvisioner { return service.create(application, cluster, reals); } - /** Returns a list of active containers for given application, grouped by cluster spec */ - private Map<ClusterSpec, List<Node>> activeContainers(ApplicationId application) { - return new NodeList(nodeRepository.getNodes(NodeType.tenant, Node.State.active)) + /** Returns a list of active and reserved nodes of type container in given cluster */ + private List<Node> allocatedContainers(ApplicationId application, ClusterSpec.Id clusterId) { + return new NodeList(nodeRepository.getNodes(NodeType.tenant, Node.State.reserved, Node.State.active)) .owner(application) .filter(node -> node.state().isAllocated()) .type(ClusterSpec.Type.container) - .asList() - .stream() - .collect(Collectors.groupingBy(n -> n.allocation().get().membership().cluster())); + .filter(node -> node.allocation().get().membership().cluster().id().equals(clusterId)) + .asList(); } /** Find IP addresses reachable by the load balancer service */ @@ -118,4 +148,11 @@ public class LoadBalancerProvisioner { return reachable; } + private static List<ClusterSpec.Id> containerClusterIdsOf(List<ClusterSpec> clusters) { + return clusters.stream() + .filter(c -> c.type() == ClusterSpec.Type.container) + .map(ClusterSpec::id) + .collect(Collectors.toUnmodifiableList()); + } + } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java 
b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java index 21bfc1b6886..90ca8ef4d33 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java @@ -62,14 +62,14 @@ public class NodeRepositoryProvisioner implements Provisioner { this.nodeRepository = nodeRepository; this.capacityPolicies = new CapacityPolicies(zone, flavors); this.zone = zone; + this.loadBalancerProvisioner = provisionServiceProvider.getLoadBalancerService().map(lbService -> new LoadBalancerProvisioner(nodeRepository, lbService)); this.preparer = new Preparer(nodeRepository, zone.environment() == Environment.prod ? SPARE_CAPACITY_PROD : SPARE_CAPACITY_NONPROD, - provisionServiceProvider.getHostProvisioner(), - provisionServiceProvider.getHostResourcesCalculator(), - Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource)); - this.activator = new Activator(nodeRepository); - this.loadBalancerProvisioner = provisionServiceProvider.getLoadBalancerService().map(lbService -> - new LoadBalancerProvisioner(nodeRepository, lbService)); + provisionServiceProvider.getHostProvisioner(), + provisionServiceProvider.getHostResourcesCalculator(), + Flags.ENABLE_DYNAMIC_PROVISIONING.bindTo(flagSource), + loadBalancerProvisioner); + this.activator = new Activator(nodeRepository, loadBalancerProvisioner); } /** @@ -112,14 +112,6 @@ public class NodeRepositoryProvisioner implements Provisioner { public void activate(NestedTransaction transaction, ApplicationId application, Collection<HostSpec> hosts) { validate(hosts); activator.activate(application, hosts, transaction); - transaction.onCommitted(() -> { - try { - loadBalancerProvisioner.ifPresent(lbProvisioner -> lbProvisioner.provision(application)); - } catch (Exception e) { - log.log(LogLevel.ERROR, "Failed to provision load balancer for 
application " + - application.toShortString(), e); - } - }); } @Override diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java index ca958f15c69..31ec964dceb 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java @@ -24,15 +24,24 @@ class Preparer { private final NodeRepository nodeRepository; private final GroupPreparer groupPreparer; + private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner; private final int spareCount; public Preparer(NodeRepository nodeRepository, int spareCount, Optional<HostProvisioner> hostProvisioner, - HostResourcesCalculator hostResourcesCalculator, BooleanFlag dynamicProvisioningEnabled) { + HostResourcesCalculator hostResourcesCalculator, BooleanFlag dynamicProvisioningEnabled, + Optional<LoadBalancerProvisioner> loadBalancerProvisioner) { this.nodeRepository = nodeRepository; this.spareCount = spareCount; + this.loadBalancerProvisioner = loadBalancerProvisioner; this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner, hostResourcesCalculator, dynamicProvisioningEnabled); } + /** Prepare all required resources for the given application and cluster */ + public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) { + prepareLoadBalancer(application, cluster, requestedNodes); + return prepareNodes(application, cluster, requestedNodes, wantedGroups); + } + /** * Ensure sufficient nodes are reserved or active for the given application and cluster * @@ -41,7 +50,7 @@ class Preparer { // Note: This operation may make persisted changes to the set of reserved and inactive nodes, // but it may not change the set of active nodes, as the active nodes must stay in sync with the // 
active config model which is changed on activate - public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) { + public List<Node> prepareNodes(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes, int wantedGroups) { List<Node> surplusNodes = findNodesInRemovableGroups(application, cluster, wantedGroups); MutableInteger highestIndex = new MutableInteger(findHighestIndex(application, cluster)); @@ -58,6 +67,11 @@ class Preparer { return acceptedNodes; } + /** Prepare a load balancer for given application and cluster */ + public void prepareLoadBalancer(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) { + loadBalancerProvisioner.ifPresent(provisioner -> provisioner.prepare(application, cluster, requestedNodes)); + } + /** * Returns a list of the nodes which are * in groups with index number above or equal the group count diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/LoadBalancersResponse.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/LoadBalancersResponse.java index d31834567ab..bfbf7775031 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/LoadBalancersResponse.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/LoadBalancersResponse.java @@ -55,6 +55,8 @@ public class LoadBalancersResponse extends HttpResponse { loadBalancers().forEach(lb -> { Cursor lbObject = loadBalancerArray.addObject(); lbObject.setString("id", lb.id().serializedForm()); + lbObject.setString("state", lb.state().name()); + lbObject.setLong("changedAt", lb.changedAt().toEpochMilli()); lbObject.setString("application", lb.id().application().application().value()); lbObject.setString("tenant", lb.id().application().tenant().value()); lbObject.setString("instance", lb.id().application().instance().value()); @@ -76,9 +78,9 @@ public class 
LoadBalancersResponse extends HttpResponse { realObject.setLong("port", real.port()); }); - lbObject.setArray("rotations"); // To avoid changing the API. This can be removed when clients stop expecting this - - lbObject.setBool("inactive", lb.inactive()); + // TODO(mpolden): The following fields preserves API compatibility. These can be removed once clients stop expecting them + lbObject.setArray("rotations"); + lbObject.setBool("inactive", lb.state() == LoadBalancer.State.inactive); }); new JsonFormat(true).encode(stream, slime); diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/NodesResponse.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/NodesResponse.java index 2ab916e6375..a591217f5d5 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/NodesResponse.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/NodesResponse.java @@ -148,8 +148,6 @@ class NodesResponse extends HttpResponse { object.setString("canonicalFlavor", node.flavor().canonicalName()); object.setDouble("minDiskAvailableGb", node.flavor().getMinDiskAvailableGb()); object.setDouble("minMainMemoryAvailableGb", node.flavor().getMinMainMemoryAvailableGb()); - if (node.flavor().getDescription() != null && ! 
node.flavor().getDescription().isEmpty()) - object.setString("description", node.flavor().getDescription()); object.setDouble("minCpuCores", node.flavor().getMinCpuCores()); if (node.flavor().cost() > 0) object.setLong("cost", node.flavor().cost()); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java index d7942cdb6e7..c0ea64d4c68 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirerTest.java @@ -21,6 +21,8 @@ import java.util.function.Supplier; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertSame; import static org.junit.Assert.assertTrue; /** @@ -49,8 +51,8 @@ public class LoadBalancerExpirerTest { // Remove one application deactivates load balancers for that application removeApplication(app1); - assertTrue(loadBalancers.get().get(lb1).inactive()); - assertFalse(loadBalancers.get().get(lb2).inactive()); + assertSame(LoadBalancer.State.inactive, loadBalancers.get().get(lb1).state()); + assertNotSame(LoadBalancer.State.inactive, loadBalancers.get().get(lb2).state()); // Expirer defers removal while nodes are still allocated to application expirer.maintain(); @@ -62,12 +64,12 @@ public class LoadBalancerExpirerTest { assertFalse("Inactive load balancer removed", tester.loadBalancerService().instances().containsKey(lb1)); // Active load balancer is left alone - assertFalse(loadBalancers.get().get(lb2).inactive()); + assertSame(LoadBalancer.State.active, loadBalancers.get().get(lb2).state()); assertTrue("Active load balancer is not removed", tester.loadBalancerService().instances().containsKey(lb2)); } private void 
dirtyNodesOf(ApplicationId application) { - tester.nodeRepository().setDirty(tester.nodeRepository().getNodes(application), Agent.system, "unit-test"); + tester.nodeRepository().setDirty(tester.nodeRepository().getNodes(application), Agent.system, this.getClass().getSimpleName()); } private void removeApplication(ApplicationId application) { diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializerTest.java index 460764b50db..b78b4120b81 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/LoadBalancerSerializerTest.java @@ -12,9 +12,13 @@ import com.yahoo.vespa.hosted.provision.lb.LoadBalancerInstance; import com.yahoo.vespa.hosted.provision.lb.Real; import org.junit.Test; +import java.nio.charset.StandardCharsets; +import java.time.Instant; import java.util.Optional; +import static java.time.temporal.ChronoUnit.MILLIS; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertSame; /** * @author mpolden @@ -23,31 +27,61 @@ public class LoadBalancerSerializerTest { @Test public void test_serialization() { - LoadBalancer loadBalancer = new LoadBalancer(new LoadBalancerId(ApplicationId.from("tenant1", - "application1", - "default"), - ClusterSpec.Id.from("qrs")), - new LoadBalancerInstance( - HostName.from("lb-host"), - Optional.of(new DnsZone("zone-id-1")), - ImmutableSet.of(4080, 4443), - ImmutableSet.of("10.2.3.4/24"), - ImmutableSet.of(new Real(HostName.from("real-1"), - "127.0.0.1", - 4080), - new Real(HostName.from("real-2"), - "127.0.0.2", - 4080))), - false); - - LoadBalancer serialized = LoadBalancerSerializer.fromJson(LoadBalancerSerializer.toJson(loadBalancer)); + var now = Instant.now(); + var loadBalancer = new 
LoadBalancer(new LoadBalancerId(ApplicationId.from("tenant1", + "application1", + "default"), + ClusterSpec.Id.from("qrs")), + new LoadBalancerInstance( + HostName.from("lb-host"), + Optional.of(new DnsZone("zone-id-1")), + ImmutableSet.of(4080, 4443), + ImmutableSet.of("10.2.3.4/24"), + ImmutableSet.of(new Real(HostName.from("real-1"), + "127.0.0.1", + 4080), + new Real(HostName.from("real-2"), + "127.0.0.2", + 4080))), + LoadBalancer.State.active, + now); + + var serialized = LoadBalancerSerializer.fromJson(LoadBalancerSerializer.toJson(loadBalancer), now); assertEquals(loadBalancer.id(), serialized.id()); assertEquals(loadBalancer.instance().hostname(), serialized.instance().hostname()); assertEquals(loadBalancer.instance().dnsZone(), serialized.instance().dnsZone()); assertEquals(loadBalancer.instance().ports(), serialized.instance().ports()); assertEquals(loadBalancer.instance().networks(), serialized.instance().networks()); - assertEquals(loadBalancer.inactive(), serialized.inactive()); + assertEquals(loadBalancer.state(), serialized.state()); + assertEquals(loadBalancer.changedAt().truncatedTo(MILLIS), serialized.changedAt()); assertEquals(loadBalancer.instance().reals(), serialized.instance().reals()); } + @Test + public void test_serialization_legacy() { // TODO(mpolden): Remove after June 2019 + var now = Instant.now(); + + var deserialized = LoadBalancerSerializer.fromJson(legacyJson(true).getBytes(StandardCharsets.UTF_8), now); + assertSame(LoadBalancer.State.inactive, deserialized.state()); + assertEquals(now, deserialized.changedAt()); + + deserialized = LoadBalancerSerializer.fromJson(legacyJson(false).getBytes(StandardCharsets.UTF_8), now); + assertSame(LoadBalancer.State.active, deserialized.state()); + } + + private static String legacyJson(boolean inactive) { + return "{\n" + + " \"id\": \"tenant1:application1:default:qrs\",\n" + + " \"hostname\": \"lb-host\",\n" + + " \"dnsZone\": \"zone-id-1\",\n" + + " \"ports\": [\n" + + " 4080,\n" + + " 
4443\n" + + " ],\n" + + " \"networks\": [],\n" + + " \"reals\": [],\n" + + " \"inactive\": " + inactive + "\n" + + "}\n"; + } + } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java index f97460713a5..6d94e4ab992 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java @@ -4,11 +4,11 @@ package com.yahoo.vespa.hosted.provision.provisioning; import com.google.common.collect.Iterators; import com.yahoo.component.Version; import com.yahoo.config.provision.ApplicationId; +import com.yahoo.config.provision.Capacity; import com.yahoo.config.provision.ClusterSpec; -import com.yahoo.config.provision.NodeResources; import com.yahoo.config.provision.HostName; import com.yahoo.config.provision.HostSpec; -import com.yahoo.config.provision.RotationName; +import com.yahoo.config.provision.NodeResources; import com.yahoo.transaction.NestedTransaction; import com.yahoo.vespa.hosted.provision.Node; import com.yahoo.vespa.hosted.provision.lb.LoadBalancer; @@ -26,7 +26,7 @@ import java.util.function.Supplier; import java.util.stream.Collectors; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertSame; import static org.junit.Assert.assertTrue; /** @@ -41,26 +41,29 @@ public class LoadBalancerProvisionerTest { @Test public void provision_load_balancer() { + Supplier<List<LoadBalancer>> lbApp1 = () -> tester.nodeRepository().loadBalancers().owner(app1).asList(); + Supplier<List<LoadBalancer>> lbApp2 = () -> tester.nodeRepository().loadBalancers().owner(app2).asList(); ClusterSpec.Id containerCluster1 = ClusterSpec.Id.from("qrs1"); ClusterSpec.Id 
contentCluster = ClusterSpec.Id.from("content"); - Set<RotationName> rotationsCluster1 = Set.of(RotationName.from("r1-1"), RotationName.from("r1-2")); - tester.activate(app1, prepare(app1, - clusterRequest(ClusterSpec.Type.container, containerCluster1, rotationsCluster1), - clusterRequest(ClusterSpec.Type.content, contentCluster))); - tester.activate(app2, prepare(app2, - clusterRequest(ClusterSpec.Type.container, ClusterSpec.Id.from("qrs")))); // Provision a load balancer for each application - Supplier<List<LoadBalancer>> loadBalancers = () -> tester.nodeRepository().loadBalancers().owner(app1).asList(); - assertEquals(1, loadBalancers.get().size()); - - assertEquals(app1, loadBalancers.get().get(0).id().application()); - assertEquals(containerCluster1, loadBalancers.get().get(0).id().cluster()); - assertEquals(Collections.singleton(4443), loadBalancers.get().get(0).instance().ports()); - assertEquals("127.0.0.1", get(loadBalancers.get().get(0).instance().reals(), 0).ipAddress()); - assertEquals(4080, get(loadBalancers.get().get(0).instance().reals(), 0).port()); - assertEquals("127.0.0.2", get(loadBalancers.get().get(0).instance().reals(), 1).ipAddress()); - assertEquals(4080, get(loadBalancers.get().get(0).instance().reals(), 1).port()); + var nodes = prepare(app1, + clusterRequest(ClusterSpec.Type.container, containerCluster1), + clusterRequest(ClusterSpec.Type.content, contentCluster)); + assertEquals(1, lbApp1.get().size()); + assertEquals("Prepare provisions load balancer with 0 reals", Set.of(), lbApp1.get().get(0).instance().reals()); + tester.activate(app1, nodes); + tester.activate(app2, prepare(app2, clusterRequest(ClusterSpec.Type.container, ClusterSpec.Id.from("qrs")))); + assertEquals(1, lbApp2.get().size()); + + // Reals are configured after activation + assertEquals(app1, lbApp1.get().get(0).id().application()); + assertEquals(containerCluster1, lbApp1.get().get(0).id().cluster()); + assertEquals(Collections.singleton(4443), 
lbApp1.get().get(0).instance().ports()); + assertEquals("127.0.0.1", get(lbApp1.get().get(0).instance().reals(), 0).ipAddress()); + assertEquals(4080, get(lbApp1.get().get(0).instance().reals(), 0).port()); + assertEquals("127.0.0.2", get(lbApp1.get().get(0).instance().reals(), 1).ipAddress()); + assertEquals(4080, get(lbApp1.get().get(0).instance().reals(), 1).port()); // A container is failed Supplier<List<Node>> containers = () -> tester.getNodes(app1).type(ClusterSpec.Type.container).asList(); @@ -79,17 +82,17 @@ public class LoadBalancerProvisionerTest { .noneMatch(hostname -> hostname.equals(toFail.hostname()))); assertEquals(containers.get().get(0).hostname(), get(loadBalancer.instance().reals(), 0).hostname().value()); assertEquals(containers.get().get(1).hostname(), get(loadBalancer.instance().reals(), 1).hostname().value()); + assertSame("State is unchanged", LoadBalancer.State.active, loadBalancer.state()); // Add another container cluster - Set<RotationName> rotationsCluster2 = Set.of(RotationName.from("r2-1"), RotationName.from("r2-2")); ClusterSpec.Id containerCluster2 = ClusterSpec.Id.from("qrs2"); tester.activate(app1, prepare(app1, - clusterRequest(ClusterSpec.Type.container, containerCluster1, rotationsCluster1), - clusterRequest(ClusterSpec.Type.container, containerCluster2, rotationsCluster2), + clusterRequest(ClusterSpec.Type.container, containerCluster1), + clusterRequest(ClusterSpec.Type.container, containerCluster2), clusterRequest(ClusterSpec.Type.content, contentCluster))); // Load balancer is provisioned for second container cluster - assertEquals(2, loadBalancers.get().size()); + assertEquals(2, lbApp1.get().size()); List<HostName> activeContainers = tester.getNodes(app1, Node.State.active) .type(ClusterSpec.Type.container).asList() .stream() @@ -97,7 +100,7 @@ public class LoadBalancerProvisionerTest { .map(HostName::from) .sorted() .collect(Collectors.toList()); - List<HostName> reals = loadBalancers.get().stream() + List<HostName> 
reals = lbApp1.get().stream() .map(LoadBalancer::instance) .map(LoadBalancerInstance::reals) .flatMap(Collection::stream) @@ -111,38 +114,35 @@ public class LoadBalancerProvisionerTest { tester.provisioner().remove(removeTransaction, app1); removeTransaction.commit(); - assertEquals(2, loadBalancers.get().size()); - assertTrue("Deactivated load balancers", loadBalancers.get().stream().allMatch(LoadBalancer::inactive)); + assertEquals(2, lbApp1.get().size()); + assertTrue("Deactivated load balancers", lbApp1.get().stream().allMatch(lb -> lb.state() == LoadBalancer.State.inactive)); + assertTrue("Load balancers for " + app2 + " remain active", lbApp2.get().stream().allMatch(lb -> lb.state() == LoadBalancer.State.active)); // Application is redeployed with one cluster and load balancer is re-activated tester.activate(app1, prepare(app1, clusterRequest(ClusterSpec.Type.container, containerCluster1), clusterRequest(ClusterSpec.Type.content, contentCluster))); - assertFalse("Re-activated load balancer for " + containerCluster1, - loadBalancers.get().stream() + assertSame("Re-activated load balancer for " + containerCluster1, LoadBalancer.State.active, + lbApp1.get().stream() .filter(lb -> lb.id().cluster().equals(containerCluster1)) + .map(LoadBalancer::state) .findFirst() - .orElseThrow() - .inactive()); - } - - private ClusterSpec clusterRequest(ClusterSpec.Type type, ClusterSpec.Id id) { - return clusterRequest(type, id, Collections.emptySet()); - } - - private ClusterSpec clusterRequest(ClusterSpec.Type type, ClusterSpec.Id id, Set<RotationName> rotations) { - return ClusterSpec.request(type, id, Version.fromString("6.42"), false, rotations); + .orElseThrow()); } private Set<HostSpec> prepare(ApplicationId application, ClusterSpec... 
specs) { tester.makeReadyNodes(specs.length * 2, "d-1-1-1"); Set<HostSpec> allNodes = new LinkedHashSet<>(); for (ClusterSpec spec : specs) { - allNodes.addAll(tester.prepare(application, spec, 2, 1, new NodeResources(1, 1, 1))); + allNodes.addAll(tester.prepare(application, spec, Capacity.fromCount(2, new NodeResources(1, 1, 1), false, true), 1, false)); } return allNodes; } + private static ClusterSpec clusterRequest(ClusterSpec.Type type, ClusterSpec.Id id) { + return ClusterSpec.request(type, id, Version.fromString("6.42"), false); + } + private static <T> T get(Set<T> set, int position) { return Iterators.get(set.iterator(), position, null); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java index c8051c3bdee..294c153f86f 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java @@ -139,16 +139,21 @@ public class ProvisioningTester { } public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity capacity, int groups) { + return prepare(application, cluster, capacity, groups, true); + } + + public List<HostSpec> prepare(ApplicationId application, ClusterSpec cluster, Capacity capacity, int groups, boolean idempotentPrepare) { Set<String> reservedBefore = toHostNames(nodeRepository.getNodes(application, Node.State.reserved)); Set<String> inactiveBefore = toHostNames(nodeRepository.getNodes(application, Node.State.inactive)); - // prepare twice to ensure idempotence List<HostSpec> hosts1 = provisioner.prepare(application, cluster, capacity, groups, provisionLogger); - List<HostSpec> hosts2 = provisioner.prepare(application, cluster, capacity, groups, provisionLogger); - assertEquals(hosts1, hosts2); + if 
(idempotentPrepare) { // prepare twice to ensure idempotence + List<HostSpec> hosts2 = provisioner.prepare(application, cluster, capacity, groups, provisionLogger); + assertEquals(hosts1, hosts2); + } Set<String> newlyActivated = toHostNames(nodeRepository.getNodes(application, Node.State.reserved)); newlyActivated.removeAll(reservedBefore); newlyActivated.removeAll(inactiveBefore); - return hosts2; + return hosts1; } public void activate(ApplicationId application, Collection<HostSpec> hosts) { diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/RestApiTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/RestApiTest.java index 6524292f48c..bfb24d30284 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/RestApiTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/RestApiTest.java @@ -831,7 +831,7 @@ public class RestApiTest { @Test public void test_load_balancers() throws Exception { assertFile(new Request("http://localhost:8080/loadbalancers/v1/"), "load-balancers.json"); - assertFile(new Request("http://localhost:8080/loadbalancers/v1/?application=tenant4.application4.instance4"), "load-balancers.json"); + assertFile(new Request("http://localhost:8080/loadbalancers/v1/?application=tenant4.application4.instance4"), "load-balancers-single.json"); assertResponse(new Request("http://localhost:8080/loadbalancers/v1/?application=tenant.nonexistent.default"), "{\"loadBalancers\":[]}"); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/cfg1.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/cfg1.json index e23f7129ae2..fd553a97ea4 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/cfg1.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/cfg1.json @@ -9,7 +9,6 
@@ "canonicalFlavor": "default", "minDiskAvailableGb": 400.0, "minMainMemoryAvailableGb": 16.0, - "description": "Flavor-name-is-default", "minCpuCores": 2.0, "fastDisk": true, "bandwidth":0.0, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/cfg2.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/cfg2.json index 8e1accd65a2..aa818a9cf42 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/cfg2.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/cfg2.json @@ -9,7 +9,6 @@ "canonicalFlavor": "default", "minDiskAvailableGb": 400.0, "minMainMemoryAvailableGb": 16.0, - "description": "Flavor-name-is-default", "minCpuCores": 2.0, "fastDisk": true, "bandwidth":0.0, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/controller1.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/controller1.json index a5d2a7a37dd..bfa34bc0517 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/controller1.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/controller1.json @@ -9,7 +9,6 @@ "canonicalFlavor": "default", "minDiskAvailableGb": 400.0, "minMainMemoryAvailableGb": 16.0, - "description": "Flavor-name-is-default", "minCpuCores": 2.0, "fastDisk": true, "bandwidth":0.0, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node1-os-upgrade-complete.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node1-os-upgrade-complete.json index 5cbd372385a..fc91c883441 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node1-os-upgrade-complete.json +++ 
b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node1-os-upgrade-complete.json @@ -9,7 +9,6 @@ "canonicalFlavor": "large", "minDiskAvailableGb": 1600.0, "minMainMemoryAvailableGb": 32.0, - "description": "Flavor-name-is-large", "minCpuCores": 4.0, "fastDisk": true, "bandwidth": 0.0, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node1-os-upgrade.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node1-os-upgrade.json index 5cbd372385a..fc91c883441 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node1-os-upgrade.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node1-os-upgrade.json @@ -9,7 +9,6 @@ "canonicalFlavor": "large", "minDiskAvailableGb": 1600.0, "minMainMemoryAvailableGb": 32.0, - "description": "Flavor-name-is-large", "minCpuCores": 4.0, "fastDisk": true, "bandwidth": 0.0, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node1.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node1.json index cf190ff36bc..f59af799f37 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node1.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node1.json @@ -9,7 +9,6 @@ "canonicalFlavor": "large", "minDiskAvailableGb": 1600.0, "minMainMemoryAvailableGb": 32.0, - "description": "Flavor-name-is-large", "minCpuCores": 4.0, "fastDisk": true, "bandwidth":0.0, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node2.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node2.json index 4ffedbf01d5..a01f4372fd8 100644 --- 
a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node2.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node2.json @@ -9,7 +9,6 @@ "canonicalFlavor": "large", "minDiskAvailableGb": 1600.0, "minMainMemoryAvailableGb": 32.0, - "description": "Flavor-name-is-large", "minCpuCores": 4.0, "fastDisk": true, "bandwidth":0.0, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node3.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node3.json index 396a645ea3b..44a11c98da2 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node3.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node3.json @@ -9,7 +9,6 @@ "canonicalFlavor": "large", "minDiskAvailableGb": 1600.0, "minMainMemoryAvailableGb": 32.0, - "description": "Flavor-name-is-large", "minCpuCores": 4.0, "fastDisk": true, "bandwidth":0.0, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node4.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node4.json index 146af5998bd..b3ec9aa0093 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node4.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node4.json @@ -9,7 +9,6 @@ "canonicalFlavor": "large", "minDiskAvailableGb": 1600.0, "minMainMemoryAvailableGb": 32.0, - "description": "Flavor-name-is-large", "minCpuCores": 4.0, "fastDisk": true, "bandwidth":0.0, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node5.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node5.json index 
5dadfe68845..963d485ac70 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node5.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/docker-node5.json @@ -9,7 +9,6 @@ "canonicalFlavor": "large", "minDiskAvailableGb": 1600.0, "minMainMemoryAvailableGb": 32.0, - "description": "Flavor-name-is-large", "minCpuCores": 4.0, "fastDisk": true, "bandwidth":0.0, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/dockerhost1-with-firmware-data.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/dockerhost1-with-firmware-data.json index 637a7cc858d..efecd510266 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/dockerhost1-with-firmware-data.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/dockerhost1-with-firmware-data.json @@ -9,7 +9,6 @@ "canonicalFlavor": "large", "minDiskAvailableGb": 1600.0, "minMainMemoryAvailableGb": 32.0, - "description": "Flavor-name-is-large", "minCpuCores": 4.0, "fastDisk": true, "bandwidth": 0.0, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/load-balancers-single.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/load-balancers-single.json new file mode 100644 index 00000000000..67d2c3bfa4b --- /dev/null +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/load-balancers-single.json @@ -0,0 +1,36 @@ +{ + "loadBalancers": [ + { + "id": "tenant4:application4:instance4:id4", + "state": "active", + "changedAt": 123, + "application": "application4", + "tenant": "tenant4", + "instance": "instance4", + "cluster": "id4", + "hostname": "lb-tenant4.application4.instance4-id4", + "dnsZone": "zone-id-1", + "networks": [ + "10.2.3.0/24", + "10.4.5.0/24" + ], + 
"ports": [ + 4443 + ], + "reals": [ + { + "hostname": "host13.yahoo.com", + "ipAddress": "127.0.13.1", + "port": 4080 + }, + { + "hostname": "host14.yahoo.com", + "ipAddress": "127.0.14.1", + "port": 4080 + } + ], + "rotations": [], + "inactive": false + } + ] +} diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/load-balancers.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/load-balancers.json index d2c4d0ac857..36d4de598e2 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/load-balancers.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/load-balancers.json @@ -1,7 +1,30 @@ { "loadBalancers": [ { + "id": "tenant1:application1:instance1:id1", + "state": "reserved", + "changedAt": 123, + "application": "application1", + "tenant": "tenant1", + "instance": "instance1", + "cluster": "id1", + "hostname": "lb-tenant1.application1.instance1-id1", + "dnsZone": "zone-id-1", + "networks": [ + "10.2.3.0/24", + "10.4.5.0/24" + ], + "ports": [ + 4443 + ], + "reals": [], + "rotations": [], + "inactive": false + }, + { "id": "tenant4:application4:instance4:id4", + "state": "active", + "changedAt": 123, "application": "application4", "tenant": "tenant4", "instance": "instance4", diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/node4-after-changes.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/node4-after-changes.json index c9983b3c996..f0c937d20f3 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/node4-after-changes.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/node4-after-changes.json @@ -10,7 +10,6 @@ "canonicalFlavor": "d-2-8-100", "minDiskAvailableGb": 100.0, "minMainMemoryAvailableGb": 8.0, - "description": 
"Flavor-name-is-d-2-8-100", "minCpuCores": 2.0, "fastDisk": true, "bandwidth":0.0, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/node8.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/node8.json index ab69b6b1d5a..e5a5c7a9520 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/node8.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/node8.json @@ -9,7 +9,6 @@ "canonicalFlavor": "default", "minDiskAvailableGb": 400.0, "minMainMemoryAvailableGb": 16.0, - "description": "Flavor-name-is-default", "minCpuCores": 2.0, "fastDisk": true, "bandwidth":0.0, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/node9.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/node9.json index 271ff3feef1..561cab22f85 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/node9.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/node9.json @@ -9,7 +9,6 @@ "canonicalFlavor": "large", "minDiskAvailableGb": 2000.0, "minMainMemoryAvailableGb": 128.0, - "description": "Flavor-name-is-large-variant", "minCpuCores": 64.0, "fastDisk": true, "bandwidth":0.0, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/parent1.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/parent1.json index 9823ffcc14f..42cef1c3c83 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/parent1.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/parent1.json @@ -9,7 +9,6 @@ "canonicalFlavor": "[vcpu: 2.0, memory: 8.0 Gb, disk 50.0 Gb]", "minDiskAvailableGb": 400.0, "minMainMemoryAvailableGb": 16.0, - 
"description": "Flavor-name-is-default", "minCpuCores": 2.0, "fastDisk":true, "bandwidth":0.0, diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/parent2.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/parent2.json index ec319edb170..28bb960eb14 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/parent2.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/responses/parent2.json @@ -9,7 +9,6 @@ "canonicalFlavor": "large", "minDiskAvailableGb": 2000.0, "minMainMemoryAvailableGb": 128.0, - "description": "Flavor-name-is-large-variant", "minCpuCores": 64.0, "fastDisk":true, "bandwidth":0.0, diff --git a/parent/pom.xml b/parent/pom.xml index d8ae9a35261..1855553bc20 100644 --- a/parent/pom.xml +++ b/parent/pom.xml @@ -92,7 +92,7 @@ <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-assembly-plugin</artifactId> - <version>3.1.0</version> + <version>3.1.1</version> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> @@ -441,9 +441,14 @@ <version>${asm.version}</version> </dependency> <dependency> - <groupId>com.goldmansachs</groupId> - <artifactId>gs-collections</artifactId> - <version>6.1.0</version> + <groupId>org.eclipse.collections</groupId> + <artifactId>eclipse-collections</artifactId> + <version>9.2.0</version> + </dependency> + <dependency> + <groupId>org.eclipse.collections</groupId> + <artifactId>eclipse-collections-api</artifactId> + <version>9.2.0</version> </dependency> <dependency> <groupId>com.infradna.tool</groupId> diff --git a/predicate-search/pom.xml b/predicate-search/pom.xml index fa1bee2fe28..778ede93366 100644 --- a/predicate-search/pom.xml +++ b/predicate-search/pom.xml @@ -34,8 +34,12 @@ <artifactId>guava</artifactId> </dependency> <dependency> - <groupId>com.goldmansachs</groupId> - <artifactId>gs-collections</artifactId> + 
<groupId>org.eclipse.collections</groupId> + <artifactId>eclipse-collections</artifactId> + </dependency> + <dependency> + <groupId>org.eclipse.collections</groupId> + <artifactId>eclipse-collections-api</artifactId> </dependency> <dependency> <groupId>io.airlift</groupId> diff --git a/predicate-search/src/main/java/com/yahoo/search/predicate/index/CachedPostingListCounter.java b/predicate-search/src/main/java/com/yahoo/search/predicate/index/CachedPostingListCounter.java index 91599da5483..9356e86aa2f 100644 --- a/predicate-search/src/main/java/com/yahoo/search/predicate/index/CachedPostingListCounter.java +++ b/predicate-search/src/main/java/com/yahoo/search/predicate/index/CachedPostingListCounter.java @@ -2,9 +2,9 @@ package com.yahoo.search.predicate.index; import com.google.common.collect.MinMaxPriorityQueue; -import com.gs.collections.api.tuple.primitive.ObjectLongPair; -import com.gs.collections.impl.map.mutable.primitive.ObjectIntHashMap; -import com.gs.collections.impl.map.mutable.primitive.ObjectLongHashMap; +import org.eclipse.collections.api.tuple.primitive.ObjectLongPair; +import org.eclipse.collections.impl.map.mutable.primitive.ObjectIntHashMap; +import org.eclipse.collections.impl.map.mutable.primitive.ObjectLongHashMap; import java.util.ArrayList; import java.util.Arrays; @@ -119,7 +119,7 @@ public class CachedPostingListCounter { private static class Entry implements Comparable<Entry> { public final int[] docIds; - public final double cost; + final double cost; private Entry(int[] docIds, long frequency) { this.docIds = docIds; diff --git a/predicate-search/src/main/java/com/yahoo/search/predicate/index/SimpleIndex.java b/predicate-search/src/main/java/com/yahoo/search/predicate/index/SimpleIndex.java index 64583273e77..3e1ed7ad9e4 100644 --- a/predicate-search/src/main/java/com/yahoo/search/predicate/index/SimpleIndex.java +++ b/predicate-search/src/main/java/com/yahoo/search/predicate/index/SimpleIndex.java @@ -1,10 +1,10 @@ // Copyright 2017 
Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.search.predicate.index; -import com.gs.collections.api.map.primitive.LongObjectMap; -import com.gs.collections.api.tuple.primitive.LongObjectPair; -import com.gs.collections.impl.map.mutable.primitive.LongObjectHashMap; import com.yahoo.search.predicate.serialization.SerializationHelper; +import org.eclipse.collections.api.map.primitive.LongObjectMap; +import org.eclipse.collections.api.tuple.primitive.LongObjectPair; +import org.eclipse.collections.impl.map.mutable.primitive.LongObjectHashMap; import java.io.DataInputStream; import java.io.DataOutputStream; diff --git a/predicate-search/src/main/java/com/yahoo/search/predicate/index/conjunction/ConjunctionIndex.java b/predicate-search/src/main/java/com/yahoo/search/predicate/index/conjunction/ConjunctionIndex.java index 5a100ea9cf5..d062af43f22 100644 --- a/predicate-search/src/main/java/com/yahoo/search/predicate/index/conjunction/ConjunctionIndex.java +++ b/predicate-search/src/main/java/com/yahoo/search/predicate/index/conjunction/ConjunctionIndex.java @@ -1,17 +1,17 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.search.predicate.index.conjunction; -import com.gs.collections.api.map.primitive.IntObjectMap; -import com.gs.collections.api.map.primitive.LongObjectMap; -import com.gs.collections.api.tuple.primitive.IntObjectPair; -import com.gs.collections.api.tuple.primitive.LongObjectPair; -import com.gs.collections.impl.map.mutable.primitive.IntObjectHashMap; -import com.gs.collections.impl.map.mutable.primitive.LongObjectHashMap; import com.yahoo.document.predicate.FeatureConjunction; import com.yahoo.search.predicate.PredicateQuery; import com.yahoo.search.predicate.SubqueryBitmap; import com.yahoo.search.predicate.serialization.SerializationHelper; import com.yahoo.search.predicate.utils.PrimitiveArraySorter; +import org.eclipse.collections.api.map.primitive.IntObjectMap; +import org.eclipse.collections.api.map.primitive.LongObjectMap; +import org.eclipse.collections.api.tuple.primitive.IntObjectPair; +import org.eclipse.collections.api.tuple.primitive.LongObjectPair; +import org.eclipse.collections.impl.map.mutable.primitive.IntObjectHashMap; +import org.eclipse.collections.impl.map.mutable.primitive.LongObjectHashMap; import java.io.DataInputStream; import java.io.DataOutputStream; diff --git a/predicate-search/src/main/java/com/yahoo/search/predicate/index/conjunction/ConjunctionIndexBuilder.java b/predicate-search/src/main/java/com/yahoo/search/predicate/index/conjunction/ConjunctionIndexBuilder.java index a6a03177018..8e3261a4cf8 100644 --- a/predicate-search/src/main/java/com/yahoo/search/predicate/index/conjunction/ConjunctionIndexBuilder.java +++ b/predicate-search/src/main/java/com/yahoo/search/predicate/index/conjunction/ConjunctionIndexBuilder.java @@ -3,9 +3,9 @@ package com.yahoo.search.predicate.index.conjunction; import com.google.common.primitives.Ints; import com.google.common.primitives.Longs; -import com.gs.collections.api.map.primitive.IntObjectMap; -import com.gs.collections.impl.map.mutable.primitive.IntObjectHashMap; -import 
com.gs.collections.impl.map.mutable.primitive.LongObjectHashMap; +import org.eclipse.collections.api.map.primitive.IntObjectMap; +import org.eclipse.collections.impl.map.mutable.primitive.IntObjectHashMap; +import org.eclipse.collections.impl.map.mutable.primitive.LongObjectHashMap; import java.util.ArrayList; import java.util.HashMap; diff --git a/predicate-search/src/test/java/com/yahoo/search/predicate/index/CachedPostingListCounterTest.java b/predicate-search/src/test/java/com/yahoo/search/predicate/index/CachedPostingListCounterTest.java index a3dfd00149c..31777959704 100644 --- a/predicate-search/src/test/java/com/yahoo/search/predicate/index/CachedPostingListCounterTest.java +++ b/predicate-search/src/test/java/com/yahoo/search/predicate/index/CachedPostingListCounterTest.java @@ -1,8 +1,8 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.search.predicate.index; -import com.gs.collections.impl.map.mutable.primitive.ObjectIntHashMap; import org.apache.commons.lang.ArrayUtils; +import org.eclipse.collections.impl.map.mutable.primitive.ObjectIntHashMap; import org.junit.Test; import java.util.ArrayList; diff --git a/searchcore/src/apps/verify_ranksetup/verify_ranksetup.cpp b/searchcore/src/apps/verify_ranksetup/verify_ranksetup.cpp index f60863ef0b0..721ee9978b0 100644 --- a/searchcore/src/apps/verify_ranksetup/verify_ranksetup.cpp +++ b/searchcore/src/apps/verify_ranksetup/verify_ranksetup.cpp @@ -12,7 +12,6 @@ #include <vespa/eval/tensor/default_tensor_engine.h> #include <vespa/searchcommon/common/schemaconfigurer.h> #include <vespa/searchcore/config/config-ranking-constants.h> -#include <vespa/searchcore/proton/matching/error_constant_value.h> #include <vespa/searchcore/proton/matching/indexenvironment.h> #include <vespa/searchlib/features/setup.h> #include <vespa/searchlib/fef/fef.h> @@ -34,11 +33,11 @@ using vespa::config::search::IndexschemaConfig; using 
vespa::config::search::RankProfilesConfig; using vespa::config::search::core::RankingConstantsConfig; using vespalib::eval::ConstantValue; -using vespalib::eval::ErrorValue; using vespalib::eval::TensorSpec; using vespalib::eval::ValueType; using vespalib::tensor::DefaultTensorEngine; using vespalib::eval::SimpleConstantValue; +using vespalib::eval::BadConstantValue; class App : public FastOS_Application { @@ -61,13 +60,17 @@ struct DummyConstantValueRepo : IConstantValueRepo { DummyConstantValueRepo(const RankingConstantsConfig &cfg_in) : cfg(cfg_in) {} virtual vespalib::eval::ConstantValue::UP getConstant(const vespalib::string &name) const override { for (const auto &entry: cfg.constant) { - if (entry.name == name) { + if (entry.name == name) { const auto &engine = DefaultTensorEngine::ref(); - auto tensor = engine.from_spec(TensorSpec(entry.type)); - return std::make_unique<SimpleConstantValue>(std::move(tensor)); + try { + auto tensor = engine.from_spec(TensorSpec(entry.type)); + return std::make_unique<SimpleConstantValue>(std::move(tensor)); + } catch (std::exception &) { + return std::make_unique<BadConstantValue>(); + } } } - return std::make_unique<SimpleConstantValue>(std::make_unique<ErrorValue>()); + return vespalib::eval::ConstantValue::UP(nullptr); } }; diff --git a/searchcore/src/tests/proton/documentdb/configurer/configurer_test.cpp b/searchcore/src/tests/proton/documentdb/configurer/configurer_test.cpp index aba879e5a44..3154b420789 100644 --- a/searchcore/src/tests/proton/documentdb/configurer/configurer_test.cpp +++ b/searchcore/src/tests/proton/documentdb/configurer/configurer_test.cpp @@ -9,7 +9,6 @@ #include <vespa/searchcore/proton/docsummary/summarymanager.h> #include <vespa/searchcore/proton/documentmetastore/documentmetastore.h> #include <vespa/searchcore/proton/documentmetastore/lidreusedelayer.h> -#include <vespa/searchcore/proton/matching/error_constant_value.h> #include <vespa/searchcore/proton/index/index_writer.h> #include 
<vespa/searchcore/proton/index/indexmanager.h> #include <vespa/searchcore/proton/reprocessing/attribute_reprocessing_initializer.h> @@ -138,7 +137,7 @@ ViewSet::~ViewSet() {} struct EmptyConstantValueFactory : public vespalib::eval::ConstantValueFactory { virtual vespalib::eval::ConstantValue::UP create(const vespalib::string &, const vespalib::string &) const override { - return std::make_unique<ErrorConstantValue>(); + return vespalib::eval::ConstantValue::UP(nullptr); } }; diff --git a/searchcore/src/tests/proton/matching/constant_value_repo/constant_value_repo_test.cpp b/searchcore/src/tests/proton/matching/constant_value_repo/constant_value_repo_test.cpp index acc16f41872..68959d8e20f 100644 --- a/searchcore/src/tests/proton/matching/constant_value_repo/constant_value_repo_test.cpp +++ b/searchcore/src/tests/proton/matching/constant_value_repo/constant_value_repo_test.cpp @@ -3,7 +3,6 @@ #include <vespa/vespalib/testkit/test_kit.h> #include <vespa/searchcore/proton/matching/constant_value_repo.h> -#include <vespa/searchcore/proton/matching/error_constant_value.h> #include <vespa/eval/eval/value_cache/constant_value.h> using namespace proton::matching; @@ -36,7 +35,7 @@ public: if (itr != _map.end()) { return std::make_unique<DoubleConstantValue>(itr->second); } - return std::make_unique<ErrorConstantValue>(); + return std::make_unique<BadConstantValue>(); } }; @@ -58,14 +57,14 @@ TEST_F("require that constant value can be retrieved from repo", Fixture) EXPECT_EQUAL(3, f.repo.getConstant("foo")->value().as_double()); } -TEST_F("require that non-existing constant value in repo returns error value", Fixture) +TEST_F("require that non-existing constant value in repo returns nullptr", Fixture) { - EXPECT_TRUE(f.repo.getConstant("none")->value().is_error()); + EXPECT_TRUE(f.repo.getConstant("none").get() == nullptr); } -TEST_F("require that non-existing constant value in factory returns error value", Fixture) +TEST_F("require that non-existing constant value in 
factory returns bad constant", Fixture) { - EXPECT_TRUE(f.repo.getConstant("bar")->value().is_error()); + EXPECT_TRUE(f.repo.getConstant("bar")->type().is_error()); } TEST_F("require that reconfigure replaces existing constant values in repo", Fixture) @@ -73,7 +72,7 @@ TEST_F("require that reconfigure replaces existing constant values in repo", Fix f.repo.reconfigure(RankingConstants({{"bar", "double", "path_3"}, {"baz", "double", "path_2"}})); f.factory.add("path_3", "double", 7); - EXPECT_TRUE(f.repo.getConstant("foo")->value().is_error()); + EXPECT_TRUE(f.repo.getConstant("foo").get() == nullptr); EXPECT_EQUAL(7, f.repo.getConstant("bar")->value().as_double()); EXPECT_EQUAL(5, f.repo.getConstant("baz")->value().as_double()); } diff --git a/searchcore/src/tests/proton/matching/handle_recorder/handle_recorder_test.cpp b/searchcore/src/tests/proton/matching/handle_recorder/handle_recorder_test.cpp index b384f126f05..785d4887e72 100644 --- a/searchcore/src/tests/proton/matching/handle_recorder/handle_recorder_test.cpp +++ b/searchcore/src/tests/proton/matching/handle_recorder/handle_recorder_test.cpp @@ -17,8 +17,8 @@ using namespace proton::matching; using HandleMap = HandleRecorder::HandleMap; constexpr MatchDataDetails NormalMask = MatchDataDetails::Normal; -constexpr MatchDataDetails CheapMask = MatchDataDetails::Cheap; -constexpr MatchDataDetails BothMask = static_cast<MatchDataDetails>(static_cast<int>(NormalMask) | static_cast<int>(CheapMask)); +constexpr MatchDataDetails InterleavedMask = MatchDataDetails::Interleaved; +constexpr MatchDataDetails BothMask = static_cast<MatchDataDetails>(static_cast<int>(NormalMask) | static_cast<int>(InterleavedMask)); void register_normal_handle(TermFieldHandle handle) @@ -27,22 +27,22 @@ register_normal_handle(TermFieldHandle handle) } void -register_cheap_handle(TermFieldHandle handle) +register_interleaved_features_handle(TermFieldHandle handle) { - HandleRecorder::register_handle(handle, MatchDataDetails::Cheap); + 
HandleRecorder::register_handle(handle, MatchDataDetails::Interleaved); } -TEST(HandleRecorderTest, can_record_both_normal_and_cheap_handles) +TEST(HandleRecorderTest, can_record_both_normal_and_interleaved_features_handles) { HandleRecorder recorder; { HandleRecorder::Binder binder(recorder); register_normal_handle(3); - register_cheap_handle(5); + register_interleaved_features_handle(5); register_normal_handle(7); } - EXPECT_EQ(HandleMap({{3, NormalMask}, {5, CheapMask}, {7, NormalMask}}), recorder.get_handles()); - EXPECT_EQ("normal: [3,7], cheap: [5]", recorder.to_string()); + EXPECT_EQ(HandleMap({{3, NormalMask}, {5, InterleavedMask}, {7, NormalMask}}), recorder.get_handles()); + EXPECT_EQ("normal: [3,7], interleaved: [5]", recorder.to_string()); } TEST(HandleRecorderTest, the_same_handle_can_be_in_both_normal_and_cheap_set) @@ -51,7 +51,7 @@ TEST(HandleRecorderTest, the_same_handle_can_be_in_both_normal_and_cheap_set) { HandleRecorder::Binder binder(recorder); register_normal_handle(3); - register_cheap_handle(3); + register_interleaved_features_handle(3); } EXPECT_EQ(HandleMap({{3, BothMask}}), recorder.get_handles()); } @@ -59,11 +59,11 @@ TEST(HandleRecorderTest, the_same_handle_can_be_in_both_normal_and_cheap_set) namespace { void check_tagging(const TermFieldMatchData &tfmd, bool exp_not_needed, - bool exp_needs_normal_features, bool exp_needs_cheap_features) + bool exp_needs_normal_features, bool exp_needs_interleaved_features) { EXPECT_EQ(tfmd.isNotNeeded(), exp_not_needed); EXPECT_EQ(tfmd.needs_normal_features(), exp_needs_normal_features); - EXPECT_EQ(tfmd.needs_cheap_features(), exp_needs_cheap_features); + EXPECT_EQ(tfmd.needs_interleaved_features(), exp_needs_interleaved_features); } } @@ -74,9 +74,9 @@ TEST(HandleRecorderTest, tagging_of_matchdata_works) { HandleRecorder::Binder binder(recorder); register_normal_handle(0); - register_cheap_handle(2); + register_interleaved_features_handle(2); register_normal_handle(3); - register_cheap_handle(3); 
+ register_interleaved_features_handle(3); } auto md = MatchData::makeTestInstance(4, 4); recorder.tag_match_data(*md); @@ -88,9 +88,9 @@ TEST(HandleRecorderTest, tagging_of_matchdata_works) { HandleRecorder::Binder binder(recorder2); register_normal_handle(0); - register_cheap_handle(0); + register_interleaved_features_handle(0); register_normal_handle(1); - register_cheap_handle(3); + register_interleaved_features_handle(3); } recorder2.tag_match_data(*md); check_tagging(*md->resolveTermField(0), false, true, true); diff --git a/searchcore/src/tests/proton/matching/matching_test.cpp b/searchcore/src/tests/proton/matching/matching_test.cpp index 967d8bfd0aa..e46ed997d0f 100644 --- a/searchcore/src/tests/proton/matching/matching_test.cpp +++ b/searchcore/src/tests/proton/matching/matching_test.cpp @@ -6,7 +6,6 @@ #include <vespa/searchcommon/attribute/iattributecontext.h> #include <vespa/searchcore/proton/test/bucketfactory.h> #include <vespa/searchcore/proton/documentmetastore/documentmetastore.h> -#include <vespa/searchcore/proton/matching/error_constant_value.h> #include <vespa/searchcore/proton/matching/fakesearchcontext.h> #include <vespa/searchcore/proton/matching/i_constant_value_repo.h> #include <vespa/searchcore/proton/matching/isearchcontext.h> @@ -105,7 +104,7 @@ const uint32_t NUM_DOCS = 1000; struct EmptyConstantValueRepo : public proton::matching::IConstantValueRepo { virtual vespalib::eval::ConstantValue::UP getConstant(const vespalib::string &) const override { - return std::make_unique<proton::matching::ErrorConstantValue>(); + return vespalib::eval::ConstantValue::UP(nullptr); } }; diff --git a/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.cpp b/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.cpp index d37072f5da2..9b535be19b7 100644 --- a/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.cpp +++ 
b/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.cpp @@ -1079,8 +1079,12 @@ DocumentMetaStore::foreach(const search::IGidToLidMapperVisitor &visitor) const } // namespace proton -template class search::btree:: -BTreeIterator<proton::DocumentMetaStore::DocId, - search::btree::BTreeNoLeafData, - search::btree::NoAggregated, - const proton::DocumentMetaStore::KeyComp &>; +namespace search::btree { + +template class BTreeIteratorBase<proton::DocumentMetaStore::DocId, BTreeNoLeafData, NoAggregated, BTreeDefaultTraits::INTERNAL_SLOTS, BTreeDefaultTraits::LEAF_SLOTS, BTreeDefaultTraits::PATH_SIZE>; + +template class BTreeConstIterator<proton::DocumentMetaStore::DocId, BTreeNoLeafData, NoAggregated, const proton::DocumentMetaStore::KeyComp &>; + +template class BTreeIterator<proton::DocumentMetaStore::DocId, BTreeNoLeafData, NoAggregated, const proton::DocumentMetaStore::KeyComp &>; + +} diff --git a/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.h b/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.h index efac1158cfb..27c1c97556c 100644 --- a/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.h +++ b/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastore.h @@ -268,8 +268,12 @@ public: } -extern template class search::btree:: -BTreeIterator<proton::DocumentMetaStore::DocId, - search::btree::BTreeNoLeafData, - search::btree::NoAggregated, - const proton::DocumentMetaStore::KeyComp &>; +namespace search::btree { + +extern template class BTreeIteratorBase<proton::DocumentMetaStore::DocId, BTreeNoLeafData, NoAggregated, BTreeDefaultTraits::INTERNAL_SLOTS, BTreeDefaultTraits::LEAF_SLOTS, BTreeDefaultTraits::PATH_SIZE>; + +extern template class BTreeConstIterator<proton::DocumentMetaStore::DocId, BTreeNoLeafData, NoAggregated, const proton::DocumentMetaStore::KeyComp &>; + +extern template class BTreeIterator<proton::DocumentMetaStore::DocId, 
BTreeNoLeafData, NoAggregated, const proton::DocumentMetaStore::KeyComp &>; + +} diff --git a/searchcore/src/vespa/searchcore/proton/matching/constant_value_repo.cpp b/searchcore/src/vespa/searchcore/proton/matching/constant_value_repo.cpp index 7e355da041c..bdfa4013c86 100644 --- a/searchcore/src/vespa/searchcore/proton/matching/constant_value_repo.cpp +++ b/searchcore/src/vespa/searchcore/proton/matching/constant_value_repo.cpp @@ -1,7 +1,6 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. #include "constant_value_repo.h" -#include "error_constant_value.h" using vespalib::eval::ConstantValue; @@ -27,7 +26,7 @@ ConstantValueRepo::getConstant(const vespalib::string &name) const if (constant != nullptr) { return _factory.create(constant->filePath, constant->type); } - return std::make_unique<ErrorConstantValue>(); + return ConstantValue::UP(nullptr); } } diff --git a/searchcore/src/vespa/searchcore/proton/matching/error_constant_value.h b/searchcore/src/vespa/searchcore/proton/matching/error_constant_value.h deleted file mode 100644 index 9b0b688085d..00000000000 --- a/searchcore/src/vespa/searchcore/proton/matching/error_constant_value.h +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. - -#pragma once - -#include <vespa/eval/eval/value_cache/constant_value.h> - -namespace proton { -namespace matching { - -/** - * Class representing an error constant value. - * Typically used to indicate that a named constant value does not exists. 
- */ -class ErrorConstantValue : public vespalib::eval::ConstantValue { -private: - vespalib::eval::ErrorValue _value; - vespalib::eval::ValueType _type; -public: - ErrorConstantValue() : _value(), _type(vespalib::eval::ValueType::error_type()) {} - virtual const vespalib::eval::Value &value() const override { return _value; } - virtual const vespalib::eval::ValueType &type() const override { return _type; } -}; - -} -} diff --git a/searchcore/src/vespa/searchcore/proton/matching/handlerecorder.cpp b/searchcore/src/vespa/searchcore/proton/matching/handlerecorder.cpp index 5f1e328be7a..566d3bae57e 100644 --- a/searchcore/src/vespa/searchcore/proton/matching/handlerecorder.cpp +++ b/searchcore/src/vespa/searchcore/proton/matching/handlerecorder.cpp @@ -60,7 +60,7 @@ HandleRecorder::to_string() const { vespalib::asciistream os; os << "normal: [" << handles_to_string(_handles, MatchDataDetails::Normal) << "], "; - os << "cheap: [" << handles_to_string(_handles, MatchDataDetails::Cheap) << "]"; + os << "interleaved: [" << handles_to_string(_handles, MatchDataDetails::Interleaved) << "]"; return os.str(); } @@ -107,7 +107,7 @@ HandleRecorder::add(TermFieldHandle handle, { if (requested_details == MatchDataDetails::Normal || - requested_details == MatchDataDetails::Cheap) { + requested_details == MatchDataDetails::Interleaved) { _handles[handle] = static_cast<MatchDataDetails>(static_cast<int>(_handles[handle]) | static_cast<int>(requested_details)); } else { abort(); @@ -124,7 +124,7 @@ HandleRecorder::tag_match_data(MatchData &match_data) tfmd.tagAsNotNeeded(); } else { tfmd.setNeedNormalFeatures((static_cast<int>(recorded->second) & static_cast<int>(MatchDataDetails::Normal)) != 0); - tfmd.setNeedCheapFeatures((static_cast<int>(recorded->second) & static_cast<int>(MatchDataDetails::Cheap)) != 0); + tfmd.setNeedInterleavedFeatures((static_cast<int>(recorded->second) & static_cast<int>(MatchDataDetails::Interleaved)) != 0); } } } diff --git 
a/searchcore/src/vespa/searchcore/proton/matching/i_constant_value_repo.h b/searchcore/src/vespa/searchcore/proton/matching/i_constant_value_repo.h index faa368c734b..5ac4fd14802 100644 --- a/searchcore/src/vespa/searchcore/proton/matching/i_constant_value_repo.h +++ b/searchcore/src/vespa/searchcore/proton/matching/i_constant_value_repo.h @@ -9,7 +9,7 @@ namespace matching { /** * Interface for retrieving a named constant rank value to be used by features in the rank framework. - * If the given value is not found an vespalib::eval::ErrorValue should be returned. + * If the given value is not found a nullptr should be returned. */ struct IConstantValueRepo { virtual vespalib::eval::ConstantValue::UP getConstant(const vespalib::string &name) const = 0; diff --git a/searchlib/src/tests/diskindex/diskindex/diskindex_test.cpp b/searchlib/src/tests/diskindex/diskindex/diskindex_test.cpp index 82a6f973cf9..d34f824d094 100644 --- a/searchlib/src/tests/diskindex/diskindex/diskindex_test.cpp +++ b/searchlib/src/tests/diskindex/diskindex/diskindex_test.cpp @@ -89,7 +89,7 @@ Verifier::Verifier(FakePosting::SP fp) { if (_fp) { _tfmd.setNeedNormalFeatures(_fp->enable_unpack_normal_features()); - _tfmd.setNeedCheapFeatures(_fp->enable_unpack_cheap_features()); + _tfmd.setNeedInterleavedFeatures(_fp->enable_unpack_interleaved_features()); } _tfmda.add(&_tfmd); } diff --git a/searchlib/src/tests/diskindex/fieldwriter/fieldwriter_test.cpp b/searchlib/src/tests/diskindex/fieldwriter/fieldwriter_test.cpp index 2b9f8a5b201..eb3218468ae 100644 --- a/searchlib/src/tests/diskindex/fieldwriter/fieldwriter_test.cpp +++ b/searchlib/src/tests/diskindex/fieldwriter/fieldwriter_test.cpp @@ -149,7 +149,7 @@ public: std::unique_ptr<FieldWriter> _fieldWriter; private: bool _dynamicK; - bool _encode_cheap_features; + bool _encode_interleaved_features; uint32_t _numWordIds; uint32_t _docIdLimit; vespalib::string _namepref; @@ -173,12 +173,12 @@ WrappedFieldWriter::~WrappedFieldWriter() {} 
WrappedFieldWriter::WrappedFieldWriter(const vespalib::string &namepref, bool dynamicK, - bool encode_cheap_features, + bool encode_interleaved_features, uint32_t numWordIds, uint32_t docIdLimit) : _fieldWriter(), _dynamicK(dynamicK), - _encode_cheap_features(encode_cheap_features), + _encode_interleaved_features(encode_interleaved_features), _numWordIds(numWordIds), _docIdLimit(docIdLimit), _namepref(dirprefix + namepref), @@ -200,7 +200,7 @@ WrappedFieldWriter::open() _fieldWriter = std::make_unique<FieldWriter>(_docIdLimit, _numWordIds); _fieldWriter->open(_namepref, minSkipDocs, minChunkDocs, - _dynamicK, _encode_cheap_features, + _dynamicK, _encode_interleaved_features, _schema, _indexId, FieldLengthInfo(4.5, 42), tuneFileWrite, fileHeaderContext); @@ -358,7 +358,7 @@ void writeField(FakeWordSet &wordSet, uint32_t docIdLimit, const std::string &namepref, - bool dynamicK, bool encode_cheap_features) + bool dynamicK, bool encode_interleaved_features) { const char *dynamicKStr = dynamicK ? 
"true" : "false"; @@ -368,14 +368,14 @@ writeField(FakeWordSet &wordSet, LOG(info, "enter writeField, " - "namepref=%s, dynamicK=%s, encode_cheap_features=%s", + "namepref=%s, dynamicK=%s, encode_interleaved_features=%s", namepref.c_str(), dynamicKStr, - bool_to_str(encode_cheap_features)); + bool_to_str(encode_interleaved_features)); tv.SetNow(); before = tv.Secs(); WrappedFieldWriter ostate(namepref, - dynamicK, encode_cheap_features, + dynamicK, encode_interleaved_features, wordSet.getNumWords(), docIdLimit); FieldWriter::remove(dirprefix + namepref); ostate.open(); @@ -394,11 +394,11 @@ writeField(FakeWordSet &wordSet, after = tv.Secs(); LOG(info, "leave writeField, " - "namepref=%s, dynamicK=%s, encode_cheap_features=%s" + "namepref=%s, dynamicK=%s, encode_interleaved_features=%s" " elapsed=%10.6f", namepref.c_str(), dynamicKStr, - bool_to_str(encode_cheap_features), + bool_to_str(encode_interleaved_features), after - before); } @@ -408,7 +408,7 @@ readField(FakeWordSet &wordSet, uint32_t docIdLimit, const std::string &namepref, bool dynamicK, - bool decode_cheap_features, + bool decode_interleaved_features, bool verbose) { const char *dynamicKStr = dynamicK ? 
"true" : "false"; @@ -420,10 +420,10 @@ readField(FakeWordSet &wordSet, docIdLimit); LOG(info, "enter readField, " - "namepref=%s, dynamicK=%s, decode_cheap_features=%s", + "namepref=%s, dynamicK=%s, decode_interleaved_features=%s", namepref.c_str(), dynamicKStr, - bool_to_str(decode_cheap_features)); + bool_to_str(decode_interleaved_features)); tv.SetNow(); before = tv.Secs(); istate.open(); @@ -442,7 +442,7 @@ readField(FakeWordSet &wordSet, TermFieldMatchDataArray tfmda; tfmda.add(&mdfield1); - word->validate(*istate._fieldReader, wordNum, tfmda, decode_cheap_features, verbose); + word->validate(*istate._fieldReader, wordNum, tfmda, decode_interleaved_features, verbose); ++wordNum; } } @@ -452,11 +452,11 @@ readField(FakeWordSet &wordSet, after = tv.Secs(); LOG(info, "leave readField, " - "namepref=%s, dynamicK=%s, decode_cheap_features=%s" + "namepref=%s, dynamicK=%s, decode_interleaved_features=%s" " elapsed=%10.6f", namepref.c_str(), dynamicKStr, - bool_to_str(decode_cheap_features), + bool_to_str(decode_interleaved_features), after - before); } @@ -465,7 +465,7 @@ void randReadField(FakeWordSet &wordSet, const std::string &namepref, bool dynamicK, - bool decode_cheap_features, + bool decode_interleaved_features, bool verbose) { const char *dynamicKStr = dynamicK ? 
"true" : "false"; @@ -477,10 +477,10 @@ randReadField(FakeWordSet &wordSet, LOG(info, "enter randReadField," - " namepref=%s, dynamicK=%s, decode_cheap_features=%s", + " namepref=%s, dynamicK=%s, decode_interleaved_features=%s", namepref.c_str(), dynamicKStr, - bool_to_str(decode_cheap_features)); + bool_to_str(decode_interleaved_features)); tv.SetNow(); before = tv.Secs(); @@ -545,12 +545,12 @@ randReadField(FakeWordSet &wordSet, sb(handle.createIterator(counts, tfmda)); // LOG(info, "loop=%d, wordNum=%u", loop, wordNum); - word->validate(sb.get(), tfmda, true, decode_cheap_features, verbose); - word->validate(sb.get(), tfmda, 19, true, decode_cheap_features, verbose); - word->validate(sb.get(), tfmda, 99, true, decode_cheap_features, verbose); - word->validate(sb.get(), tfmda, 799, true, decode_cheap_features, verbose); - word->validate(sb.get(), tfmda, 6399, true, decode_cheap_features, verbose); - word->validate(sb.get(), tfmda, 11999, true, decode_cheap_features, verbose); + word->validate(sb.get(), tfmda, true, decode_interleaved_features, verbose); + word->validate(sb.get(), tfmda, 19, true, decode_interleaved_features, verbose); + word->validate(sb.get(), tfmda, 99, true, decode_interleaved_features, verbose); + word->validate(sb.get(), tfmda, 799, true, decode_interleaved_features, verbose); + word->validate(sb.get(), tfmda, 6399, true, decode_interleaved_features, verbose); + word->validate(sb.get(), tfmda, 11999, true, decode_interleaved_features, verbose); ++wordNum; } } @@ -564,11 +564,11 @@ randReadField(FakeWordSet &wordSet, after = tv.Secs(); LOG(info, "leave randReadField, namepref=%s," - " dynamicK=%s, decode_cheap_features=%s, " + " dynamicK=%s, decode_interleaved_features=%s, " "elapsed=%10.6f", namepref.c_str(), dynamicKStr, - bool_to_str(decode_cheap_features), + bool_to_str(decode_interleaved_features), after - before); } @@ -580,7 +580,7 @@ fusionField(uint32_t numWordIds, const vespalib::string &opref, bool doRaw, bool dynamicK, - bool 
encode_cheap_features) + bool encode_interleaved_features) { const char *rawStr = doRaw ? "true" : "false"; const char *dynamicKStr = dynamicK ? "true" : "false"; @@ -589,17 +589,17 @@ fusionField(uint32_t numWordIds, LOG(info, "enter fusionField, ipref=%s, opref=%s," " raw=%s," - " dynamicK=%s, encode_cheap_features=%s", + " dynamicK=%s, encode_interleaved_features=%s", ipref.c_str(), opref.c_str(), rawStr, - dynamicKStr, bool_to_str(encode_cheap_features)); + dynamicKStr, bool_to_str(encode_interleaved_features)); FastOS_Time tv; double before; double after; WrappedFieldWriter ostate(opref, - dynamicK, encode_cheap_features, + dynamicK, encode_interleaved_features, numWordIds, docIdLimit); WrappedFieldReader istate(ipref, numWordIds, docIdLimit); @@ -628,12 +628,12 @@ fusionField(uint32_t numWordIds, after = tv.Secs(); LOG(info, "leave fusionField, ipref=%s, opref=%s," - " raw=%s dynamicK=%s, encode_cheap_features=%s," + " raw=%s dynamicK=%s, encode_interleaved_features=%s," " elapsed=%10.6f", ipref.c_str(), opref.c_str(), rawStr, - dynamicKStr, bool_to_str(encode_cheap_features), + dynamicKStr, bool_to_str(encode_interleaved_features), after - before); } @@ -642,20 +642,20 @@ void testFieldWriterVariant(FakeWordSet &wordSet, uint32_t doc_id_limit, const vespalib::string &file_name_prefix, bool dynamic_k, - bool encode_cheap_features, + bool encode_interleaved_features, bool verbose) { - writeField(wordSet, doc_id_limit, file_name_prefix, dynamic_k, encode_cheap_features); - readField(wordSet, doc_id_limit, file_name_prefix, dynamic_k, encode_cheap_features, verbose); - randReadField(wordSet, file_name_prefix, dynamic_k, encode_cheap_features, verbose); + writeField(wordSet, doc_id_limit, file_name_prefix, dynamic_k, encode_interleaved_features); + readField(wordSet, doc_id_limit, file_name_prefix, dynamic_k, encode_interleaved_features, verbose); + randReadField(wordSet, file_name_prefix, dynamic_k, encode_interleaved_features, verbose); 
fusionField(wordSet.getNumWords(), doc_id_limit, file_name_prefix, file_name_prefix + "x", - false, dynamic_k, encode_cheap_features); + false, dynamic_k, encode_interleaved_features); fusionField(wordSet.getNumWords(), doc_id_limit, file_name_prefix, file_name_prefix + "xx", - true, dynamic_k, encode_cheap_features); + true, dynamic_k, encode_interleaved_features); check_fusion(file_name_prefix); remove_field(file_name_prefix); } diff --git a/searchlib/src/tests/features/ranking_expression/ranking_expression_test.cpp b/searchlib/src/tests/features/ranking_expression/ranking_expression_test.cpp index c7c3447a4cc..251040ecfa7 100644 --- a/searchlib/src/tests/features/ranking_expression/ranking_expression_test.cpp +++ b/searchlib/src/tests/features/ranking_expression/ranking_expression_test.cpp @@ -26,6 +26,8 @@ struct DummyExpression : IntrinsicExpression { DummyExpression(const FeatureType &type_in) : type(type_in) {} vespalib::string describe_self() const override { return "dummy"; } const FeatureType &result_type() const override { return type; } + void prepare_shared_state(const QueryEnv &, IObjectStore &) const override { + } FeatureExecutor &create_executor(const QueryEnv &, vespalib::Stash &stash) const override { return stash.create<DummyExecutor>(); } @@ -81,7 +83,7 @@ SetupResult::SetupResult(const TypeMap &object_inputs, setup_ok = rank.setup(index_env, {}); EXPECT_TRUE(!deps.accept_type_mismatch); } -SetupResult::~SetupResult() {} +SetupResult::~SetupResult() = default; void verify_output_type(const TypeMap &object_inputs, const vespalib::string &expression, const FeatureType &expect) diff --git a/searchlib/src/tests/fef/termfieldmodel/termfieldmodel_test.cpp b/searchlib/src/tests/fef/termfieldmodel/termfieldmodel_test.cpp index 3a0c334fbba..a75b91cb78a 100644 --- a/searchlib/src/tests/fef/termfieldmodel/termfieldmodel_test.cpp +++ b/searchlib/src/tests/fef/termfieldmodel/termfieldmodel_test.cpp @@ -257,32 +257,32 @@ TEST("require that TermFieldMatchData 
can be tagged as needed or not") { EXPECT_EQUAL(tfmd.getFieldId(),123u); EXPECT_TRUE(!tfmd.isNotNeeded()); EXPECT_TRUE(tfmd.needs_normal_features()); - EXPECT_TRUE(tfmd.needs_cheap_features()); + EXPECT_TRUE(tfmd.needs_interleaved_features()); tfmd.tagAsNotNeeded(); EXPECT_EQUAL(tfmd.getFieldId(),123u); EXPECT_TRUE(tfmd.isNotNeeded()); EXPECT_TRUE(!tfmd.needs_normal_features()); - EXPECT_TRUE(!tfmd.needs_cheap_features()); + EXPECT_TRUE(!tfmd.needs_interleaved_features()); tfmd.setNeedNormalFeatures(true); EXPECT_EQUAL(tfmd.getFieldId(),123u); EXPECT_TRUE(!tfmd.isNotNeeded()); EXPECT_TRUE(tfmd.needs_normal_features()); - EXPECT_TRUE(!tfmd.needs_cheap_features()); - tfmd.setNeedCheapFeatures(true); + EXPECT_TRUE(!tfmd.needs_interleaved_features()); + tfmd.setNeedInterleavedFeatures(true); EXPECT_EQUAL(tfmd.getFieldId(),123u); EXPECT_TRUE(!tfmd.isNotNeeded()); EXPECT_TRUE(tfmd.needs_normal_features()); - EXPECT_TRUE(tfmd.needs_cheap_features()); + EXPECT_TRUE(tfmd.needs_interleaved_features()); tfmd.setNeedNormalFeatures(false); EXPECT_EQUAL(tfmd.getFieldId(),123u); EXPECT_TRUE(!tfmd.isNotNeeded()); EXPECT_TRUE(!tfmd.needs_normal_features()); - EXPECT_TRUE(tfmd.needs_cheap_features()); - tfmd.setNeedCheapFeatures(false); + EXPECT_TRUE(tfmd.needs_interleaved_features()); + tfmd.setNeedInterleavedFeatures(false); EXPECT_EQUAL(tfmd.getFieldId(),123u); EXPECT_TRUE(tfmd.isNotNeeded()); EXPECT_TRUE(!tfmd.needs_normal_features()); - EXPECT_TRUE(!tfmd.needs_cheap_features()); + EXPECT_TRUE(!tfmd.needs_interleaved_features()); } TEST("require that MatchData soft_reset retains appropriate state") { diff --git a/searchlib/src/tests/memoryindex/field_index/field_index_iterator_test.cpp b/searchlib/src/tests/memoryindex/field_index/field_index_iterator_test.cpp index df7f80e8601..36e9bde5c9f 100644 --- a/searchlib/src/tests/memoryindex/field_index/field_index_iterator_test.cpp +++ b/searchlib/src/tests/memoryindex/field_index/field_index_iterator_test.cpp @@ -18,10 +18,13 @@ 
using namespace search::memoryindex; using search::index::schema::DataType; using search::test::SearchIteratorVerifier; +using FieldIndexType = FieldIndex<false>; +using PostingIteratorType = PostingIterator<false>; + class Verifier : public SearchIteratorVerifier { private: mutable TermFieldMatchData _tfmd; - FieldIndex _field_index; + FieldIndexType _field_index; public: Verifier(const Schema& schema) @@ -41,8 +44,8 @@ public: (void) strict; TermFieldMatchDataArray match_data; match_data.add(&_tfmd); - return std::make_unique<PostingIterator>(_field_index.find("a"), - _field_index.getFeatureStore(), 0, match_data); + return std::make_unique<PostingIteratorType>(_field_index.find("a"), + _field_index.getFeatureStore(), 0, match_data); } }; diff --git a/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp b/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp index 05c905cdc32..f2cc2580cd8 100644 --- a/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp +++ b/searchlib/src/tests/memoryindex/field_index/field_index_test.cpp @@ -40,8 +40,10 @@ using vespalib::GenerationHandler; namespace memoryindex { using test::WrapInserter; -using PostingList = FieldIndex::PostingList; +using FieldIndexType = FieldIndex<false>; +using PostingList = FieldIndexType::PostingList; using PostingConstItr = PostingList::ConstIterator; +using PostingIteratorType = PostingIterator<false>; class MyBuilder : public IndexBuilder { private: @@ -171,7 +173,7 @@ assertPostingList(const std::string &exp, uint32_t docId = itr.getKey(); ss << docId; if (store != nullptr) { // consider features as well - EntryRef ref(itr.getData()); + EntryRef ref(itr.getData().get_features()); store->setupForField(0, decoder); store->setupForUnpackFeatures(ref, decoder); decoder.unpackFeatures(matchData, docId); @@ -197,6 +199,25 @@ assertPostingList(std::vector<uint32_t> &exp, PostingConstItr itr) return assertPostingList(ss.str(), itr); } +FieldIndexType::PostingList::Iterator 
+find_in_field_index(const vespalib::stringref word, + uint32_t fieldId, + const FieldIndexCollection& fic) +{ + auto* field_index = dynamic_cast<FieldIndexType*>(fic.getFieldIndex(fieldId)); + assert(field_index != nullptr); + return field_index->find(word); +} + +FieldIndexType::PostingList::ConstIterator +find_frozen_in_field_index(const vespalib::stringref word, + uint32_t fieldId, + const FieldIndexCollection& fic) +{ + auto* field_index = dynamic_cast<FieldIndexType*>(fic.getFieldIndex(fieldId)); + assert(field_index != nullptr); + return field_index->findFrozen(word); +} namespace { @@ -332,7 +353,7 @@ public: bool assertPosting(const vespalib::string &word, uint32_t fieldId) { std::vector<uint32_t> exp = _mock.find(word, fieldId); - PostingConstItr itr = _fieldIndexes.find(word, fieldId); + PostingConstItr itr = find_in_field_index(word, fieldId, _fieldIndexes); bool result = assertPostingList(exp, itr); EXPECT_TRUE(result); return result; @@ -390,7 +411,7 @@ public: { } - MyDrainRemoves(FieldIndex& field_index) + MyDrainRemoves(FieldIndexType& field_index) : _remover(field_index.getDocumentRemover()) { } @@ -468,7 +489,7 @@ make_single_field_schema() struct FieldIndexTest : public ::testing::Test { Schema schema; - FieldIndex idx; + FieldIndexType idx; FieldIndexTest() : schema(make_single_field_schema()), idx(schema, 0) @@ -487,6 +508,8 @@ make_multi_field_schema() return result; } + + struct FieldIndexCollectionTest : public ::testing::Test { Schema schema; FieldIndexCollection fic; @@ -496,6 +519,11 @@ struct FieldIndexCollectionTest : public ::testing::Test { { } ~FieldIndexCollectionTest() {} + + FieldIndexType::PostingList::Iterator find(const vespalib::stringref word, + uint32_t fieldId) const { + return find_in_field_index(word, fieldId, fic); + } }; TEST_F(FieldIndexTest, require_that_fresh_insert_works) @@ -529,12 +557,12 @@ TEST_F(FieldIndexCollectionTest, require_that_multiple_posting_lists_across_mult WrapInserter(fic, 
0).word("a").add(10).word("b").add(11).add(15).flush(); WrapInserter(fic, 1).word("a").add(5).word("b").add(12).flush(); EXPECT_EQ(4u, fic.getNumUniqueWords()); - EXPECT_TRUE(assertPostingList("[10]", fic.find("a", 0))); - EXPECT_TRUE(assertPostingList("[5]", fic.find("a", 1))); - EXPECT_TRUE(assertPostingList("[11,15]", fic.find("b", 0))); - EXPECT_TRUE(assertPostingList("[12]", fic.find("b", 1))); - EXPECT_TRUE(assertPostingList("[]", fic.find("a", 2))); - EXPECT_TRUE(assertPostingList("[]", fic.find("c", 0))); + EXPECT_TRUE(assertPostingList("[10]", find("a", 0))); + EXPECT_TRUE(assertPostingList("[5]", find("a", 1))); + EXPECT_TRUE(assertPostingList("[11,15]", find("b", 0))); + EXPECT_TRUE(assertPostingList("[12]", find("b", 1))); + EXPECT_TRUE(assertPostingList("[]", find("a", 2))); + EXPECT_TRUE(assertPostingList("[]", find("c", 0))); } TEST_F(FieldIndexTest, require_that_remove_works) @@ -622,16 +650,16 @@ TEST_F(FieldIndexCollectionTest, require_that_features_are_in_posting_lists) { WrapInserter(fic, 0).word("a").add(1, getFeatures(4, 2)).flush(); EXPECT_TRUE(assertPostingList("[1{4:0,1}]", - fic.find("a", 0), + find("a", 0), featureStorePtr(fic, 0))); WrapInserter(fic, 0).word("b").add(2, getFeatures(5, 1)). 
add(3, getFeatures(6, 2)).flush(); EXPECT_TRUE(assertPostingList("[2{5:0},3{6:0,1}]", - fic.find("b", 0), + find("b", 0), featureStorePtr(fic, 0))); WrapInserter(fic, 1).word("c").add(4, getFeatures(7, 2)).flush(); EXPECT_TRUE(assertPostingList("[4{7:0,1}]", - fic.find("c", 1), + find("c", 1), featureStorePtr(fic, 1))); } @@ -645,16 +673,16 @@ TEST_F(FieldIndexTest, require_that_posting_iterator_is_working) TermFieldMatchDataArray matchData; matchData.add(&tfmd); { - PostingIterator itr(idx.find("not"), - idx.getFeatureStore(), - 0, matchData); + PostingIteratorType itr(idx.find("not"), + idx.getFeatureStore(), + 0, matchData); itr.initFullRange(); EXPECT_TRUE(itr.isAtEnd()); } { - PostingIterator itr(idx.find("a"), - idx.getFeatureStore(), - 0, matchData); + PostingIteratorType itr(idx.find("a"), + idx.getFeatureStore(), + 0, matchData); itr.initFullRange(); EXPECT_EQ(10u, itr.getDocId()); itr.unpack(10); @@ -764,6 +792,12 @@ public: _inv(_schema, _invertThreads, _pushThreads, _fic) { } + PostingList::Iterator find(const vespalib::stringref word, uint32_t fieldId) const { + return find_in_field_index(word, fieldId, _fic); + } + PostingList::ConstIterator findFrozen(const vespalib::stringref word, uint32_t fieldId) const { + return find_frozen_in_field_index(word, fieldId, _fic); + } }; class BasicInverterTest : public InverterTest { @@ -922,12 +956,12 @@ TEST_F(BasicInverterTest, require_that_inversion_is_working) TermFieldMatchDataArray matchData; matchData.add(&tfmd); { - PostingIterator itr(_fic.findFrozen("not", 0), featureStoreRef(_fic, 0), 0, matchData); + PostingIteratorType itr(findFrozen("not", 0), featureStoreRef(_fic, 0), 0, matchData); itr.initFullRange(); EXPECT_TRUE(itr.isAtEnd()); } { - PostingIterator itr(_fic.findFrozen("a", 0), featureStoreRef(_fic, 0), 0, matchData); + PostingIteratorType itr(findFrozen("a", 0), featureStoreRef(_fic, 0), 0, matchData); itr.initFullRange(); EXPECT_EQ(10u, itr.getDocId()); itr.unpack(10); @@ -944,19 +978,19 @@ 
TEST_F(BasicInverterTest, require_that_inversion_is_working) EXPECT_TRUE(itr.isAtEnd()); } { - PostingIterator itr(_fic.findFrozen("x", 0), featureStoreRef(_fic, 0), 0, matchData); + PostingIteratorType itr(findFrozen("x", 0), featureStoreRef(_fic, 0), 0, matchData); itr.initFullRange(); EXPECT_TRUE(itr.isAtEnd()); } { - PostingIterator itr(_fic.findFrozen("x", 1), featureStoreRef(_fic, 1), 1, matchData); + PostingIteratorType itr(findFrozen("x", 1), featureStoreRef(_fic, 1), 1, matchData); itr.initFullRange(); EXPECT_EQ(30u, itr.getDocId()); itr.unpack(30); EXPECT_EQ("{6:2[e=0,w=1,l=6]}", toString(tfmd.getIterator(), true, true)); } { - PostingIterator itr(_fic.findFrozen("x", 2), featureStoreRef(_fic, 2), 2, matchData); + PostingIteratorType itr(findFrozen("x", 2), featureStoreRef(_fic, 2), 2, matchData); itr.initFullRange(); EXPECT_EQ(30u, itr.getDocId()); itr.unpack(30); @@ -964,7 +998,7 @@ TEST_F(BasicInverterTest, require_that_inversion_is_working) EXPECT_EQ("{2:1[e=0,w=1,l=2]}", toString(tfmd.getIterator(), true, true)); } { - PostingIterator itr(_fic.findFrozen("x", 3), featureStoreRef(_fic, 3), 3, matchData); + PostingIteratorType itr(findFrozen("x", 3), featureStoreRef(_fic, 3), 3, matchData); itr.initFullRange(); EXPECT_EQ(30u, itr.getDocId()); itr.unpack(30); @@ -994,20 +1028,20 @@ TEST_F(BasicInverterTest, require_that_inverter_handles_remove_via_document_remo myPushDocument(_inv); _pushThreads.sync(); - EXPECT_TRUE(assertPostingList("[1]", _fic.find("a", 0))); - EXPECT_TRUE(assertPostingList("[1,2]", _fic.find("b", 0))); - EXPECT_TRUE(assertPostingList("[2]", _fic.find("c", 0))); - EXPECT_TRUE(assertPostingList("[1]", _fic.find("a", 1))); - EXPECT_TRUE(assertPostingList("[1]", _fic.find("c", 1))); + EXPECT_TRUE(assertPostingList("[1]", find("a", 0))); + EXPECT_TRUE(assertPostingList("[1,2]", find("b", 0))); + EXPECT_TRUE(assertPostingList("[2]", find("c", 0))); + EXPECT_TRUE(assertPostingList("[1]", find("a", 1))); + 
EXPECT_TRUE(assertPostingList("[1]", find("c", 1))); myremove(1, _inv, _invertThreads); _pushThreads.sync(); - EXPECT_TRUE(assertPostingList("[]", _fic.find("a", 0))); - EXPECT_TRUE(assertPostingList("[2]", _fic.find("b", 0))); - EXPECT_TRUE(assertPostingList("[2]", _fic.find("c", 0))); - EXPECT_TRUE(assertPostingList("[]", _fic.find("a", 1))); - EXPECT_TRUE(assertPostingList("[]", _fic.find("c", 1))); + EXPECT_TRUE(assertPostingList("[]", find("a", 0))); + EXPECT_TRUE(assertPostingList("[2]", find("b", 0))); + EXPECT_TRUE(assertPostingList("[2]", find("c", 0))); + EXPECT_TRUE(assertPostingList("[]", find("a", 1))); + EXPECT_TRUE(assertPostingList("[]", find("c", 1))); } Schema @@ -1161,17 +1195,17 @@ TEST_F(UriInverterTest, require_that_uri_indexing_is_working) matchData.add(&tfmd); { uint32_t fieldId = _schema.getIndexFieldId("iu"); - PostingIterator itr(_fic.findFrozen("not", fieldId), - featureStoreRef(_fic, fieldId), - fieldId, matchData); + PostingIteratorType itr(findFrozen("not", fieldId), + featureStoreRef(_fic, fieldId), + fieldId, matchData); itr.initFullRange(); EXPECT_TRUE(itr.isAtEnd()); } { uint32_t fieldId = _schema.getIndexFieldId("iu"); - PostingIterator itr(_fic.findFrozen("example", fieldId), - featureStoreRef(_fic, fieldId), - fieldId, matchData); + PostingIteratorType itr(findFrozen("example", fieldId), + featureStoreRef(_fic, fieldId), + fieldId, matchData); itr.initFullRange(); EXPECT_EQ(10u, itr.getDocId()); itr.unpack(10); @@ -1181,9 +1215,9 @@ TEST_F(UriInverterTest, require_that_uri_indexing_is_working) } { uint32_t fieldId = _schema.getIndexFieldId("iau"); - PostingIterator itr(_fic.findFrozen("example", fieldId), - featureStoreRef(_fic, fieldId), - fieldId, matchData); + PostingIteratorType itr(findFrozen("example", fieldId), + featureStoreRef(_fic, fieldId), + fieldId, matchData); itr.initFullRange(); EXPECT_EQ(10u, itr.getDocId()); itr.unpack(10); @@ -1194,9 +1228,9 @@ TEST_F(UriInverterTest, require_that_uri_indexing_is_working) } { 
uint32_t fieldId = _schema.getIndexFieldId("iwu"); - PostingIterator itr(_fic.findFrozen("example", fieldId), - featureStoreRef(_fic, fieldId), - fieldId, matchData); + PostingIteratorType itr(findFrozen("example", fieldId), + featureStoreRef(_fic, fieldId), + fieldId, matchData); itr.initFullRange(); EXPECT_EQ(10u, itr.getDocId()); itr.unpack(10); @@ -1247,18 +1281,18 @@ TEST_F(CjkInverterTest, require_that_cjk_indexing_is_working) matchData.add(&tfmd); uint32_t fieldId = _schema.getIndexFieldId("f0"); { - PostingIterator itr(_fic.findFrozen("not", fieldId), - featureStoreRef(_fic, fieldId), - fieldId, matchData); + PostingIteratorType itr(findFrozen("not", fieldId), + featureStoreRef(_fic, fieldId), + fieldId, matchData); itr.initFullRange(); EXPECT_TRUE(itr.isAtEnd()); } { - PostingIterator itr(_fic.findFrozen("我就" - "是那个", - fieldId), - featureStoreRef(_fic, fieldId), - fieldId, matchData); + PostingIteratorType itr(findFrozen("我就" + "是那个", + fieldId), + featureStoreRef(_fic, fieldId), + fieldId, matchData); itr.initFullRange(); EXPECT_EQ(10u, itr.getDocId()); itr.unpack(10); @@ -1267,11 +1301,11 @@ TEST_F(CjkInverterTest, require_that_cjk_indexing_is_working) EXPECT_TRUE(itr.isAtEnd()); } { - PostingIterator itr(_fic.findFrozen("大灰" - "狼", - fieldId), - featureStoreRef(_fic, fieldId), - fieldId, matchData); + PostingIteratorType itr(findFrozen("大灰" + "狼", + fieldId), + featureStoreRef(_fic, fieldId), + fieldId, matchData); itr.initFullRange(); EXPECT_EQ(10u, itr.getDocId()); itr.unpack(10); @@ -1315,9 +1349,9 @@ struct RemoverTest : public FieldIndexCollectionTest { void assertPostingLists(const vespalib::string &e1, const vespalib::string &e2, const vespalib::string &e3) { - EXPECT_TRUE(assertPostingList(e1, fic.find("a", 1))); - EXPECT_TRUE(assertPostingList(e2, fic.find("a", 2))); - EXPECT_TRUE(assertPostingList(e3, fic.find("b", 1))); + EXPECT_TRUE(assertPostingList(e1, find("a", 1))); + EXPECT_TRUE(assertPostingList(e2, find("a", 2))); + 
EXPECT_TRUE(assertPostingList(e3, find("b", 1))); } void remove(uint32_t docId) { DocumentInverter inv(schema, _invertThreads, _pushThreads, fic); diff --git a/searchlib/src/tests/memoryindex/field_inverter/field_inverter_test.cpp b/searchlib/src/tests/memoryindex/field_inverter/field_inverter_test.cpp index f741d902d3e..ff0629d2172 100644 --- a/searchlib/src/tests/memoryindex/field_inverter/field_inverter_test.cpp +++ b/searchlib/src/tests/memoryindex/field_inverter/field_inverter_test.cpp @@ -354,11 +354,11 @@ TEST_F("require that multiple words at same position works", Fixture) f._inserter.toStr()); } -TEST_F("require that cheap features are calculated", Fixture) +TEST_F("require that interleaved features are calculated", Fixture) { f.invertDocument(17, *makeDoc17(f._b)); f._inserter.setVerbose(); - f._inserter.set_show_cheap_features(); + f._inserter.set_show_interleaved_features(); f.pushDocuments(); EXPECT_EQUAL("f=1," "w=bar0,a=17(fl=2,occs=1,e=0,w=1,l=2[1])," diff --git a/searchlib/src/tests/postinglistbm/posting_list_test.cpp b/searchlib/src/tests/postinglistbm/posting_list_test.cpp index 2022ffb1b8e..a7d810cb1a1 100644 --- a/searchlib/src/tests/postinglistbm/posting_list_test.cpp +++ b/searchlib/src/tests/postinglistbm/posting_list_test.cpp @@ -26,10 +26,10 @@ validate_posting_list_for_word(const FakePosting& posting, const FakeWord& word) tfmda.add(&md); md.setNeedNormalFeatures(posting.enable_unpack_normal_features()); - md.setNeedCheapFeatures(posting.enable_unpack_cheap_features()); + md.setNeedInterleavedFeatures(posting.enable_unpack_interleaved_features()); std::unique_ptr<SearchIterator> iterator(posting.createIterator(tfmda)); if (posting.hasWordPositions()) { - word.validate(iterator.get(), tfmda, posting.enable_unpack_normal_features(), posting.has_cheap_features() && posting.enable_unpack_cheap_features(), false); + word.validate(iterator.get(), tfmda, posting.enable_unpack_normal_features(), posting.has_interleaved_features() && 
posting.enable_unpack_interleaved_features(), false); } else { word.validate(iterator.get(), false); } diff --git a/searchlib/src/tests/postinglistbm/stress_runner.cpp b/searchlib/src/tests/postinglistbm/stress_runner.cpp index 5353008965d..a2a2968c13c 100644 --- a/searchlib/src/tests/postinglistbm/stress_runner.cpp +++ b/searchlib/src/tests/postinglistbm/stress_runner.cpp @@ -215,13 +215,13 @@ makeSomePostings(FPFactory *postingFactory, tfmda.add(&md); md.setNeedNormalFeatures(posting->enable_unpack_normal_features()); - md.setNeedCheapFeatures(posting->enable_unpack_cheap_features()); + md.setNeedInterleavedFeatures(posting->enable_unpack_interleaved_features()); std::unique_ptr<SearchIterator> iterator(posting->createIterator(tfmda)); if (posting->hasWordPositions()) { if (stride != 0) { - word->validate(iterator.get(), tfmda, stride, posting->enable_unpack_normal_features(), posting->has_cheap_features() && posting->enable_unpack_cheap_features(), verbose); + word->validate(iterator.get(), tfmda, stride, posting->enable_unpack_normal_features(), posting->has_interleaved_features() && posting->enable_unpack_interleaved_features(), verbose); } else { - word->validate(iterator.get(), tfmda, posting->enable_unpack_normal_features(), posting->has_cheap_features() && posting->enable_unpack_cheap_features(), verbose); + word->validate(iterator.get(), tfmda, posting->enable_unpack_normal_features(), posting->has_interleaved_features() && posting->enable_unpack_interleaved_features(), verbose); } } else { word->validate(iterator.get(), verbose); diff --git a/searchlib/src/vespa/searchlib/attribute/enumcomparator.h b/searchlib/src/vespa/searchlib/attribute/enumcomparator.h index 4cd446352d0..255d0bead9f 100644 --- a/searchlib/src/vespa/searchlib/attribute/enumcomparator.h +++ b/searchlib/src/vespa/searchlib/attribute/enumcomparator.h @@ -27,13 +27,18 @@ public: /** * Creates a comparator using the given enum store. 
**/ - EnumStoreComparatorT(const EnumStoreType & enumStore); + EnumStoreComparatorT(const EnumStoreType & enumStore) + : _enumStore(enumStore), + _value() + {} /** * Creates a comparator using the given enum store and that uses the * given value during compare if the enum index is invalid. **/ - EnumStoreComparatorT(const EnumStoreType & enumStore, - EntryValue value); + EnumStoreComparatorT(const EnumStoreType & enumStore, EntryValue value) + : _enumStore(enumStore), + _value(value) + {} static int compare(EntryValue lhs, EntryValue rhs) { if (lhs < rhs) { @@ -60,7 +65,7 @@ private: typedef typename ParentType::EnumIndex EnumIndex; typedef typename ParentType::EntryValue EntryValue; using ParentType::getValue; - bool _prefix; + bool _prefix; size_t _prefixLen; public: /** @@ -90,22 +95,6 @@ public: } }; - -template <typename EntryType> -EnumStoreComparatorT<EntryType>::EnumStoreComparatorT(const EnumStoreType & enumStore) : - _enumStore(enumStore), - _value() -{ -} - -template <typename EntryType> -EnumStoreComparatorT<EntryType>::EnumStoreComparatorT(const EnumStoreType & enumStore, - EntryValue value) : - _enumStore(enumStore), - _value(value) -{ -} - template <> int EnumStoreComparatorT<NumericEntryType<float> >::compare(EntryValue lhs, EntryValue rhs); diff --git a/searchlib/src/vespa/searchlib/attribute/enumstorebase.cpp b/searchlib/src/vespa/searchlib/attribute/enumstorebase.cpp index 94c431368cb..decb4152d8d 100644 --- a/searchlib/src/vespa/searchlib/attribute/enumstorebase.cpp +++ b/searchlib/src/vespa/searchlib/attribute/enumstorebase.cpp @@ -704,6 +704,10 @@ template class btree::BTreeIteratorBase<EnumStoreBase::Index, datastore::EntryRef, btree::NoAggregated, EnumTreeTraits::INTERNAL_SLOTS, EnumTreeTraits::LEAF_SLOTS, EnumTreeTraits::PATH_SIZE>; +template class btree::BTreeConstIterator<EnumStoreBase::Index, btree::BTreeNoLeafData, btree::NoAggregated, const EnumStoreComparatorWrapper, EnumTreeTraits>; + +template class 
btree::BTreeConstIterator<EnumStoreBase::Index, datastore::EntryRef, btree::NoAggregated, const EnumStoreComparatorWrapper, EnumTreeTraits>; + template class btree::BTreeIterator<EnumStoreBase::Index, btree::BTreeNoLeafData, btree::NoAggregated, const EnumStoreComparatorWrapper, EnumTreeTraits>; diff --git a/searchlib/src/vespa/searchlib/attribute/enumstorebase.h b/searchlib/src/vespa/searchlib/attribute/enumstorebase.h index 48bf4a56874..d8604a5a85e 100644 --- a/searchlib/src/vespa/searchlib/attribute/enumstorebase.h +++ b/searchlib/src/vespa/searchlib/attribute/enumstorebase.h @@ -467,6 +467,10 @@ extern template class btree::BTreeIteratorBase<EnumStoreBase::Index, datastore::EntryRef, btree::NoAggregated, EnumTreeTraits::INTERNAL_SLOTS, EnumTreeTraits::LEAF_SLOTS, EnumTreeTraits::PATH_SIZE>; +extern template class btree::BTreeConstIterator<EnumStoreBase::Index, btree::BTreeNoLeafData, btree::NoAggregated, const EnumStoreComparatorWrapper, EnumTreeTraits>; + +extern template class btree::BTreeConstIterator<EnumStoreBase::Index, datastore::EntryRef, btree::NoAggregated, const EnumStoreComparatorWrapper, EnumTreeTraits>; + extern template class btree::BTreeIterator<EnumStoreBase::Index, btree::BTreeNoLeafData, btree::NoAggregated, const EnumStoreComparatorWrapper, EnumTreeTraits>; diff --git a/searchlib/src/vespa/searchlib/diskindex/fieldwriter.cpp b/searchlib/src/vespa/searchlib/diskindex/fieldwriter.cpp index ae308db1a4f..19157c80197 100644 --- a/searchlib/src/vespa/searchlib/diskindex/fieldwriter.cpp +++ b/searchlib/src/vespa/searchlib/diskindex/fieldwriter.cpp @@ -38,7 +38,7 @@ FieldWriter::open(const vespalib::string &prefix, uint32_t minSkipDocs, uint32_t minChunkDocs, bool dynamicKPosOccFormat, - bool encode_cheap_features, + bool encode_interleaved_features, const Schema &schema, const uint32_t indexId, const FieldLengthInfo &field_length_info, @@ -62,8 +62,8 @@ FieldWriter::open(const vespalib::string &prefix, countParams.set("minChunkDocs", minChunkDocs); 
params.set("minChunkDocs", minChunkDocs); } - if (encode_cheap_features) { - params.set("cheap_features", encode_cheap_features); + if (encode_interleaved_features) { + params.set("interleaved_features", encode_interleaved_features); } _dictFile = std::make_unique<PageDict4FileSeqWrite>(); diff --git a/searchlib/src/vespa/searchlib/diskindex/fieldwriter.h b/searchlib/src/vespa/searchlib/diskindex/fieldwriter.h index 69c763bbd77..0d205fb9d98 100644 --- a/searchlib/src/vespa/searchlib/diskindex/fieldwriter.h +++ b/searchlib/src/vespa/searchlib/diskindex/fieldwriter.h @@ -69,7 +69,7 @@ public: bool open(const vespalib::string &prefix, uint32_t minSkipDocs, uint32_t minChunkDocs, bool dynamicKPosOccFormat, - bool encode_cheap_features, + bool encode_interleaved_features, const Schema &schema, uint32_t indexId, const index::FieldLengthInfo &field_length_info, const TuneFileSeqWrite &tuneFileWrite, diff --git a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_params.h b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_params.h index 8a5564d1cf7..7face2bcef9 100644 --- a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_params.h +++ b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_params.h @@ -15,15 +15,15 @@ struct Zc4PostingParams { uint32_t _doc_id_limit; bool _dynamic_k; bool _encode_features; - bool _encode_cheap_features; + bool _encode_interleaved_features; - Zc4PostingParams(uint32_t min_skip_docs, uint32_t min_chunk_docs, uint32_t doc_id_limit, bool dynamic_k, bool encode_features, bool encode_cheap_features) + Zc4PostingParams(uint32_t min_skip_docs, uint32_t min_chunk_docs, uint32_t doc_id_limit, bool dynamic_k, bool encode_features, bool encode_interleaved_features) : _min_skip_docs(min_skip_docs), _min_chunk_docs(min_chunk_docs), _doc_id_limit(doc_id_limit), _dynamic_k(dynamic_k), _encode_features(encode_features), - _encode_cheap_features(encode_cheap_features) + _encode_interleaved_features(encode_interleaved_features) { } }; diff --git 
a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader.cpp b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader.cpp index ab2598211bb..71a836fb0ac 100644 --- a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader.cpp +++ b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader.cpp @@ -50,7 +50,7 @@ Zc4PostingReader<bigEndian>::read_doc_id_and_features(DocIdAndFeatures &features UC64_DECODEEXPGOLOMB_SMALL_NS(o, _doc_id_k, EC); _no_skip.set_doc_id(_no_skip.get_doc_id() + 1 + val64); - if (_posting_params._encode_cheap_features) { + if (_posting_params._encode_interleaved_features) { if (__builtin_expect(oCompr >= d._valE, false)) { UC64_DECODECONTEXT_STORE(o, d._); _readContext.readComprBuffer(); @@ -73,7 +73,7 @@ Zc4PostingReader<bigEndian>::read_doc_id_and_features(DocIdAndFeatures &features } features.set_doc_id(_no_skip.get_doc_id()); if (_posting_params._encode_features) { - if (_posting_params._encode_cheap_features) { + if (_posting_params._encode_interleaved_features) { features.set_field_length(_no_skip.get_field_length()); features.set_num_occs(_no_skip.get_num_occs()); } diff --git a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader_base.cpp b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader_base.cpp index 6bc854e08af..198a3beebfc 100644 --- a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader_base.cpp +++ b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader_base.cpp @@ -51,11 +51,11 @@ Zc4PostingReaderBase::NoSkip::NoSkip() Zc4PostingReaderBase::NoSkip::~NoSkip() = default; void -Zc4PostingReaderBase::NoSkip::read(bool decode_cheap_features) +Zc4PostingReaderBase::NoSkip::read(bool decode_interleaved_features) { assert(_zc_buf._valI < _zc_buf._valE); _doc_id += (_zc_buf.decode()+ 1); - if (decode_cheap_features) { + if (decode_interleaved_features) { _field_length = _zc_buf.decode() + 1; _num_occs = _zc_buf.decode() + 1; } @@ -219,7 +219,7 @@ 
Zc4PostingReaderBase::read_common_word_doc_id(DecodeContext64Base &decode_contex } _l1_skip.next_skip_entry(); } - _no_skip.read(_posting_params._encode_cheap_features); + _no_skip.read(_posting_params._encode_interleaved_features); if (_residue == 1) { _no_skip.check_end(_last_doc_id); _l1_skip.check_end(_last_doc_id); diff --git a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader_base.h b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader_base.h index 9eff9117450..e3de8b61b08 100644 --- a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader_base.h +++ b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_reader_base.h @@ -31,7 +31,7 @@ protected: ~NoSkipBase(); void setup(DecodeContext &decode_context, uint32_t size, uint32_t doc_id); void set_features_pos(uint64_t features_pos) { _features_pos = features_pos; } - void read(bool decode_cheap_features); + void read(bool decode_interleaved_features); void check_end(uint32_t last_doc_id); uint32_t get_doc_id() const { return _doc_id; } uint32_t get_doc_id_pos() const { return _doc_id_pos; } @@ -45,7 +45,7 @@ protected: public: NoSkip(); ~NoSkip(); - void read(bool decode_cheap_features); + void read(bool decode_interleaved_features); void check_not_end(uint32_t last_doc_id); uint32_t get_field_length() const { return _field_length; } uint32_t get_num_occs() const { return _num_occs; } diff --git a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer.cpp b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer.cpp index ad8f7440bdc..58916b2cfac 100644 --- a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer.cpp +++ b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer.cpp @@ -191,7 +191,7 @@ Zc4PostingWriter<bigEndian>::flush_word_no_skip() uint32_t featureSize = dit->_features_size; e.encodeExpGolomb(docId - baseDocId, docIdK); baseDocId = docId + 1; - if (_encode_cheap_features) { + if (_encode_interleaved_features) { assert(dit->_field_length > 0); 
e.encodeExpGolomb(dit->_field_length - 1, K_VALUE_ZCPOSTING_FIELD_LENGTH); assert(dit->_num_occs > 0); diff --git a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer_base.cpp b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer_base.cpp index 6e0cf6ed881..04a15213f6a 100644 --- a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer_base.cpp +++ b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer_base.cpp @@ -25,7 +25,7 @@ public: { } - void write(ZcBuf &zc_buf, const DocIdAndFeatureSize &doc_id_and_feature_size, bool encode_cheap_features); + void write(ZcBuf &zc_buf, const DocIdAndFeatureSize &doc_id_and_feature_size, bool encode_interleaved_features); void set_doc_id(uint32_t doc_id) { _doc_id = doc_id; } uint32_t get_doc_id() const { return _doc_id; } uint32_t get_doc_id_pos() const { return _doc_id_pos; } @@ -100,12 +100,12 @@ public: }; void -DocIdEncoder::write(ZcBuf &zc_buf, const DocIdAndFeatureSize &doc_id_and_feature_size, bool encode_cheap_features) +DocIdEncoder::write(ZcBuf &zc_buf, const DocIdAndFeatureSize &doc_id_and_feature_size, bool encode_interleaved_features) { _feature_pos += doc_id_and_feature_size._features_size; zc_buf.encode(doc_id_and_feature_size._doc_id - _doc_id - 1); _doc_id = doc_id_and_feature_size._doc_id; - if (encode_cheap_features) { + if (encode_interleaved_features) { assert(doc_id_and_feature_size._field_length > 0); zc_buf.encode(doc_id_and_feature_size._field_length - 1); assert(doc_id_and_feature_size._num_occs > 0); @@ -205,7 +205,7 @@ Zc4PostingWriterBase::Zc4PostingWriterBase(PostingListCounts &counts) _featureOffset(0), _writePos(0), _dynamicK(false), - _encode_cheap_features(false), + _encode_interleaved_features(false), _zcDocIds(), _l1Skip(), _l2Skip(), @@ -264,7 +264,7 @@ Zc4PostingWriterBase::calc_skip_info(bool encode_features) } } } - doc_id_encoder.write(_zcDocIds, doc_id_and_feature_size, _encode_cheap_features); + doc_id_encoder.write(_zcDocIds, doc_id_and_feature_size, 
_encode_interleaved_features); } // Extra partial entries for skip tables to simplify iterator during search l1_skip_encoder.write_partial_skip(_l1Skip, doc_id_encoder.get_doc_id()); @@ -289,7 +289,7 @@ Zc4PostingWriterBase::set_posting_list_params(const PostingListParams ¶ms) params.get("docIdLimit", _docIdLimit); params.get("minChunkDocs", _minChunkDocs); params.get("minSkipDocs", _minSkipDocs); - params.get("cheap_features", _encode_cheap_features); + params.get("interleaved_features", _encode_interleaved_features); } } diff --git a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer_base.h b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer_base.h index bb94e379c38..e9b1efa5c7d 100644 --- a/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer_base.h +++ b/searchlib/src/vespa/searchlib/diskindex/zc4_posting_writer_base.h @@ -43,7 +43,7 @@ protected: uint64_t _featureOffset; // Bit offset of next feature uint64_t _writePos; // Bit position for start of current word bool _dynamicK; // Caclulate EG compression parameters ? 
- bool _encode_cheap_features; + bool _encode_interleaved_features; ZcBuf _zcDocIds; // Document id deltas ZcBuf _l1Skip; // L1 skip info ZcBuf _l2Skip; // L2 skip info @@ -72,9 +72,9 @@ public: uint32_t get_docid_limit() const { return _docIdLimit; } uint64_t get_num_words() const { return _numWords; } bool get_dynamic_k() const { return _dynamicK; } - bool get_encode_cheap_features() const { return _encode_cheap_features; } + bool get_encode_interleaved_features() const { return _encode_interleaved_features; } void set_dynamic_k(bool dynamicK) { _dynamicK = dynamicK; } - void set_encode_cheap_features(bool encode_cheap_features) { _encode_cheap_features = encode_cheap_features; } + void set_encode_interleaved_features(bool encode_interleaved_features) { _encode_interleaved_features = encode_interleaved_features; } void set_posting_list_params(const index::PostingListParams ¶ms); }; diff --git a/searchlib/src/vespa/searchlib/diskindex/zcposocciterators.cpp b/searchlib/src/vespa/searchlib/diskindex/zcposocciterators.cpp index b71a1505eac..d3df8792e0e 100644 --- a/searchlib/src/vespa/searchlib/diskindex/zcposocciterators.cpp +++ b/searchlib/src/vespa/searchlib/diskindex/zcposocciterators.cpp @@ -17,13 +17,13 @@ using search::index::PostingListCounts; template <bool bigEndian, bool dynamic_k> ZcRareWordPosOccIterator<bigEndian, dynamic_k>:: ZcRareWordPosOccIterator(Position start, uint64_t bitLength, uint32_t docIdLimit, - bool decode_normal_features, bool decode_cheap_features, - bool unpack_normal_features, bool unpack_cheap_features, + bool decode_normal_features, bool decode_interleaved_features, + bool unpack_normal_features, bool unpack_interleaved_features, const PosOccFieldsParams *fieldsParams, const TermFieldMatchDataArray &matchData) : ZcRareWordPostingIterator<bigEndian, dynamic_k>(matchData, start, docIdLimit, - decode_normal_features, decode_cheap_features, - unpack_normal_features, unpack_cheap_features), + decode_normal_features, 
decode_interleaved_features, + unpack_normal_features, unpack_interleaved_features), _decodeContextReal(start.getOccurences(), start.getBitOffset(), bitLength, fieldsParams) { assert(!matchData.valid() || (fieldsParams->getNumFields() == matchData.size())); @@ -33,14 +33,14 @@ ZcRareWordPosOccIterator(Position start, uint64_t bitLength, uint32_t docIdLimit template <bool bigEndian, bool dynamic_k> ZcPosOccIterator<bigEndian, dynamic_k>:: ZcPosOccIterator(Position start, uint64_t bitLength, uint32_t docIdLimit, - bool decode_normal_features, bool decode_cheap_features, - bool unpack_normal_features, bool unpack_cheap_features, + bool decode_normal_features, bool decode_interleaved_features, + bool unpack_normal_features, bool unpack_interleaved_features, uint32_t minChunkDocs, const PostingListCounts &counts, const PosOccFieldsParams *fieldsParams, const TermFieldMatchDataArray &matchData) : ZcPostingIterator<bigEndian>(minChunkDocs, dynamic_k, counts, matchData, start, docIdLimit, - decode_normal_features, decode_cheap_features, - unpack_normal_features, unpack_cheap_features), + decode_normal_features, decode_interleaved_features, + unpack_normal_features, unpack_interleaved_features), _decodeContextReal(start.getOccurences(), start.getBitOffset(), bitLength, fieldsParams) { assert(!matchData.valid() || (fieldsParams->getNumFields() == matchData.size())); @@ -49,7 +49,7 @@ ZcPosOccIterator(Position start, uint64_t bitLength, uint32_t docIdLimit, template <bool bigEndian> std::unique_ptr<search::queryeval::SearchIterator> -create_zc_posocc_iterator(const PostingListCounts &counts, bitcompression::Position start, uint64_t bit_length, const Zc4PostingParams &posting_params, const bitcompression::PosOccFieldsParams &fields_params, const fef::TermFieldMatchDataArray &match_data, bool unpack_normal_features, bool unpack_cheap_features) +create_zc_posocc_iterator(const PostingListCounts &counts, bitcompression::Position start, uint64_t bit_length, const Zc4PostingParams 
&posting_params, const bitcompression::PosOccFieldsParams &fields_params, const fef::TermFieldMatchDataArray &match_data, bool unpack_normal_features, bool unpack_interleaved_features) { using EC = bitcompression::EncodeContext64<bigEndian>; bitcompression::DecodeContext64<bigEndian> d(start.getOccurences(), start.getBitOffset()); @@ -61,15 +61,15 @@ create_zc_posocc_iterator(const PostingListCounts &counts, bitcompression::Posit assert((num_docs == counts._numDocs) || ((num_docs == posting_params._min_chunk_docs) && (num_docs < counts._numDocs))); if (num_docs < posting_params._min_skip_docs) { if (posting_params._dynamic_k) { - return std::make_unique<ZcRareWordPosOccIterator<bigEndian, true>>(start, bit_length, posting_params._doc_id_limit, posting_params._encode_features, posting_params._encode_cheap_features, unpack_normal_features, unpack_cheap_features, &fields_params, match_data); + return std::make_unique<ZcRareWordPosOccIterator<bigEndian, true>>(start, bit_length, posting_params._doc_id_limit, posting_params._encode_features, posting_params._encode_interleaved_features, unpack_normal_features, unpack_interleaved_features, &fields_params, match_data); } else { - return std::make_unique<ZcRareWordPosOccIterator<bigEndian, false>>(start, bit_length, posting_params._doc_id_limit, posting_params._encode_features, posting_params._encode_cheap_features, unpack_normal_features, unpack_cheap_features, &fields_params, match_data); + return std::make_unique<ZcRareWordPosOccIterator<bigEndian, false>>(start, bit_length, posting_params._doc_id_limit, posting_params._encode_features, posting_params._encode_interleaved_features, unpack_normal_features, unpack_interleaved_features, &fields_params, match_data); } } else { if (posting_params._dynamic_k) { - return std::make_unique<ZcPosOccIterator<bigEndian, true>>(start, bit_length, posting_params._doc_id_limit, posting_params._encode_features, posting_params._encode_cheap_features, unpack_normal_features, 
unpack_cheap_features, posting_params._min_chunk_docs, counts, &fields_params, match_data); + return std::make_unique<ZcPosOccIterator<bigEndian, true>>(start, bit_length, posting_params._doc_id_limit, posting_params._encode_features, posting_params._encode_interleaved_features, unpack_normal_features, unpack_interleaved_features, posting_params._min_chunk_docs, counts, &fields_params, match_data); } else { - return std::make_unique<ZcPosOccIterator<bigEndian, false>>(start, bit_length, posting_params._doc_id_limit, posting_params._encode_features, posting_params._encode_cheap_features, unpack_normal_features, unpack_cheap_features, posting_params._min_chunk_docs, counts, &fields_params, match_data); + return std::make_unique<ZcPosOccIterator<bigEndian, false>>(start, bit_length, posting_params._doc_id_limit, posting_params._encode_features, posting_params._encode_interleaved_features, unpack_normal_features, unpack_interleaved_features, posting_params._min_chunk_docs, counts, &fields_params, match_data); } } } @@ -78,11 +78,11 @@ std::unique_ptr<search::queryeval::SearchIterator> create_zc_posocc_iterator(bool bigEndian, const PostingListCounts &counts, bitcompression::Position start, uint64_t bit_length, const Zc4PostingParams &posting_params, const bitcompression::PosOccFieldsParams &fields_params, const fef::TermFieldMatchDataArray &match_data) { bool unpack_normal_features = match_data.valid() ? match_data[0]->needs_normal_features() : false; - bool unpack_cheap_features = match_data.valid() ? match_data[0]->needs_cheap_features() : false; + bool unpack_interleaved_features = match_data.valid() ? 
match_data[0]->needs_interleaved_features() : false; if (bigEndian) { - return create_zc_posocc_iterator<true>(counts, start, bit_length, posting_params, fields_params, match_data, unpack_normal_features, unpack_cheap_features); + return create_zc_posocc_iterator<true>(counts, start, bit_length, posting_params, fields_params, match_data, unpack_normal_features, unpack_interleaved_features); } else { - return create_zc_posocc_iterator<false>(counts, start, bit_length, posting_params, fields_params, match_data, unpack_normal_features, unpack_cheap_features); + return create_zc_posocc_iterator<false>(counts, start, bit_length, posting_params, fields_params, match_data, unpack_normal_features, unpack_interleaved_features); } } diff --git a/searchlib/src/vespa/searchlib/diskindex/zcposocciterators.h b/searchlib/src/vespa/searchlib/diskindex/zcposocciterators.h index 47c5455f259..3aeacd73735 100644 --- a/searchlib/src/vespa/searchlib/diskindex/zcposocciterators.h +++ b/searchlib/src/vespa/searchlib/diskindex/zcposocciterators.h @@ -20,8 +20,8 @@ private: DecodeContextReal _decodeContextReal; public: ZcRareWordPosOccIterator(Position start, uint64_t bitLength, uint32_t docIdLimit, - bool decode_normal_features, bool decode_cheap_features, - bool unpack_normal_features, bool unpack_cheap_features, + bool decode_normal_features, bool decode_interleaved_features, + bool unpack_normal_features, bool unpack_interleaved_features, const bitcompression::PosOccFieldsParams *fieldsParams, const fef::TermFieldMatchDataArray &matchData); }; @@ -38,8 +38,8 @@ private: DecodeContext _decodeContextReal; public: ZcPosOccIterator(Position start, uint64_t bitLength, uint32_t docIdLimit, - bool decode_normal_features, bool decode_cheap_features, - bool unpack_normal_features, bool unpack_cheap_features, + bool decode_normal_features, bool decode_interleaved_features, + bool unpack_normal_features, bool unpack_interleaved_features, uint32_t minChunkDocs, const index::PostingListCounts 
&counts, const bitcompression::PosOccFieldsParams *fieldsParams, const fef::TermFieldMatchDataArray &matchData); diff --git a/searchlib/src/vespa/searchlib/diskindex/zcposoccrandread.cpp b/searchlib/src/vespa/searchlib/diskindex/zcposoccrandread.cpp index aa4f15bc225..5f699f8a450 100644 --- a/searchlib/src/vespa/searchlib/diskindex/zcposoccrandread.cpp +++ b/searchlib/src/vespa/searchlib/diskindex/zcposoccrandread.cpp @@ -26,7 +26,7 @@ namespace { vespalib::string myId4("Zc.4"); vespalib::string myId5("Zc.5"); -vespalib::string cheap_features("cheap_features"); +vespalib::string interleaved_features("interleaved_features"); } @@ -214,8 +214,8 @@ ZcPosOccRandRead::readHeader(const vespalib::string &identifier) _posting_params._min_chunk_docs = header.getTag("minChunkDocs").asInteger(); _posting_params._doc_id_limit = header.getTag("docIdLimit").asInteger(); _posting_params._min_skip_docs = header.getTag("minSkipDocs").asInteger(); - if (header.hasTag(cheap_features) && (header.getTag(cheap_features).asInteger() != 0)) { - _posting_params._encode_cheap_features = true; + if (header.hasTag(interleaved_features) && (header.getTag(interleaved_features).asInteger() != 0)) { + _posting_params._encode_interleaved_features = true; } // Read feature decoding specific subheader d.readHeader(header, "features."); diff --git a/searchlib/src/vespa/searchlib/diskindex/zcposting.cpp b/searchlib/src/vespa/searchlib/diskindex/zcposting.cpp index b03085b0b55..3f154c44cb9 100644 --- a/searchlib/src/vespa/searchlib/diskindex/zcposting.cpp +++ b/searchlib/src/vespa/searchlib/diskindex/zcposting.cpp @@ -16,7 +16,7 @@ namespace { vespalib::string myId5("Zc.5"); vespalib::string myId4("Zc.4"); vespalib::string emptyId; -vespalib::string cheap_features("cheap_features"); +vespalib::string interleaved_features("interleaved_features"); } @@ -166,8 +166,8 @@ Zc4PostingSeqRead::readHeader() posting_params._min_chunk_docs = header.getTag("minChunkDocs").asInteger(); posting_params._doc_id_limit 
= header.getTag("docIdLimit").asInteger(); posting_params._min_skip_docs = header.getTag("minSkipDocs").asInteger(); - if (header.hasTag(cheap_features) && (header.getTag(cheap_features).asInteger() != 0)) { - posting_params._encode_cheap_features = true; + if (header.hasTag(interleaved_features) && (header.getTag(interleaved_features).asInteger() != 0)) { + posting_params._encode_interleaved_features = true; } assert(header.getTag("endian").asString() == "big"); // Read feature decoding specific subheader @@ -237,7 +237,7 @@ Zc4PostingSeqWrite::makeHeader(const FileHeaderContext &fileHeaderContext) header.putTag(Tag("fileBitSize", 0)); header.putTag(Tag("format.0", myId)); header.putTag(Tag("format.1", f.getIdentifier())); - header.putTag(Tag("cheap_features", _writer.get_encode_cheap_features() ? 1 : 0)); + header.putTag(Tag("interleaved_features", _writer.get_encode_interleaved_features() ? 1 : 0)); header.putTag(Tag("numWords", 0)); header.putTag(Tag("minChunkDocs", _writer.get_min_chunk_docs())); header.putTag(Tag("docIdLimit", _writer.get_docid_limit())); diff --git a/searchlib/src/vespa/searchlib/diskindex/zcpostingiterators.cpp b/searchlib/src/vespa/searchlib/diskindex/zcpostingiterators.cpp index 8573f674493..c90a2f7993c 100644 --- a/searchlib/src/vespa/searchlib/diskindex/zcpostingiterators.cpp +++ b/searchlib/src/vespa/searchlib/diskindex/zcpostingiterators.cpp @@ -38,17 +38,17 @@ ZcIteratorBase::initRange(uint32_t beginid, uint32_t endid) template <bool bigEndian> ZcRareWordPostingIteratorBase<bigEndian>:: ZcRareWordPostingIteratorBase(const TermFieldMatchDataArray &matchData, Position start, uint32_t docIdLimit, - bool decode_normal_features, bool decode_cheap_features, - bool unpack_normal_features, bool unpack_cheap_features) + bool decode_normal_features, bool decode_interleaved_features, + bool unpack_normal_features, bool unpack_interleaved_features) : ZcIteratorBase(matchData, start, docIdLimit), _decodeContext(nullptr), _residue(0), 
_prevDocId(0), _numDocs(0), _decode_normal_features(decode_normal_features), - _decode_cheap_features(decode_cheap_features), + _decode_interleaved_features(decode_interleaved_features), _unpack_normal_features(unpack_normal_features), - _unpack_cheap_features(unpack_cheap_features), + _unpack_interleaved_features(unpack_interleaved_features), _field_length(0), _num_occs(0) { } @@ -57,11 +57,11 @@ ZcRareWordPostingIteratorBase(const TermFieldMatchDataArray &matchData, Position template <bool bigEndian, bool dynamic_k> ZcRareWordPostingIterator<bigEndian, dynamic_k>:: ZcRareWordPostingIterator(const TermFieldMatchDataArray &matchData, Position start, uint32_t docIdLimit, - bool decode_normal_features, bool decode_cheap_features, - bool unpack_normal_features, bool unpack_cheap_features) + bool decode_normal_features, bool decode_interleaved_features, + bool unpack_normal_features, bool unpack_interleaved_features) : ZcRareWordPostingIteratorBase<bigEndian>(matchData, start, docIdLimit, - decode_normal_features, decode_cheap_features, - unpack_normal_features, unpack_cheap_features), + decode_normal_features, decode_interleaved_features, + unpack_normal_features, unpack_interleaved_features), _doc_id_k_param() { } @@ -88,7 +88,7 @@ ZcRareWordPostingIterator<bigEndian, dynamic_k>::doSeek(uint32_t docId) printf("Decode docId=%d\n", oDocId); #endif - if (_decode_cheap_features) { + if (_decode_interleaved_features) { UC64_DECODEEXPGOLOMB_NS(o, K_VALUE_ZCPOSTING_FIELD_LENGTH, EC); _field_length = static_cast<uint32_t>(val64) + 1; UC64_DECODEEXPGOLOMB_NS(o, K_VALUE_ZCPOSTING_NUM_OCCS, EC); @@ -110,7 +110,7 @@ ZcRareWordPostingIterator<bigEndian, dynamic_k>::doSeek(uint32_t docId) printf("Decode docId=%d\n", oDocId); #endif - if (_decode_cheap_features) { + if (_decode_interleaved_features) { UC64_DECODEEXPGOLOMB_NS(o, K_VALUE_ZCPOSTING_FIELD_LENGTH, EC); _field_length = static_cast<uint32_t>(val64) + 1; UC64_DECODEEXPGOLOMB_NS(o, K_VALUE_ZCPOSTING_NUM_OCCS, EC); @@ -144,7 
+144,7 @@ ZcRareWordPostingIteratorBase<bigEndian>::doUnpack(uint32_t docId) } else { _matchData[0]->reset(docId); } - if (_decode_cheap_features && _unpack_cheap_features) { + if (_decode_interleaved_features && _unpack_interleaved_features) { TermFieldMatchData *tfmd = _matchData[0]; tfmd->setFieldLength(_field_length); tfmd->setNumOccs(_num_occs); @@ -174,7 +174,7 @@ ZcRareWordPostingIterator<bigEndian, dynamic_k>::readWordStart(uint32_t docIdLim _doc_id_k_param.setup(_numDocs, docIdLimit); UC64_DECODEEXPGOLOMB_NS(o, _doc_id_k_param.get_doc_id_k(), EC); uint32_t docId = static_cast<uint32_t>(val64) + 1; - if (_decode_cheap_features) { + if (_decode_interleaved_features) { UC64_DECODEEXPGOLOMB_NS(o, K_VALUE_ZCPOSTING_FIELD_LENGTH, EC); _field_length = static_cast<uint32_t>(val64) + 1; UC64_DECODEEXPGOLOMB_NS(o, K_VALUE_ZCPOSTING_NUM_OCCS, EC); @@ -188,8 +188,8 @@ ZcRareWordPostingIterator<bigEndian, dynamic_k>::readWordStart(uint32_t docIdLim } ZcPostingIteratorBase::ZcPostingIteratorBase(const TermFieldMatchDataArray &matchData, Position start, uint32_t docIdLimit, - bool decode_normal_features, bool decode_cheap_features, - bool unpack_normal_features, bool unpack_cheap_features) + bool decode_normal_features, bool decode_interleaved_features, + bool unpack_normal_features, bool unpack_interleaved_features) : ZcIteratorBase(matchData, start, docIdLimit), _valI(nullptr), _valIBase(nullptr), @@ -202,9 +202,9 @@ ZcPostingIteratorBase::ZcPostingIteratorBase(const TermFieldMatchDataArray &matc _featuresSize(0), _hasMore(false), _decode_normal_features(decode_normal_features), - _decode_cheap_features(decode_cheap_features), + _decode_interleaved_features(decode_interleaved_features), _unpack_normal_features(unpack_normal_features), - _unpack_cheap_features(unpack_cheap_features), + _unpack_interleaved_features(unpack_interleaved_features), _chunkNo(0), _field_length(0), _num_occs(0) @@ -218,11 +218,11 @@ ZcPostingIterator(uint32_t minChunkDocs, const 
PostingListCounts &counts, const search::fef::TermFieldMatchDataArray &matchData, Position start, uint32_t docIdLimit, - bool decode_normal_features, bool decode_cheap_features, - bool unpack_normal_features, bool unpack_cheap_features) + bool decode_normal_features, bool decode_interleaved_features, + bool unpack_normal_features, bool unpack_interleaved_features) : ZcPostingIteratorBase(matchData, start, docIdLimit, - decode_normal_features, decode_cheap_features, - unpack_normal_features, unpack_cheap_features), + decode_normal_features, decode_interleaved_features, + unpack_normal_features, unpack_interleaved_features), _decodeContext(nullptr), _minChunkDocs(minChunkDocs), _docIdK(0), @@ -558,7 +558,7 @@ ZcPostingIteratorBase::doSeek(uint32_t docId) printf("Decode docId=%d\n", oDocId); #endif - if (_decode_cheap_features) { + if (_decode_interleaved_features) { ZCDECODE(oCompr, field_length = 1 +); ZCDECODE(oCompr, num_occs = 1 +); } @@ -566,7 +566,7 @@ ZcPostingIteratorBase::doSeek(uint32_t docId) } _valI = oCompr; setDocId(oDocId); - if (_decode_cheap_features) { + if (_decode_interleaved_features) { _field_length = field_length; _num_occs = num_occs; } @@ -596,7 +596,7 @@ ZcPostingIterator<bigEndian>::doUnpack(uint32_t docId) } else { _matchData[0]->reset(docId); } - if (_decode_cheap_features && _unpack_cheap_features) { + if (_decode_interleaved_features && _unpack_interleaved_features) { TermFieldMatchData *tfmd = _matchData[0]; tfmd->setFieldLength(_field_length); tfmd->setNumOccs(_num_occs); diff --git a/searchlib/src/vespa/searchlib/diskindex/zcpostingiterators.h b/searchlib/src/vespa/searchlib/diskindex/zcpostingiterators.h index d97c276edfd..297659cd1fe 100644 --- a/searchlib/src/vespa/searchlib/diskindex/zcpostingiterators.h +++ b/searchlib/src/vespa/searchlib/diskindex/zcpostingiterators.h @@ -69,15 +69,15 @@ public: uint32_t _prevDocId; // Previous document id uint32_t _numDocs; // Documents in chunk or word bool _decode_normal_features; - bool 
_decode_cheap_features; + bool _decode_interleaved_features; bool _unpack_normal_features; - bool _unpack_cheap_features; + bool _unpack_interleaved_features; uint32_t _field_length; uint32_t _num_occs; ZcRareWordPostingIteratorBase(const fef::TermFieldMatchDataArray &matchData, Position start, uint32_t docIdLimit, - bool decode_normal_features, bool decode_cheap_features, - bool unpack_normal_features, bool unpack_cheap_features); + bool decode_normal_features, bool decode_interleaved_features, + bool unpack_normal_features, bool unpack_interleaved_features); void doUnpack(uint32_t docId) override; void rewind(Position start) override; @@ -120,17 +120,17 @@ class ZcRareWordPostingIterator : public ZcRareWordPostingIteratorBase<bigEndian using ParentClass::setAtEnd; using ParentClass::_numDocs; using ParentClass::_decode_normal_features; - using ParentClass::_decode_cheap_features; + using ParentClass::_decode_interleaved_features; using ParentClass::_unpack_normal_features; - using ParentClass::_unpack_cheap_features; + using ParentClass::_unpack_interleaved_features; using ParentClass::_field_length; using ParentClass::_num_occs; ZcPostingDocIdKParam<dynamic_k> _doc_id_k_param; public: using ParentClass::_decodeContext; ZcRareWordPostingIterator(const fef::TermFieldMatchDataArray &matchData, Position start, uint32_t docIdLimit, - bool decode_normal_features, bool decode_cheap_features, - bool unpack_normal_features, bool unpack_cheap_features); + bool decode_normal_features, bool decode_interleaved_features, + bool unpack_normal_features, bool unpack_interleaved_features); void doSeek(uint32_t docId) override; void readWordStart(uint32_t docIdLimit) override; }; @@ -276,9 +276,9 @@ protected: uint64_t _featuresSize; bool _hasMore; bool _decode_normal_features; - bool _decode_cheap_features; + bool _decode_interleaved_features; bool _unpack_normal_features; - bool _unpack_cheap_features; + bool _unpack_interleaved_features; uint32_t _chunkNo; uint32_t 
_field_length; uint32_t _num_occs; @@ -287,7 +287,7 @@ protected: uint32_t docId = prevDocId + 1; ZCDECODE(_valI, docId +=); setDocId(docId); - if (_decode_cheap_features) { + if (_decode_interleaved_features) { ZCDECODE(_valI, _field_length = 1 +); ZCDECODE(_valI, _num_occs = 1 +); } @@ -301,8 +301,8 @@ protected: void doSeek(uint32_t docId) override; public: ZcPostingIteratorBase(const fef::TermFieldMatchDataArray &matchData, Position start, uint32_t docIdLimit, - bool decode_normal_features, bool decode_cheap_features, - bool unpack_normal_features, bool unpack_cheap_features); + bool decode_normal_features, bool decode_interleaved_features, + bool unpack_normal_features, bool unpack_interleaved_features); }; template <bool bigEndian> @@ -330,8 +330,8 @@ public: ZcPostingIterator(uint32_t minChunkDocs, bool dynamicK, const PostingListCounts &counts, const search::fef::TermFieldMatchDataArray &matchData, Position start, uint32_t docIdLimit, - bool decode_normal_features, bool decode_cheap_features, - bool unpack_normal_features, bool unpack_cheap_features); + bool decode_normal_features, bool decode_interleaved_features, + bool unpack_normal_features, bool unpack_interleaved_features); void doUnpack(uint32_t docId) override; diff --git a/searchlib/src/vespa/searchlib/expression/resultvector.h b/searchlib/src/vespa/searchlib/expression/resultvector.h index cd29178f24f..f1f863edf12 100644 --- a/searchlib/src/vespa/searchlib/expression/resultvector.h +++ b/searchlib/src/vespa/searchlib/expression/resultvector.h @@ -11,6 +11,7 @@ #include "stringbucketresultnode.h" #include "rawbucketresultnode.h" #include <vespa/vespalib/objects/visit.hpp> +#include <vespa/vespalib/stllike/identity.h> #include <algorithm> namespace search::expression { @@ -214,7 +215,7 @@ struct GetString { }; template <typename B> -class NumericResultNodeVectorT : public ResultNodeVectorT<B, cmpT<ResultNode>, std::_Identity<ResultNode> > +class NumericResultNodeVectorT : public ResultNodeVectorT<B, 
cmpT<ResultNode>, vespalib::Identity> { public: ResultNode & flattenMultiply(ResultNode & r) const override { @@ -366,7 +367,7 @@ public: const FloatBucketResultNode& getNullBucket() const override { return FloatBucketResultNode::getNull(); } }; -class StringResultNodeVector : public ResultNodeVectorT<StringResultNode, cmpT<ResultNode>, std::_Identity<ResultNode> > +class StringResultNodeVector : public ResultNodeVectorT<StringResultNode, cmpT<ResultNode>, vespalib::Identity> { public: StringResultNodeVector() { } @@ -375,7 +376,7 @@ public: const StringBucketResultNode& getNullBucket() const override { return StringBucketResultNode::getNull(); } }; -class RawResultNodeVector : public ResultNodeVectorT<RawResultNode, cmpT<ResultNode>, std::_Identity<ResultNode> > +class RawResultNodeVector : public ResultNodeVectorT<RawResultNode, cmpT<ResultNode>, vespalib::Identity> { public: RawResultNodeVector() { } diff --git a/searchlib/src/vespa/searchlib/features/bm25_feature.cpp b/searchlib/src/vespa/searchlib/features/bm25_feature.cpp index 6e889b48343..e16b4bba996 100644 --- a/searchlib/src/vespa/searchlib/features/bm25_feature.cpp +++ b/searchlib/src/vespa/searchlib/features/bm25_feature.cpp @@ -47,16 +47,17 @@ Bm25Executor::Bm25Executor(const fef::FieldInfo& field, : FeatureExecutor(), _terms(), _avg_field_length(avg_field_length), - _k1_param(k1_param), - _b_param(b_param) + _k1_mul_b(k1_param * b_param), + _k1_mul_one_minus_b(k1_param * (1 - b_param)) { for (size_t i = 0; i < env.getNumTerms(); ++i) { const ITermData* term = env.getTerm(i); for (size_t j = 0; j < term->numFields(); ++j) { const ITermFieldData& term_field = term->field(j); if (field.id() == term_field.getFieldId()) { - _terms.emplace_back(term_field.getHandle(MatchDataDetails::Cheap), - get_inverse_document_frequency(term_field, env, *term)); + _terms.emplace_back(term_field.getHandle(MatchDataDetails::Interleaved), + get_inverse_document_frequency(term_field, env, *term), + k1_param); } } } @@ -86,8 
+87,8 @@ Bm25Executor::execute(uint32_t doc_id) feature_t num_occs = term.tfmd->getNumOccs(); feature_t norm_field_length = ((feature_t)term.tfmd->getFieldLength()) / _avg_field_length; - feature_t numerator = term.inverse_doc_freq * num_occs * (_k1_param + 1); - feature_t denominator = num_occs + (_k1_param * (1 - _b_param + (_b_param * norm_field_length))); + feature_t numerator = num_occs * term.idf_mul_k1_plus_one; + feature_t denominator = num_occs + (_k1_mul_one_minus_b + _k1_mul_b * norm_field_length); score += numerator / denominator; } diff --git a/searchlib/src/vespa/searchlib/features/bm25_feature.h b/searchlib/src/vespa/searchlib/features/bm25_feature.h index 533c7487a2f..0afd14e7ac8 100644 --- a/searchlib/src/vespa/searchlib/features/bm25_feature.h +++ b/searchlib/src/vespa/searchlib/features/bm25_feature.h @@ -13,11 +13,11 @@ private: struct QueryTerm { fef::TermFieldHandle handle; const fef::TermFieldMatchData* tfmd; - double inverse_doc_freq; - QueryTerm(fef::TermFieldHandle handle_, double inverse_doc_freq_) + double idf_mul_k1_plus_one; + QueryTerm(fef::TermFieldHandle handle_, double inverse_doc_freq, double k1_param) : handle(handle_), tfmd(nullptr), - inverse_doc_freq(inverse_doc_freq_) + idf_mul_k1_plus_one(inverse_doc_freq * (k1_param + 1)) {} }; @@ -25,8 +25,11 @@ private: QueryTermVector _terms; double _avg_field_length; - double _k1_param; // Determines term frequency saturation characteristics. - double _b_param; // Adjusts the effects of the field length of the document matched compared to the average field length. + + // The 'k1' param determines term frequency saturation characteristics. + // The 'b' param adjusts the effects of the field length of the document matched compared to the average field length. 
+ double _k1_mul_b; + double _k1_mul_one_minus_b; public: Bm25Executor(const fef::FieldInfo& field, diff --git a/searchlib/src/vespa/searchlib/features/dotproductfeature.cpp b/searchlib/src/vespa/searchlib/features/dotproductfeature.cpp index 1560d043be2..e3f4cee4836 100644 --- a/searchlib/src/vespa/searchlib/features/dotproductfeature.cpp +++ b/searchlib/src/vespa/searchlib/features/dotproductfeature.cpp @@ -816,6 +816,16 @@ make_queryvector_key(const vespalib::string & base, const vespalib::string & sub return key; } +const vespalib::string & +make_queryvector_key_for_attribute(const IAttributeVector & attribute, const vespalib::string & key, vespalib::string & scratchPad) { + if (attribute.hasEnum() && (attribute.getCollectionType() == attribute::CollectionType::WSET)) { + scratchPad = key; + scratchPad.append(".").append(attribute.getName()); + return scratchPad; + } + return key; +} + vespalib::string make_attribute_key(const vespalib::string & base, const vespalib::string & subKey) { vespalib::string key(base); @@ -828,11 +838,12 @@ make_attribute_key(const vespalib::string & base, const vespalib::string & subKe const IAttributeVector * DotProductBlueprint::upgradeIfNecessary(const IAttributeVector * attribute, const IQueryEnvironment & env) const { - if ((attribute->getCollectionType() == attribute::CollectionType::WSET) && + if ((attribute != nullptr) && + (attribute->getCollectionType() == attribute::CollectionType::WSET) && attribute->hasEnum() && (attribute->isStringType() || attribute->isIntegerType())) { - attribute = env.getAttributeContext().getAttributeStableEnum(getAttribute(env)); + attribute = env.getAttributeContext().getAttributeStableEnum(attribute->getName()); } return attribute; } @@ -903,6 +914,7 @@ createQueryVector(const IQueryEnvironment & env, const IAttributeVector * attrib DotProductBlueprint::DotProductBlueprint() : Blueprint("dotProduct"), _defaultAttribute(), + _attributeOverride(), _queryVector(), _attrKey(), _queryVectorKey() @@ 
-910,10 +922,10 @@ DotProductBlueprint::DotProductBlueprint() : DotProductBlueprint::~DotProductBlueprint() = default; -vespalib::string +const vespalib::string & DotProductBlueprint::getAttribute(const IQueryEnvironment & env) const { - Property prop = env.getProperties().lookup(getBaseName(), _defaultAttribute + ".override.name"); + Property prop = env.getProperties().lookup(getBaseName(), _attributeOverride); if (prop.found() && !prop.get().empty()) { return prop.get(); } @@ -929,6 +941,7 @@ bool DotProductBlueprint::setup(const IIndexEnvironment & env, const ParameterList & params) { _defaultAttribute = params[0].getValue(); + _attributeOverride = _defaultAttribute + ".override.name"; _queryVector = params[1].getValue(); _attrKey = make_attribute_key(getBaseName(), _defaultAttribute); _queryVectorKey = make_queryvector_key(getBaseName(), _queryVector); @@ -959,7 +972,8 @@ DotProductBlueprint::prepareSharedState(const IQueryEnvironment & env, IObjectSt if (queryVector == nullptr) { fef::Anything::UP arguments = createQueryVector(env, attribute, getBaseName(), _queryVector); if (arguments) { - store.add(_queryVectorKey, std::move(arguments)); + vespalib::string scratchPad; + store.add(make_queryvector_key_for_attribute(*attribute, _queryVectorKey, scratchPad), std::move(arguments)); } } @@ -971,16 +985,20 @@ DotProductBlueprint::createExecutor(const IQueryEnvironment & env, vespalib::Sta { // Doing it "manually" here to avoid looking up attribute override unless needed. const fef::Anything * attributeArg = env.getObjectStore().get(_attrKey); - const IAttributeVector * attribute = (attributeArg != nullptr) - ? 
static_cast<const fef::AnyWrapper<const IAttributeVector *> *>(attributeArg)->getValue() - : env.getAttributeContext().getAttribute(getAttribute(env)); + const IAttributeVector * attribute = nullptr; + if (attributeArg != nullptr) { + attribute = static_cast<const fef::AnyWrapper<const IAttributeVector *> *>(attributeArg)->getValue(); + } else { + attribute = env.getAttributeContext().getAttribute(getAttribute(env)); + attribute = upgradeIfNecessary(attribute, env); + } if (attribute == nullptr) { LOG(warning, "The attribute vector '%s' was not found in the attribute manager, returning executor with default value.", getAttribute(env).c_str()); return stash.create<SingleZeroValueExecutor>(); } - attribute = upgradeIfNecessary(attribute, env); - const fef::Anything * queryVectorArg = env.getObjectStore().get(_queryVectorKey); + vespalib::string scratchPad; + const fef::Anything * queryVectorArg = env.getObjectStore().get(make_queryvector_key_for_attribute(*attribute, _queryVectorKey, scratchPad)); if (queryVectorArg != nullptr) { return createFromObject(attribute, *queryVectorArg, stash); } else { diff --git a/searchlib/src/vespa/searchlib/features/dotproductfeature.h b/searchlib/src/vespa/searchlib/features/dotproductfeature.h index d315a24ecb3..8d2b3d9de72 100644 --- a/searchlib/src/vespa/searchlib/features/dotproductfeature.h +++ b/searchlib/src/vespa/searchlib/features/dotproductfeature.h @@ -309,11 +309,12 @@ class DotProductBlueprint : public fef::Blueprint { private: using IAttributeVector = attribute::IAttributeVector; vespalib::string _defaultAttribute; + vespalib::string _attributeOverride; vespalib::string _queryVector; vespalib::string _attrKey; vespalib::string _queryVectorKey; - vespalib::string getAttribute(const fef::IQueryEnvironment & env) const; + const vespalib::string & getAttribute(const fef::IQueryEnvironment & env) const; const IAttributeVector * upgradeIfNecessary(const IAttributeVector * attribute, const fef::IQueryEnvironment & env) const; 
public: diff --git a/searchlib/src/vespa/searchlib/features/rankingexpression/intrinsic_blueprint_adapter.cpp b/searchlib/src/vespa/searchlib/features/rankingexpression/intrinsic_blueprint_adapter.cpp index 018da0e7bcd..4ff9d2f4e30 100644 --- a/searchlib/src/vespa/searchlib/features/rankingexpression/intrinsic_blueprint_adapter.cpp +++ b/searchlib/src/vespa/searchlib/features/rankingexpression/intrinsic_blueprint_adapter.cpp @@ -29,8 +29,11 @@ struct IntrinsicBlueprint : IntrinsicExpression { : blueprint(std::move(blueprint_in)), type(type_in) {} vespalib::string describe_self() const override { return blueprint->getName(); } const FeatureType &result_type() const override { return type; } - FeatureExecutor &create_executor(const QueryEnv &queryEnv, vespalib::Stash &stash) const override { - return blueprint->createExecutor(queryEnv, stash); + void prepare_shared_state(const QueryEnv & env, fef::IObjectStore & store) const override { + blueprint->prepareSharedState(env, store); + } + FeatureExecutor &create_executor(const QueryEnv &env, vespalib::Stash &stash) const override { + return blueprint->createExecutor(env, stash); } }; diff --git a/searchlib/src/vespa/searchlib/features/rankingexpression/intrinsic_expression.h b/searchlib/src/vespa/searchlib/features/rankingexpression/intrinsic_expression.h index 34c4f34b03f..79cb3f9035b 100644 --- a/searchlib/src/vespa/searchlib/features/rankingexpression/intrinsic_expression.h +++ b/searchlib/src/vespa/searchlib/features/rankingexpression/intrinsic_expression.h @@ -11,6 +11,7 @@ namespace search::fef { class FeatureType; class FeatureExecutor; class IQueryEnvironment; +class IObjectStore; } namespace search::features::rankingexpression { @@ -26,8 +27,8 @@ struct IntrinsicExpression { using UP = std::unique_ptr<IntrinsicExpression>; virtual vespalib::string describe_self() const = 0; virtual const FeatureType &result_type() const = 0; - virtual FeatureExecutor &create_executor(const QueryEnv &queryEnv, - vespalib::Stash 
&stash) const = 0; + virtual void prepare_shared_state(const QueryEnv & env, fef::IObjectStore & store) const = 0; + virtual FeatureExecutor &create_executor(const QueryEnv &queryEnv, vespalib::Stash &stash) const = 0; virtual ~IntrinsicExpression(); }; diff --git a/searchlib/src/vespa/searchlib/features/rankingexpressionfeature.cpp b/searchlib/src/vespa/searchlib/features/rankingexpressionfeature.cpp index b2c8c64d55a..2733ec62105 100644 --- a/searchlib/src/vespa/searchlib/features/rankingexpressionfeature.cpp +++ b/searchlib/src/vespa/searchlib/features/rankingexpressionfeature.cpp @@ -134,10 +134,14 @@ CompiledRankingExpressionExecutor::execute(uint32_t) //----------------------------------------------------------------------------- +namespace { + using Context = fef::FeatureExecutor::Inputs; double resolve_input(void *ctx, size_t idx) { return ((const Context *)(ctx))->get_number(idx); } Context *make_ctx(const Context &inputs) { return const_cast<Context *>(&inputs); } +} + LazyCompiledRankingExpressionExecutor::LazyCompiledRankingExpressionExecutor(const CompiledFunction &compiled_function) : _ranking_function(compiled_function.get_lazy_function()) { @@ -278,6 +282,14 @@ RankingExpressionBlueprint::createInstance() const return std::make_unique<RankingExpressionBlueprint>(_expression_replacer); } +void +RankingExpressionBlueprint::prepareSharedState(const fef::IQueryEnvironment & env, fef::IObjectStore & store) const +{ + if (_intrinsic_expression) { + return _intrinsic_expression->prepare_shared_state(env, store); + } +} + fef::FeatureExecutor & RankingExpressionBlueprint::createExecutor(const fef::IQueryEnvironment &env, vespalib::Stash &stash) const { diff --git a/searchlib/src/vespa/searchlib/features/rankingexpressionfeature.h b/searchlib/src/vespa/searchlib/features/rankingexpressionfeature.h index 8d5144206ea..104e8d63a70 100644 --- a/searchlib/src/vespa/searchlib/features/rankingexpressionfeature.h +++ 
b/searchlib/src/vespa/searchlib/features/rankingexpressionfeature.h @@ -26,7 +26,7 @@ private: public: RankingExpressionBlueprint(); RankingExpressionBlueprint(rankingexpression::ExpressionReplacer::SP replacer); - ~RankingExpressionBlueprint(); + ~RankingExpressionBlueprint() override; void visitDumpFeatures(const fef::IIndexEnvironment &env, fef::IDumpFeatureVisitor &visitor) const override; fef::Blueprint::UP createInstance() const override; @@ -37,6 +37,7 @@ public: } bool setup(const fef::IIndexEnvironment & env, const fef::ParameterList & params) override; + void prepareSharedState(const fef::IQueryEnvironment & queryEnv, fef::IObjectStore & objectStore) const override; fef::FeatureExecutor &createExecutor(const fef::IQueryEnvironment &env, vespalib::Stash &stash) const override; }; diff --git a/searchlib/src/vespa/searchlib/fef/match_data_details.h b/searchlib/src/vespa/searchlib/fef/match_data_details.h index 16b26e5b526..9c4a248a4aa 100644 --- a/searchlib/src/vespa/searchlib/fef/match_data_details.h +++ b/searchlib/src/vespa/searchlib/fef/match_data_details.h @@ -10,12 +10,12 @@ namespace search::fef { * Normal: * Full match data positions should be available. This is the default. * - * Cheap: - * Cheap match data ('number of occurrences' and 'field length') should be available. + * Interleaved: + * Interleaved match data ('number of occurrences' and 'field length') should be available. 
*/ enum class MatchDataDetails { Normal = 1, - Cheap = 2 + Interleaved = 2 }; } diff --git a/searchlib/src/vespa/searchlib/fef/termfieldmatchdata.h b/searchlib/src/vespa/searchlib/fef/termfieldmatchdata.h index 5f1b2ae2ffe..45de0d654fb 100644 --- a/searchlib/src/vespa/searchlib/fef/termfieldmatchdata.h +++ b/searchlib/src/vespa/searchlib/fef/termfieldmatchdata.h @@ -53,15 +53,15 @@ private: static constexpr uint16_t RAW_SCORE_FLAG = 1; static constexpr uint16_t MULTIPOS_FLAG = 2; static constexpr uint16_t UNPACK_NORMAL_FEATURES_FLAG = 4; - static constexpr uint16_t UNPACK_CHEAP_FEATURES_FLAG = 8; - static constexpr uint16_t UNPACK_ALL_FEATURES_MASK = UNPACK_NORMAL_FEATURES_FLAG | UNPACK_CHEAP_FEATURES_FLAG; + static constexpr uint16_t UNPACK_INTERLEAVED_FEATURES_FLAG = 8; + static constexpr uint16_t UNPACK_ALL_FEATURES_MASK = UNPACK_NORMAL_FEATURES_FLAG | UNPACK_INTERLEAVED_FEATURES_FLAG; uint32_t _docId; uint16_t _fieldId; uint16_t _flags; uint16_t _sz; - // Number of occurrences and field length used when unpacking "cheap" features. + // Number of occurrences and field length used when unpacking interleaved features. // This can exist in addition to full position features. uint16_t _numOccs; uint16_t _fieldLength; @@ -259,17 +259,17 @@ public: * This indicates if this instance is actually used for ranking or not. * @return true if it is not needed. */ - bool isNotNeeded() const { return ((_flags & (UNPACK_NORMAL_FEATURES_FLAG | UNPACK_CHEAP_FEATURES_FLAG)) == 0u); } + bool isNotNeeded() const { return ((_flags & (UNPACK_NORMAL_FEATURES_FLAG | UNPACK_INTERLEAVED_FEATURES_FLAG)) == 0u); } bool needs_normal_features() const { return ((_flags & UNPACK_NORMAL_FEATURES_FLAG) != 0u); } - bool needs_cheap_features() const { return ((_flags & UNPACK_CHEAP_FEATURES_FLAG) != 0u); } + bool needs_interleaved_features() const { return ((_flags & UNPACK_INTERLEAVED_FEATURES_FLAG) != 0u); } /** * Tag that this instance is not really used for ranking. 
*/ void tagAsNotNeeded() { - _flags &= ~(UNPACK_NORMAL_FEATURES_FLAG | UNPACK_CHEAP_FEATURES_FLAG); + _flags &= ~(UNPACK_NORMAL_FEATURES_FLAG | UNPACK_INTERLEAVED_FEATURES_FLAG); } /** @@ -284,13 +284,13 @@ public: } /** - * Tag that this instance is used for ranking (cheap features) + * Tag that this instance is used for ranking (interleaved features) */ - void setNeedCheapFeatures(bool needed) { + void setNeedInterleavedFeatures(bool needed) { if (needed) { - _flags |= UNPACK_CHEAP_FEATURES_FLAG; + _flags |= UNPACK_INTERLEAVED_FEATURES_FLAG; } else { - _flags &= ~UNPACK_CHEAP_FEATURES_FLAG; + _flags &= ~UNPACK_INTERLEAVED_FEATURES_FLAG; } } diff --git a/searchlib/src/vespa/searchlib/fef/test/indexenvironment.cpp b/searchlib/src/vespa/searchlib/fef/test/indexenvironment.cpp index 755245438f2..e998e4d18bd 100644 --- a/searchlib/src/vespa/searchlib/fef/test/indexenvironment.cpp +++ b/searchlib/src/vespa/searchlib/fef/test/indexenvironment.cpp @@ -7,14 +7,6 @@ namespace search::fef::test { using vespalib::eval::ValueType; -using vespalib::eval::ErrorValue; - -namespace { - -IndexEnvironment::Constant notFoundError(ValueType::error_type(), - std::make_unique<ErrorValue>()); - -} IndexEnvironment::IndexEnvironment() = default; @@ -46,7 +38,7 @@ IndexEnvironment::getConstantValue(const vespalib::string &name) const if (it != _constants.end()) { return std::make_unique<ConstantRef>(it->second); } else { - return std::make_unique<ConstantRef>(notFoundError); + return vespalib::eval::ConstantValue::UP(nullptr); } } diff --git a/searchlib/src/vespa/searchlib/memoryindex/CMakeLists.txt b/searchlib/src/vespa/searchlib/memoryindex/CMakeLists.txt index 441fe12c383..c19596692cc 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/CMakeLists.txt +++ b/searchlib/src/vespa/searchlib/memoryindex/CMakeLists.txt @@ -5,6 +5,7 @@ vespa_add_library(searchlib_memoryindex OBJECT document_inverter.cpp feature_store.cpp field_index.cpp + field_index_base.cpp field_index_collection.cpp 
field_index_remover.cpp field_inverter.cpp diff --git a/searchlib/src/vespa/searchlib/memoryindex/field_index.cpp b/searchlib/src/vespa/searchlib/memoryindex/field_index.cpp index e2e1c99a9b9..a5dc921cfdf 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/field_index.cpp +++ b/searchlib/src/vespa/searchlib/memoryindex/field_index.cpp @@ -2,27 +2,39 @@ #include "field_index.h" #include "ordered_field_index_inserter.h" -#include <vespa/vespalib/util/stringfmt.h> -#include <vespa/vespalib/util/exceptions.h> +#include "posting_iterator.h" #include <vespa/searchlib/bitcompression/posocccompression.h> +#include <vespa/searchlib/queryeval/booleanmatchiteratorwrapper.h> +#include <vespa/searchlib/queryeval/searchiterator.h> +#include <vespa/vespalib/btree/btree.hpp> +#include <vespa/vespalib/btree/btreeiterator.hpp> #include <vespa/vespalib/btree/btreenode.hpp> #include <vespa/vespalib/btree/btreenodeallocator.hpp> #include <vespa/vespalib/btree/btreenodestore.hpp> -#include <vespa/vespalib/btree/btreestore.hpp> -#include <vespa/vespalib/btree/btreeiterator.hpp> #include <vespa/vespalib/btree/btreeroot.hpp> -#include <vespa/vespalib/btree/btree.hpp> +#include <vespa/vespalib/btree/btreestore.hpp> #include <vespa/vespalib/util/array.hpp> +#include <vespa/vespalib/util/exceptions.h> +#include <vespa/vespalib/util/stringfmt.h> +#include <vespa/log/log.h> +LOG_SETUP(".searchlib.memoryindex.field_index"); + +using search::fef::TermFieldMatchDataArray; using search::index::DocIdAndFeatures; -using search::index::WordDocElementFeatures; using search::index::Schema; +using search::index::WordDocElementFeatures; +using search::queryeval::BooleanMatchIteratorWrapper; +using search::queryeval::FieldSpecBase; +using search::queryeval::SearchIterator; +using search::queryeval::SimpleLeafBlueprint; +using vespalib::GenerationHandler; namespace search::memoryindex { namespace { -void set_cheap_features(DocIdAndFeatures &features) +void set_interleaved_features(DocIdAndFeatures 
&features) { // Set cheap features based on normal features. // TODO: Update when proper cheap features are present in memory index. @@ -36,33 +48,24 @@ void set_cheap_features(DocIdAndFeatures &features) using datastore::EntryRef; -vespalib::asciistream & -operator<<(vespalib::asciistream & os, const FieldIndex::WordKey & rhs) -{ - os << "wr(" << rhs._wordRef.ref() << ")"; - return os; -} - -FieldIndex::FieldIndex(const index::Schema& schema, uint32_t fieldId) +template <bool interleaved_features> +FieldIndex<interleaved_features>::FieldIndex(const index::Schema& schema, uint32_t fieldId) : FieldIndex(schema, fieldId, index::FieldLengthInfo()) { } -FieldIndex::FieldIndex(const index::Schema& schema, uint32_t fieldId, const index::FieldLengthInfo& info) - : _wordStore(), - _numUniqueWords(0), - _generationHandler(), - _dict(), - _postingListStore(), - _featureStore(schema), - _fieldId(fieldId), - _remover(_wordStore), - _inserter(std::make_unique<OrderedFieldIndexInserter>(*this)), - _calculator(info) +template <bool interleaved_features> +FieldIndex<interleaved_features>::FieldIndex(const index::Schema& schema, uint32_t fieldId, + const index::FieldLengthInfo& info) + : FieldIndexBase(schema, fieldId, info), + _postingListStore() { + using InserterType = OrderedFieldIndexInserter<interleaved_features>; + _inserter = std::make_unique<InserterType>(*this); } -FieldIndex::~FieldIndex() +template <bool interleaved_features> +FieldIndex<interleaved_features>::~FieldIndex() { _postingListStore.disableFreeLists(); _postingListStore.disableElemHoldList(); @@ -89,28 +92,31 @@ FieldIndex::~FieldIndex() trimHoldLists(); } -FieldIndex::PostingList::Iterator -FieldIndex::find(const vespalib::stringref word) const +template <bool interleaved_features> +typename FieldIndex<interleaved_features>::PostingList::Iterator +FieldIndex<interleaved_features>::find(const vespalib::stringref word) const { DictionaryTree::Iterator itr = _dict.find(WordKey(EntryRef()), KeyComp(_wordStore, 
word)); if (itr.valid()) { return _postingListStore.begin(EntryRef(itr.getData())); } - return PostingList::Iterator(); + return typename PostingList::Iterator(); } -FieldIndex::PostingList::ConstIterator -FieldIndex::findFrozen(const vespalib::stringref word) const +template <bool interleaved_features> +typename FieldIndex<interleaved_features>::PostingList::ConstIterator +FieldIndex<interleaved_features>::findFrozen(const vespalib::stringref word) const { auto itr = _dict.getFrozenView().find(WordKey(EntryRef()), KeyComp(_wordStore, word)); if (itr.valid()) { return _postingListStore.beginFrozen(EntryRef(itr.getData())); } - return PostingList::Iterator(); + return typename PostingList::Iterator(); } +template <bool interleaved_features> void -FieldIndex::compactFeatures() +FieldIndex<interleaved_features>::compactFeatures() { std::vector<uint32_t> toHold; @@ -118,7 +124,7 @@ FieldIndex::compactFeatures() auto itr = _dict.begin(); uint32_t packedIndex = _fieldId; for (; itr.valid(); ++itr) { - PostingListStore::RefType pidx(EntryRef(itr.getData())); + typename PostingListStore::RefType pidx(EntryRef(itr.getData())); if (!pidx.valid()) { continue; } @@ -127,36 +133,35 @@ FieldIndex::compactFeatures() const PostingList *tree = _postingListStore.getTreeEntry(pidx); auto pitr = tree->begin(_postingListStore.getAllocator()); for (; pitr.valid(); ++pitr) { - EntryRef oldFeatures(pitr.getData()); + const PostingListEntryType& posting_entry(pitr.getData()); // Filter on which buffers to move features from when // performing incremental compaction. - EntryRef newFeatures = _featureStore.moveFeatures(packedIndex, oldFeatures); + EntryRef newFeatures = _featureStore.moveFeatures(packedIndex, posting_entry.get_features()); // Features must be written before reference is updated. 
std::atomic_thread_fence(std::memory_order_release); - // Ugly, ugly due to const_cast in iterator - pitr.writeData(newFeatures.ref()); + // Reference the moved data + posting_entry.update_features(newFeatures); } } else { const PostingListKeyDataType *shortArray = _postingListStore.getKeyDataEntry(pidx, clusterSize); const PostingListKeyDataType *ite = shortArray + clusterSize; for (const PostingListKeyDataType *it = shortArray; it < ite; ++it) { - EntryRef oldFeatures(it->getData()); + const PostingListEntryType& posting_entry(it->getData()); // Filter on which buffers to move features from when // performing incremental compaction. - EntryRef newFeatures = _featureStore.moveFeatures(packedIndex, oldFeatures); + EntryRef newFeatures = _featureStore.moveFeatures(packedIndex, posting_entry.get_features()); // Features must be written before reference is updated. std::atomic_thread_fence(std::memory_order_release); - // Ugly, ugly due to const_cast, but new data is - // semantically equal to old data - const_cast<PostingListKeyDataType *>(it)->setData(newFeatures.ref()); + // Reference the moved data + posting_entry.update_features(newFeatures); } } } @@ -166,8 +171,9 @@ FieldIndex::compactFeatures() _featureStore.transferHoldLists(generation); } +template <bool interleaved_features> void -FieldIndex::dump(search::index::IndexBuilder & indexBuilder) +FieldIndex<interleaved_features>::dump(search::index::IndexBuilder & indexBuilder) { vespalib::stringref word; FeatureStore::DecodeContextCooked decoder(nullptr); @@ -176,7 +182,7 @@ FieldIndex::dump(search::index::IndexBuilder & indexBuilder) _featureStore.setupForField(_fieldId, decoder); for (auto itr = _dict.begin(); itr.valid(); ++itr) { const WordKey & wk = itr.getKey(); - PostingListStore::RefType plist(EntryRef(itr.getData())); + typename PostingListStore::RefType plist(EntryRef(itr.getData())); word = _wordStore.getWord(wk._wordRef); if (!plist.valid()) { continue; @@ -189,11 +195,11 @@ 
FieldIndex::dump(search::index::IndexBuilder & indexBuilder) assert(pitr.valid()); for (; pitr.valid(); ++pitr) { uint32_t docId = pitr.getKey(); - EntryRef featureRef(pitr.getData()); + EntryRef featureRef(pitr.getData().get_features()); _featureStore.setupForReadFeatures(featureRef, decoder); decoder.readFeatures(features); features.set_doc_id(docId); - set_cheap_features(features); + set_interleaved_features(features); indexBuilder.add_document(features); } } else { @@ -202,11 +208,11 @@ FieldIndex::dump(search::index::IndexBuilder & indexBuilder) const PostingListKeyDataType *kde = kd + clusterSize; for (; kd != kde; ++kd) { uint32_t docId = kd->_key; - EntryRef featureRef(kd->getData()); + EntryRef featureRef(kd->getData().get_features()); _featureStore.setupForReadFeatures(featureRef, decoder); decoder.readFeatures(features); features.set_doc_id(docId); - set_cheap_features(features); + set_interleaved_features(features); indexBuilder.add_document(features); } } @@ -214,8 +220,9 @@ FieldIndex::dump(search::index::IndexBuilder & indexBuilder) } } +template <bool interleaved_features> vespalib::MemoryUsage -FieldIndex::getMemoryUsage() const +FieldIndex<interleaved_features>::getMemoryUsage() const { vespalib::MemoryUsage usage; usage.merge(_wordStore.getMemoryUsage()); @@ -226,83 +233,147 @@ FieldIndex::getMemoryUsage() const return usage; } +namespace { + +template <bool interleaved_features> +class MemoryTermBlueprint : public SimpleLeafBlueprint { +private: + using FieldIndexType = FieldIndex<interleaved_features>; + using PostingListIteratorType = typename FieldIndexType::PostingList::ConstIterator; + GenerationHandler::Guard _guard; + PostingListIteratorType _posting_itr; + const FeatureStore& _feature_store; + const uint32_t _field_id; + const bool _use_bit_vector; + +public: + MemoryTermBlueprint(GenerationHandler::Guard&& guard, + PostingListIteratorType posting_itr, + const FeatureStore& feature_store, + const FieldSpecBase& field, + uint32_t 
field_id, + bool use_bit_vector) + : SimpleLeafBlueprint(field), + _guard(), + _posting_itr(posting_itr), + _feature_store(feature_store), + _field_id(field_id), + _use_bit_vector(use_bit_vector) + { + _guard = std::move(guard); + HitEstimate estimate(_posting_itr.size(), !_posting_itr.valid()); + setEstimate(estimate); + } + + SearchIterator::UP createLeafSearch(const TermFieldMatchDataArray& tfmda, bool) const override { + using PostingIteratorType = PostingIterator<interleaved_features>; + auto result = std::make_unique<PostingIteratorType>(_posting_itr, _feature_store, _field_id, tfmda); + if (_use_bit_vector) { + LOG(debug, "Return BooleanMatchIteratorWrapper: field_id(%u), doc_count(%zu)", + _field_id, _posting_itr.size()); + return std::make_unique<BooleanMatchIteratorWrapper>(std::move(result), tfmda); + } + LOG(debug, "Return PostingIterator: field_id(%u), doc_count(%zu)", + _field_id, _posting_itr.size()); + return result; + } +}; + +} + +template <bool interleaved_features> +std::unique_ptr<queryeval::SimpleLeafBlueprint> +FieldIndex<interleaved_features>::make_term_blueprint(const vespalib::string& term, + const queryeval::FieldSpecBase& field, + uint32_t field_id) +{ + auto guard = takeGenerationGuard(); + auto posting_itr = findFrozen(term); + bool use_bit_vector = field.isFilter(); + return std::make_unique<MemoryTermBlueprint<interleaved_features>> + (std::move(guard), posting_itr, getFeatureStore(), field, field_id, use_bit_vector); +} + +template +class FieldIndex<false>; + } namespace search::btree { template -class BTreeNodeDataWrap<memoryindex::FieldIndex::WordKey, BTreeDefaultTraits::LEAF_SLOTS>; +class BTreeNodeDataWrap<memoryindex::FieldIndexBase::WordKey, BTreeDefaultTraits::LEAF_SLOTS>; template -class BTreeNodeT<memoryindex::FieldIndex::WordKey, BTreeDefaultTraits::INTERNAL_SLOTS>; +class BTreeNodeT<memoryindex::FieldIndexBase::WordKey, BTreeDefaultTraits::INTERNAL_SLOTS>; #if 0 template -class BTreeNodeT<memoryindex::FieldIndex::WordKey, 
+class BTreeNodeT<memoryindex::FieldIndexBase::WordKey, BTreeDefaultTraits::LEAF_SLOTS>; #endif template -class BTreeNodeTT<memoryindex::FieldIndex::WordKey, +class BTreeNodeTT<memoryindex::FieldIndexBase::WordKey, datastore::EntryRef, search::btree::NoAggregated, BTreeDefaultTraits::INTERNAL_SLOTS>; template -class BTreeNodeTT<memoryindex::FieldIndex::WordKey, - memoryindex::FieldIndex::PostingListPtr, +class BTreeNodeTT<memoryindex::FieldIndexBase::WordKey, + memoryindex::FieldIndexBase::PostingListPtr, search::btree::NoAggregated, BTreeDefaultTraits::LEAF_SLOTS>; template -class BTreeInternalNode<memoryindex::FieldIndex::WordKey, +class BTreeInternalNode<memoryindex::FieldIndexBase::WordKey, search::btree::NoAggregated, BTreeDefaultTraits::INTERNAL_SLOTS>; template -class BTreeLeafNode<memoryindex::FieldIndex::WordKey, - memoryindex::FieldIndex::PostingListPtr, +class BTreeLeafNode<memoryindex::FieldIndexBase::WordKey, + memoryindex::FieldIndexBase::PostingListPtr, search::btree::NoAggregated, BTreeDefaultTraits::LEAF_SLOTS>; template -class BTreeNodeStore<memoryindex::FieldIndex::WordKey, - memoryindex::FieldIndex::PostingListPtr, +class BTreeNodeStore<memoryindex::FieldIndexBase::WordKey, + memoryindex::FieldIndexBase::PostingListPtr, search::btree::NoAggregated, BTreeDefaultTraits::INTERNAL_SLOTS, BTreeDefaultTraits::LEAF_SLOTS>; template -class BTreeIterator<memoryindex::FieldIndex::WordKey, - memoryindex::FieldIndex::PostingListPtr, +class BTreeIterator<memoryindex::FieldIndexBase::WordKey, + memoryindex::FieldIndexBase::PostingListPtr, search::btree::NoAggregated, - const memoryindex::FieldIndex::KeyComp, + const memoryindex::FieldIndexBase::KeyComp, BTreeDefaultTraits>; template -class BTree<memoryindex::FieldIndex::WordKey, - memoryindex::FieldIndex::PostingListPtr, +class BTree<memoryindex::FieldIndexBase::WordKey, + memoryindex::FieldIndexBase::PostingListPtr, search::btree::NoAggregated, - const memoryindex::FieldIndex::KeyComp, + const 
memoryindex::FieldIndexBase::KeyComp, BTreeDefaultTraits>; template -class BTreeRoot<memoryindex::FieldIndex::WordKey, - memoryindex::FieldIndex::PostingListPtr, +class BTreeRoot<memoryindex::FieldIndexBase::WordKey, + memoryindex::FieldIndexBase::PostingListPtr, search::btree::NoAggregated, - const memoryindex::FieldIndex::KeyComp, + const memoryindex::FieldIndexBase::KeyComp, BTreeDefaultTraits>; template -class BTreeRootBase<memoryindex::FieldIndex::WordKey, - memoryindex::FieldIndex::PostingListPtr, +class BTreeRootBase<memoryindex::FieldIndexBase::WordKey, + memoryindex::FieldIndexBase::PostingListPtr, search::btree::NoAggregated, BTreeDefaultTraits::INTERNAL_SLOTS, BTreeDefaultTraits::LEAF_SLOTS>; template -class BTreeNodeAllocator<memoryindex::FieldIndex::WordKey, - memoryindex::FieldIndex::PostingListPtr, +class BTreeNodeAllocator<memoryindex::FieldIndexBase::WordKey, + memoryindex::FieldIndexBase::PostingListPtr, search::btree::NoAggregated, BTreeDefaultTraits::INTERNAL_SLOTS, BTreeDefaultTraits::LEAF_SLOTS>; diff --git a/searchlib/src/vespa/searchlib/memoryindex/field_index.h b/searchlib/src/vespa/searchlib/memoryindex/field_index.h index dba57f553b5..05665945800 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/field_index.h +++ b/searchlib/src/vespa/searchlib/memoryindex/field_index.h @@ -2,25 +2,20 @@ #pragma once -#include "feature_store.h" -#include "field_index_remover.h" -#include "word_store.h" -#include <vespa/searchlib/index/docidandfeatures.h> -#include <vespa/searchlib/index/field_length_calculator.h> +#include "field_index_base.h" +#include "posting_list_entry.h" #include <vespa/searchlib/index/indexbuilder.h> #include <vespa/vespalib/btree/btree.h> #include <vespa/vespalib/btree/btreenodeallocator.h> #include <vespa/vespalib/btree/btreeroot.h> #include <vespa/vespalib/btree/btreestore.h> -#include <vespa/vespalib/stllike/string.h> -#include <vespa/vespalib/util/memoryusage.h> namespace search::memoryindex { -class 
OrderedFieldIndexInserter; +class IOrderedFieldIndexInserter; /** - * Memory index for a single field using lock-free B-Trees in underlying components. + * Implementation of memory index for a single field using lock-free B-Trees in underlying components. * * It consists of the following components: * - WordStore containing all unique words in this field (across all documents). @@ -31,94 +26,24 @@ class OrderedFieldIndexInserter; * This information is unpacked and used during ranking. * * Elements in the three stores are accessed using 32-bit references / handles. + * + * The template parameter specifies whether the underlying posting lists have interleaved features or not. */ -class FieldIndex { +template <bool interleaved_features> +class FieldIndex : public FieldIndexBase { public: // Mapping from docid -> feature ref - using PostingList = btree::BTreeRoot<uint32_t, uint32_t, search::btree::NoAggregated>; - using PostingListStore = btree::BTreeStore<uint32_t, uint32_t, + using PostingListEntryType = PostingListEntry<interleaved_features>; + using PostingList = btree::BTreeRoot<uint32_t, PostingListEntryType, search::btree::NoAggregated>; + using PostingListStore = btree::BTreeStore<uint32_t, PostingListEntryType, search::btree::NoAggregated, std::less<uint32_t>, btree::BTreeDefaultTraits>; - using PostingListKeyDataType = PostingListStore::KeyDataType; - - struct WordKey { - datastore::EntryRef _wordRef; - - explicit WordKey(datastore::EntryRef wordRef) : _wordRef(wordRef) { } - WordKey() : _wordRef() { } - - friend vespalib::asciistream & - operator<<(vespalib::asciistream & os, const WordKey & rhs); - }; - - class KeyComp { - private: - const WordStore &_wordStore; - const vespalib::stringref _word; - - const char *getWord(datastore::EntryRef wordRef) const { - if (wordRef.valid()) { - return _wordStore.getWord(wordRef); - } - return _word.data(); - } - - public: - KeyComp(const WordStore &wordStore, const vespalib::stringref word) - : _wordStore(wordStore), - 
_word(word) - { } - - bool operator()(const WordKey & lhs, const WordKey & rhs) const { - int cmpres = strcmp(getWord(lhs._wordRef), getWord(rhs._wordRef)); - return cmpres < 0; - } - }; - - using PostingListPtr = uint32_t; - using DictionaryTree = btree::BTree<WordKey, PostingListPtr, - search::btree::NoAggregated, - const KeyComp>; -private: - using GenerationHandler = vespalib::GenerationHandler; - - WordStore _wordStore; - uint64_t _numUniqueWords; - GenerationHandler _generationHandler; - DictionaryTree _dict; - PostingListStore _postingListStore; - FeatureStore _featureStore; - uint32_t _fieldId; - FieldIndexRemover _remover; - std::unique_ptr<OrderedFieldIndexInserter> _inserter; - index::FieldLengthCalculator _calculator; - -public: - datastore::EntryRef addWord(const vespalib::stringref word) { - _numUniqueWords++; - return _wordStore.addWord(word); - } - - datastore::EntryRef addFeatures(const index::DocIdAndFeatures &features) { - return _featureStore.addFeatures(_fieldId, features).first; - } - - FieldIndex(const index::Schema& schema, uint32_t fieldId); - FieldIndex(const index::Schema& schema, uint32_t fieldId, const index::FieldLengthInfo& info); - ~FieldIndex(); - PostingList::Iterator find(const vespalib::stringref word) const; - - PostingList::ConstIterator - findFrozen(const vespalib::stringref word) const; - - uint64_t getNumUniqueWords() const { return _numUniqueWords; } - const FeatureStore & getFeatureStore() const { return _featureStore; } - const WordStore &getWordStore() const { return _wordStore; } - OrderedFieldIndexInserter &getInserter() const { return *_inserter; } - index::FieldLengthCalculator &get_calculator() { return _calculator; } + using PostingListKeyDataType = typename PostingListStore::KeyDataType; private: + PostingListStore _postingListStore; + void freeze() { _postingListStore.freeze(); _dict.getAllocator().freeze(); @@ -145,27 +70,31 @@ private: } public: - GenerationHandler::Guard takeGenerationGuard() { - return 
_generationHandler.takeGuard(); - } + FieldIndex(const index::Schema& schema, uint32_t fieldId); + FieldIndex(const index::Schema& schema, uint32_t fieldId, const index::FieldLengthInfo& info); + ~FieldIndex(); - void - compactFeatures(); + typename PostingList::Iterator find(const vespalib::stringref word) const; + typename PostingList::ConstIterator findFrozen(const vespalib::stringref word) const; - void dump(search::index::IndexBuilder & indexBuilder); + void compactFeatures() override; - vespalib::MemoryUsage getMemoryUsage() const; - DictionaryTree &getDictionaryTree() { return _dict; } + void dump(search::index::IndexBuilder & indexBuilder) override; + + vespalib::MemoryUsage getMemoryUsage() const override; PostingListStore &getPostingListStore() { return _postingListStore; } - FieldIndexRemover &getDocumentRemover() { return _remover; } - void commit() { + void commit() override { _remover.flush(); freeze(); transferHoldLists(); incGeneration(); trimHoldLists(); } + + std::unique_ptr<queryeval::SimpleLeafBlueprint> make_term_blueprint(const vespalib::string& term, + const queryeval::FieldSpecBase& field, + uint32_t field_id) override; }; } @@ -173,82 +102,82 @@ public: namespace search::btree { extern template -class BTreeNodeDataWrap<memoryindex::FieldIndex::WordKey, +class BTreeNodeDataWrap<memoryindex::FieldIndexBase::WordKey, BTreeDefaultTraits::LEAF_SLOTS>; extern template -class BTreeNodeT<memoryindex::FieldIndex::WordKey, +class BTreeNodeT<memoryindex::FieldIndexBase::WordKey, BTreeDefaultTraits::INTERNAL_SLOTS>; #if 0 extern template -class BTreeNodeT<memoryindex::FieldIndex::WordKey, +class BTreeNodeT<memoryindex::FieldIndexBase::WordKey, BTreeDefaultTraits::LEAF_SLOTS>; #endif extern template -class BTreeNodeTT<memoryindex::FieldIndex::WordKey, +class BTreeNodeTT<memoryindex::FieldIndexBase::WordKey, datastore::EntryRef, search::btree::NoAggregated, BTreeDefaultTraits::INTERNAL_SLOTS>; extern template -class 
BTreeNodeTT<memoryindex::FieldIndex::WordKey, - memoryindex::FieldIndex::PostingListPtr, +class BTreeNodeTT<memoryindex::FieldIndexBase::WordKey, + memoryindex::FieldIndexBase::PostingListPtr, search::btree::NoAggregated, BTreeDefaultTraits::LEAF_SLOTS>; extern template -class BTreeInternalNode<memoryindex::FieldIndex::WordKey, +class BTreeInternalNode<memoryindex::FieldIndexBase::WordKey, search::btree::NoAggregated, BTreeDefaultTraits::INTERNAL_SLOTS>; extern template -class BTreeLeafNode<memoryindex::FieldIndex::WordKey, - memoryindex::FieldIndex::PostingListPtr, +class BTreeLeafNode<memoryindex::FieldIndexBase::WordKey, + memoryindex::FieldIndexBase::PostingListPtr, search::btree::NoAggregated, BTreeDefaultTraits::LEAF_SLOTS>; extern template -class BTreeNodeStore<memoryindex::FieldIndex::WordKey, - memoryindex::FieldIndex::PostingListPtr, +class BTreeNodeStore<memoryindex::FieldIndexBase::WordKey, + memoryindex::FieldIndexBase::PostingListPtr, search::btree::NoAggregated, BTreeDefaultTraits::INTERNAL_SLOTS, BTreeDefaultTraits::LEAF_SLOTS>; extern template -class BTreeIterator<memoryindex::FieldIndex::WordKey, - memoryindex::FieldIndex::PostingListPtr, +class BTreeIterator<memoryindex::FieldIndexBase::WordKey, + memoryindex::FieldIndexBase::PostingListPtr, search::btree::NoAggregated, - const memoryindex::FieldIndex::KeyComp, + const memoryindex::FieldIndexBase::KeyComp, BTreeDefaultTraits>; extern template -class BTree<memoryindex::FieldIndex::WordKey, - memoryindex::FieldIndex::PostingListPtr, +class BTree<memoryindex::FieldIndexBase::WordKey, + memoryindex::FieldIndexBase::PostingListPtr, search::btree::NoAggregated, - const memoryindex::FieldIndex::KeyComp, + const memoryindex::FieldIndexBase::KeyComp, BTreeDefaultTraits>; extern template -class BTreeRoot<memoryindex::FieldIndex::WordKey, - memoryindex::FieldIndex::PostingListPtr, +class BTreeRoot<memoryindex::FieldIndexBase::WordKey, + memoryindex::FieldIndexBase::PostingListPtr, 
search::btree::NoAggregated, - const memoryindex::FieldIndex::KeyComp, - BTreeDefaultTraits>; + const memoryindex::FieldIndexBase::KeyComp, + BTreeDefaultTraits>; extern template -class BTreeRootBase<memoryindex::FieldIndex::WordKey, - memoryindex::FieldIndex::PostingListPtr, +class BTreeRootBase<memoryindex::FieldIndexBase::WordKey, + memoryindex::FieldIndexBase::PostingListPtr, search::btree::NoAggregated, BTreeDefaultTraits::INTERNAL_SLOTS, BTreeDefaultTraits::LEAF_SLOTS>; extern template -class BTreeNodeAllocator<memoryindex::FieldIndex::WordKey, - memoryindex::FieldIndex::PostingListPtr, +class BTreeNodeAllocator<memoryindex::FieldIndexBase::WordKey, + memoryindex::FieldIndexBase::PostingListPtr, search::btree::NoAggregated, - BTreeDefaultTraits::INTERNAL_SLOTS, - BTreeDefaultTraits::LEAF_SLOTS>; + BTreeDefaultTraits::INTERNAL_SLOTS, + BTreeDefaultTraits::LEAF_SLOTS>; } diff --git a/searchlib/src/vespa/searchlib/memoryindex/field_index_base.cpp b/searchlib/src/vespa/searchlib/memoryindex/field_index_base.cpp new file mode 100644 index 00000000000..ee1fee3d935 --- /dev/null +++ b/searchlib/src/vespa/searchlib/memoryindex/field_index_base.cpp @@ -0,0 +1,36 @@ +// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#include "field_index_base.h" +#include "i_ordered_field_index_inserter.h" +#include <vespa/vespalib/stllike/asciistream.h> + +namespace search::memoryindex { + +vespalib::asciistream & +operator<<(vespalib::asciistream& os, const FieldIndexBase::WordKey& rhs) +{ + os << "wr(" << rhs._wordRef.ref() << ")"; + return os; +} + +FieldIndexBase::FieldIndexBase(const index::Schema& schema, uint32_t fieldId) + : FieldIndexBase(schema, fieldId, index::FieldLengthInfo()) +{ +} + +FieldIndexBase::FieldIndexBase(const index::Schema& schema, uint32_t fieldId, + const index::FieldLengthInfo& info) + : _wordStore(), + _numUniqueWords(0), + _generationHandler(), + _dict(), + _featureStore(schema), + _fieldId(fieldId), + _remover(_wordStore), + _inserter(), + _calculator(info) +{ +} + +} + diff --git a/searchlib/src/vespa/searchlib/memoryindex/field_index_base.h b/searchlib/src/vespa/searchlib/memoryindex/field_index_base.h new file mode 100644 index 00000000000..7efec1f2ae8 --- /dev/null +++ b/searchlib/src/vespa/searchlib/memoryindex/field_index_base.h @@ -0,0 +1,119 @@ +// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +#pragma once + +#include "feature_store.h" +#include "field_index_remover.h" +#include "i_field_index.h" +#include "word_store.h" +#include <vespa/searchlib/index/docidandfeatures.h> +#include <vespa/searchlib/index/field_length_calculator.h> +#include <vespa/vespalib/btree/btree.h> +#include <vespa/vespalib/btree/btreenodeallocator.h> +#include <vespa/vespalib/btree/btreeroot.h> +#include <vespa/vespalib/stllike/string.h> +#include <vespa/vespalib/util/memoryusage.h> + +namespace search::memoryindex { + +class IOrderedFieldIndexInserter; + +/** + * Abstract base class for implementation of memory index for a single field. + * + * Contains all components that are not dependent of the posting list format. 
+ */ +class FieldIndexBase : public IFieldIndex { +public: + /** + * Class representing a word used as key in the dictionary. + */ + struct WordKey { + datastore::EntryRef _wordRef; + + explicit WordKey(datastore::EntryRef wordRef) : _wordRef(wordRef) { } + WordKey() : _wordRef() { } + + friend vespalib::asciistream& + operator<<(vespalib::asciistream& os, const WordKey& rhs); + }; + + /** + * Comparator class for words used in the dictionary. + */ + class KeyComp { + private: + const WordStore& _wordStore; + const vespalib::stringref _word; + + const char* getWord(datastore::EntryRef wordRef) const { + if (wordRef.valid()) { + return _wordStore.getWord(wordRef); + } + return _word.data(); + } + + public: + KeyComp(const WordStore& wordStore, const vespalib::stringref word) + : _wordStore(wordStore), + _word(word) + { } + + bool operator()(const WordKey& lhs, const WordKey& rhs) const { + int cmpres = strcmp(getWord(lhs._wordRef), getWord(rhs._wordRef)); + return cmpres < 0; + } + }; + + using PostingListPtr = uint32_t; + using DictionaryTree = btree::BTree<WordKey, PostingListPtr, + search::btree::NoAggregated, + const KeyComp>; + +protected: + using GenerationHandler = vespalib::GenerationHandler; + + WordStore _wordStore; + uint64_t _numUniqueWords; + GenerationHandler _generationHandler; + DictionaryTree _dict; + FeatureStore _featureStore; + uint32_t _fieldId; + FieldIndexRemover _remover; + std::unique_ptr<IOrderedFieldIndexInserter> _inserter; + index::FieldLengthCalculator _calculator; + + void incGeneration() { + _generationHandler.incGeneration(); + } + +public: + datastore::EntryRef addWord(const vespalib::stringref word) { + _numUniqueWords++; + return _wordStore.addWord(word); + } + + datastore::EntryRef addFeatures(const index::DocIdAndFeatures& features) { + return _featureStore.addFeatures(_fieldId, features).first; + } + + FieldIndexBase(const index::Schema& schema, uint32_t fieldId); + FieldIndexBase(const index::Schema& schema, uint32_t fieldId, 
const index::FieldLengthInfo& info); + + uint64_t getNumUniqueWords() const override { return _numUniqueWords; } + const FeatureStore& getFeatureStore() const override { return _featureStore; } + const WordStore& getWordStore() const override { return _wordStore; } + IOrderedFieldIndexInserter& getInserter() override { return *_inserter; } + index::FieldLengthCalculator& get_calculator() override { return _calculator; } + + GenerationHandler::Guard takeGenerationGuard() override { + return _generationHandler.takeGuard(); + } + + DictionaryTree& getDictionaryTree() { return _dict; } + FieldIndexRemover& getDocumentRemover() override { return _remover; } + +}; + +} + diff --git a/searchlib/src/vespa/searchlib/memoryindex/field_index_collection.cpp b/searchlib/src/vespa/searchlib/memoryindex/field_index_collection.cpp index 40b1e8f360f..dc7d35a755d 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/field_index_collection.cpp +++ b/searchlib/src/vespa/searchlib/memoryindex/field_index_collection.cpp @@ -34,7 +34,7 @@ FieldIndexCollection::FieldIndexCollection(const Schema& schema, const IFieldLen { for (uint32_t fieldId = 0; fieldId < _numFields; ++fieldId) { const auto& field = schema.getIndexField(fieldId); - auto fieldIndex = std::make_unique<FieldIndex>(schema, fieldId, inspector.get_field_length_info(field.getName())); + auto fieldIndex = std::make_unique<FieldIndex<false>>(schema, fieldId, inspector.get_field_length_info(field.getName())); _fieldIndexes.push_back(std::move(fieldIndex)); } } diff --git a/searchlib/src/vespa/searchlib/memoryindex/field_index_collection.h b/searchlib/src/vespa/searchlib/memoryindex/field_index_collection.h index 53f42658d0a..a737175d346 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/field_index_collection.h +++ b/searchlib/src/vespa/searchlib/memoryindex/field_index_collection.h @@ -3,9 +3,14 @@ #pragma once #include "i_field_index_collection.h" -#include "field_index.h" +#include "i_field_index.h" +#include <memory> 
+#include <vector> -namespace search::index { class IFieldLengthInspector; } +namespace search::index { + class IFieldLengthInspector; + class Schema; +} namespace search::memoryindex { @@ -19,26 +24,15 @@ class FieldInverter; * for a given word in a given field. */ class FieldIndexCollection : public IFieldIndexCollection { -public: - using PostingList = FieldIndex::PostingList; - private: using GenerationHandler = vespalib::GenerationHandler; - std::vector<std::unique_ptr<FieldIndex>> _fieldIndexes; + std::vector<std::unique_ptr<IFieldIndex>> _fieldIndexes; uint32_t _numFields; public: FieldIndexCollection(const index::Schema& schema, const index::IFieldLengthInspector& inspector); ~FieldIndexCollection(); - PostingList::Iterator find(const vespalib::stringref word, - uint32_t fieldId) const { - return _fieldIndexes[fieldId]->find(word); - } - - PostingList::ConstIterator findFrozen(const vespalib::stringref word, uint32_t fieldId) const { - return _fieldIndexes[fieldId]->findFrozen(word); - } uint64_t getNumUniqueWords() const { uint64_t numUniqueWords = 0; @@ -52,11 +46,11 @@ public: vespalib::MemoryUsage getMemoryUsage() const; - FieldIndex *getFieldIndex(uint32_t fieldId) const { + IFieldIndex *getFieldIndex(uint32_t fieldId) const { return _fieldIndexes[fieldId].get(); } - const std::vector<std::unique_ptr<FieldIndex>> &getFieldIndexes() const { return _fieldIndexes; } + const std::vector<std::unique_ptr<IFieldIndex>> &getFieldIndexes() const { return _fieldIndexes; } uint32_t getNumFields() const { return _numFields; } diff --git a/searchlib/src/vespa/searchlib/memoryindex/i_field_index.h b/searchlib/src/vespa/searchlib/memoryindex/i_field_index.h new file mode 100644 index 00000000000..86082c08d36 --- /dev/null +++ b/searchlib/src/vespa/searchlib/memoryindex/i_field_index.h @@ -0,0 +1,47 @@ +// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +#pragma once + +#include <vespa/searchlib/queryeval/blueprint.h> +#include <vespa/vespalib/util/generationhandler.h> +#include <vespa/vespalib/util/memoryusage.h> + +namespace search::index { +class FieldLengthCalculator; +class IndexBuilder; +} + +namespace search::memoryindex { + +class FeatureStore; +class FieldIndexRemover; +class IOrderedFieldIndexInserter; +class WordStore; + +/** + * Interface for a memory index for a single field as seen from the FieldIndexCollection. + */ +class IFieldIndex { +public: + virtual ~IFieldIndex() {} + + virtual uint64_t getNumUniqueWords() const = 0; + virtual vespalib::MemoryUsage getMemoryUsage() const = 0; + virtual const FeatureStore& getFeatureStore() const = 0; + virtual const WordStore& getWordStore() const = 0; + virtual IOrderedFieldIndexInserter& getInserter() = 0; + virtual FieldIndexRemover& getDocumentRemover() = 0; + virtual index::FieldLengthCalculator& get_calculator() = 0; + virtual void compactFeatures() = 0; + virtual void dump(search::index::IndexBuilder& indexBuilder) = 0; + + virtual std::unique_ptr<queryeval::SimpleLeafBlueprint> make_term_blueprint(const vespalib::string& term, + const queryeval::FieldSpecBase& field, + uint32_t field_id) = 0; + + // Should only be directly used by unit tests + virtual vespalib::GenerationHandler::Guard takeGenerationGuard() = 0; + virtual void commit() = 0; +}; + +} diff --git a/searchlib/src/vespa/searchlib/memoryindex/i_ordered_field_index_inserter.h b/searchlib/src/vespa/searchlib/memoryindex/i_ordered_field_index_inserter.h index cf10db3c4d8..4da0844da58 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/i_ordered_field_index_inserter.h +++ b/searchlib/src/vespa/searchlib/memoryindex/i_ordered_field_index_inserter.h @@ -3,6 +3,7 @@ #pragma once #include <vespa/vespalib/stllike/string.h> +#include <vespa/vespalib/datastore/entryref.h> #include <cstdint> namespace search::index { class DocIdAndFeatures; } @@ -30,6 +31,11 @@ public: virtual void add(uint32_t 
docId, const index::DocIdAndFeatures &features) = 0; /** + * Returns the reference to the current word (only used by unit tests). + */ + virtual datastore::EntryRef getWordRef() const = 0; + + /** * Remove (word, docId) tuple. */ virtual void remove(uint32_t docId) = 0; diff --git a/searchlib/src/vespa/searchlib/memoryindex/memory_index.cpp b/searchlib/src/vespa/searchlib/memoryindex/memory_index.cpp index 6686745f8c2..d3d3004100c 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/memory_index.cpp +++ b/searchlib/src/vespa/searchlib/memoryindex/memory_index.cpp @@ -3,16 +3,15 @@ #include "document_inverter.h" #include "field_index_collection.h" #include "memory_index.h" -#include "posting_iterator.h" #include <vespa/document/fieldvalue/arrayfieldvalue.h> #include <vespa/document/fieldvalue/document.h> -#include <vespa/vespalib/btree/btreenodeallocator.hpp> #include <vespa/searchlib/common/sequencedtaskexecutor.h> +#include <vespa/searchlib/index/field_length_calculator.h> #include <vespa/searchlib/index/schemautil.h> -#include <vespa/searchlib/queryeval/booleanmatchiteratorwrapper.h> #include <vespa/searchlib/queryeval/create_blueprint_visitor_helper.h> #include <vespa/searchlib/queryeval/emptysearch.h> #include <vespa/searchlib/queryeval/leaf_blueprints.h> +#include <vespa/vespalib/btree/btreenodeallocator.hpp> #include <vespa/log/log.h> LOG_SETUP(".searchlib.memoryindex.memory_index"); @@ -20,19 +19,17 @@ LOG_SETUP(".searchlib.memoryindex.memory_index"); using document::ArrayFieldValue; using document::WeightedSetFieldValue; using vespalib::LockGuard; -using vespalib::GenerationHandler; namespace search { -using fef::TermFieldMatchDataArray; using index::FieldLengthInfo; using index::IFieldLengthInspector; using index::IndexBuilder; using index::Schema; using index::SchemaUtil; -using query::NumberTerm; using query::LocationTerm; using query::Node; +using query::NumberTerm; using query::PredicateQuery; using query::PrefixTerm; using query::RangeTerm; @@ 
-40,16 +37,12 @@ using query::RegExpTerm; using query::StringTerm; using query::SubstringTerm; using query::SuffixTerm; -using queryeval::SearchIterator; -using queryeval::Searchable; -using queryeval::CreateBlueprintVisitorHelper; using queryeval::Blueprint; -using queryeval::BooleanMatchIteratorWrapper; +using queryeval::CreateBlueprintVisitorHelper; using queryeval::EmptyBlueprint; -using queryeval::FieldSpecBase; -using queryeval::FieldSpecBaseList; using queryeval::FieldSpec; using queryeval::IRequestContext; +using queryeval::Searchable; } @@ -141,47 +134,6 @@ MemoryIndex::dump(IndexBuilder &indexBuilder) namespace { -class MemTermBlueprint : public queryeval::SimpleLeafBlueprint { -private: - GenerationHandler::Guard _genGuard; - FieldIndex::PostingList::ConstIterator _pitr; - const FeatureStore &_featureStore; - const uint32_t _fieldId; - const bool _useBitVector; - -public: - MemTermBlueprint(GenerationHandler::Guard &&genGuard, - FieldIndex::PostingList::ConstIterator pitr, - const FeatureStore &featureStore, - const FieldSpecBase &field, - uint32_t fieldId, - bool useBitVector) - : SimpleLeafBlueprint(field), - _genGuard(), - _pitr(pitr), - _featureStore(featureStore), - _fieldId(fieldId), - _useBitVector(useBitVector) - { - _genGuard = std::move(genGuard); - HitEstimate estimate(_pitr.size(), !_pitr.valid()); - setEstimate(estimate); - } - - SearchIterator::UP createLeafSearch(const TermFieldMatchDataArray &tfmda, bool) const override { - auto search = std::make_unique<PostingIterator>(_pitr, _featureStore, _fieldId, tfmda); - if (_useBitVector) { - LOG(debug, "Return BooleanMatchIteratorWrapper: fieldId(%u), docCount(%zu)", - _fieldId, _pitr.size()); - return std::make_unique<BooleanMatchIteratorWrapper>(std::move(search), tfmda); - } - LOG(debug, "Return PostingIterator: fieldId(%u), docCount(%zu)", - _fieldId, _pitr.size()); - return search; - } - -}; - /** * Determines the correct Blueprint to use. 
**/ @@ -207,13 +159,8 @@ public: const vespalib::string termStr = queryeval::termAsString(n); LOG(debug, "searching for '%s' in '%s'", termStr.c_str(), _field.getName().c_str()); - FieldIndex *fieldIndex = _fieldIndexes.getFieldIndex(_fieldId); - GenerationHandler::Guard genGuard = fieldIndex->takeGenerationGuard(); - FieldIndex::PostingList::ConstIterator pitr = fieldIndex->findFrozen(termStr); - bool useBitVector = _field.isFilter(); - setResult(std::make_unique<MemTermBlueprint>(std::move(genGuard), pitr, - fieldIndex->getFeatureStore(), - _field, _fieldId, useBitVector)); + IFieldIndex* fieldIndex = _fieldIndexes.getFieldIndex(_fieldId); + setResult(fieldIndex->make_term_blueprint(termStr, _field, _fieldId)); } void visit(LocationTerm &n) override { visitTerm(n); } diff --git a/searchlib/src/vespa/searchlib/memoryindex/ordered_field_index_inserter.cpp b/searchlib/src/vespa/searchlib/memoryindex/ordered_field_index_inserter.cpp index 637a13d67be..c6524a2fc64 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/ordered_field_index_inserter.cpp +++ b/searchlib/src/vespa/searchlib/memoryindex/ordered_field_index_inserter.cpp @@ -27,7 +27,8 @@ const vespalib::string emptyWord = ""; } -OrderedFieldIndexInserter::OrderedFieldIndexInserter(FieldIndex &fieldIndex) +template <bool interleaved_features> +OrderedFieldIndexInserter<interleaved_features>::OrderedFieldIndexInserter(FieldIndexType& fieldIndex) : _word(), _prevDocId(noDocId), _prevAdd(false), @@ -39,10 +40,12 @@ OrderedFieldIndexInserter::OrderedFieldIndexInserter(FieldIndex &fieldIndex) { } -OrderedFieldIndexInserter::~OrderedFieldIndexInserter() = default; +template <bool interleaved_features> +OrderedFieldIndexInserter<interleaved_features>::~OrderedFieldIndexInserter() = default; +template <bool interleaved_features> void -OrderedFieldIndexInserter::flushWord() +OrderedFieldIndexInserter<interleaved_features>::flushWord() { if (_removes.empty() && _adds.empty()) { return; @@ -64,21 +67,24 @@ 
OrderedFieldIndexInserter::flushWord() _adds.clear(); } +template <bool interleaved_features> void -OrderedFieldIndexInserter::flush() +OrderedFieldIndexInserter<interleaved_features>::flush() { flushWord(); _listener.flush(); } +template <bool interleaved_features> void -OrderedFieldIndexInserter::commit() +OrderedFieldIndexInserter<interleaved_features>::commit() { _fieldIndex.commit(); } +template <bool interleaved_features> void -OrderedFieldIndexInserter::setNextWord(const vespalib::stringref word) +OrderedFieldIndexInserter<interleaved_features>::setNextWord(const vespalib::stringref word) { // TODO: Adjust here if zero length words should be legal. assert(_word < word); @@ -103,22 +109,24 @@ OrderedFieldIndexInserter::setNextWord(const vespalib::stringref word) assert(_word == wordStore.getWord(_dItr.getKey()._wordRef)); } +template <bool interleaved_features> void -OrderedFieldIndexInserter::add(uint32_t docId, - const index::DocIdAndFeatures &features) +OrderedFieldIndexInserter<interleaved_features>::add(uint32_t docId, + const index::DocIdAndFeatures &features) { assert(docId != noDocId); assert(_prevDocId == noDocId || _prevDocId < docId || (_prevDocId == docId && !_prevAdd)); datastore::EntryRef featureRef = _fieldIndex.addFeatures(features); - _adds.push_back(PostingListKeyDataType(docId, featureRef.ref())); + _adds.push_back(PostingListKeyDataType(docId, PostingListEntryType(featureRef))); _listener.insert(_dItr.getKey()._wordRef, docId); _prevDocId = docId; _prevAdd = true; } +template <bool interleaved_features> void -OrderedFieldIndexInserter::remove(uint32_t docId) +OrderedFieldIndexInserter<interleaved_features>::remove(uint32_t docId) { assert(docId != noDocId); assert(_prevDocId == noDocId || _prevDocId < docId); @@ -127,8 +135,9 @@ OrderedFieldIndexInserter::remove(uint32_t docId) _prevAdd = false; } +template <bool interleaved_features> void -OrderedFieldIndexInserter::rewind() +OrderedFieldIndexInserter<interleaved_features>::rewind() { 
assert(_removes.empty() && _adds.empty()); _word = ""; @@ -137,10 +146,14 @@ OrderedFieldIndexInserter::rewind() _dItr.begin(); } +template <bool interleaved_features> datastore::EntryRef -OrderedFieldIndexInserter::getWordRef() const +OrderedFieldIndexInserter<interleaved_features>::getWordRef() const { return _dItr.getKey()._wordRef; } +template +class OrderedFieldIndexInserter<false>; + } diff --git a/searchlib/src/vespa/searchlib/memoryindex/ordered_field_index_inserter.h b/searchlib/src/vespa/searchlib/memoryindex/ordered_field_index_inserter.h index 18765f9bae3..0e04b126f32 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/ordered_field_index_inserter.h +++ b/searchlib/src/vespa/searchlib/memoryindex/ordered_field_index_inserter.h @@ -18,26 +18,30 @@ class IFieldIndexInsertListener; * and for each word updating the posting list with docId adds / removes. * * Insert order must be properly sorted, first by word, then by docId. + * + * The template parameter specifies whether the posting lists of the field index have interleaved features or not. 
*/ +template <bool interleaved_features> class OrderedFieldIndexInserter : public IOrderedFieldIndexInserter { private: vespalib::stringref _word; uint32_t _prevDocId; bool _prevAdd; - using DictionaryTree = FieldIndex::DictionaryTree; - using PostingListStore = FieldIndex::PostingListStore; - using KeyComp = FieldIndex::KeyComp; - using WordKey = FieldIndex::WordKey; - using PostingListKeyDataType = FieldIndex::PostingListKeyDataType; - FieldIndex &_fieldIndex; - DictionaryTree::Iterator _dItr; + using FieldIndexType = FieldIndex<interleaved_features>; + using DictionaryTree = typename FieldIndexType::DictionaryTree; + using PostingListStore = typename FieldIndexType::PostingListStore; + using KeyComp = typename FieldIndexType::KeyComp; + using WordKey = typename FieldIndexType::WordKey; + using PostingListEntryType = typename FieldIndexType::PostingListEntryType; + using PostingListKeyDataType = typename FieldIndexType::PostingListKeyDataType; + FieldIndexType& _fieldIndex; + typename DictionaryTree::Iterator _dItr; IFieldIndexInsertListener &_listener; // Pending changes to posting list for (_word) std::vector<uint32_t> _removes; std::vector<PostingListKeyDataType> _adds; - static constexpr uint32_t noFieldId = std::numeric_limits<uint32_t>::max(); static constexpr uint32_t noDocId = std::numeric_limits<uint32_t>::max(); @@ -49,7 +53,7 @@ private: void flushWord(); public: - OrderedFieldIndexInserter(FieldIndex &fieldIndex); + OrderedFieldIndexInserter(FieldIndexType& fieldIndex); ~OrderedFieldIndexInserter() override; void setNextWord(const vespalib::stringref word) override; void add(uint32_t docId, const index::DocIdAndFeatures &features) override; @@ -71,8 +75,7 @@ public: */ void rewind() override; - // Used by unit test - datastore::EntryRef getWordRef() const; + datastore::EntryRef getWordRef() const override; }; } diff --git a/searchlib/src/vespa/searchlib/memoryindex/posting_iterator.cpp b/searchlib/src/vespa/searchlib/memoryindex/posting_iterator.cpp 
index 63040aab66f..0e84c2b7968 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/posting_iterator.cpp +++ b/searchlib/src/vespa/searchlib/memoryindex/posting_iterator.cpp @@ -6,16 +6,18 @@ #include <vespa/vespalib/btree/btreenodeallocator.hpp> #include <vespa/vespalib/btree/btreenodestore.hpp> #include <vespa/vespalib/btree/btreeroot.hpp> +#include <vespa/vespalib/btree/btreestore.hpp> #include <vespa/log/log.h> LOG_SETUP(".searchlib.memoryindex.posting_iterator"); namespace search::memoryindex { -PostingIterator::PostingIterator(FieldIndex::PostingList::ConstIterator itr, - const FeatureStore & featureStore, - uint32_t packedIndex, - const fef::TermFieldMatchDataArray & matchData) : +template <bool interleaved_features> +PostingIterator<interleaved_features>::PostingIterator(PostingListIteratorType itr, + const FeatureStore& featureStore, + uint32_t packedIndex, + const fef::TermFieldMatchDataArray& matchData) : queryeval::RankedSearchIteratorBase(matchData), _itr(itr), _featureStore(featureStore), @@ -24,10 +26,12 @@ PostingIterator::PostingIterator(FieldIndex::PostingList::ConstIterator itr, _featureStore.setupForField(packedIndex, _featureDecoder); } -PostingIterator::~PostingIterator() {} +template <bool interleaved_features> +PostingIterator<interleaved_features>::~PostingIterator() = default; +template <bool interleaved_features> void -PostingIterator::initRange(uint32_t begin, uint32_t end) +PostingIterator<interleaved_features>::initRange(uint32_t begin, uint32_t end) { SearchIterator::initRange(begin, end); _itr.lower_bound(begin); @@ -39,8 +43,9 @@ PostingIterator::initRange(uint32_t begin, uint32_t end) clearUnpacked(); } +template <bool interleaved_features> void -PostingIterator::doSeek(uint32_t docId) +PostingIterator<interleaved_features>::doSeek(uint32_t docId) { if (getUnpacked()) { clearUnpacked(); @@ -53,8 +58,9 @@ PostingIterator::doSeek(uint32_t docId) } } +template <bool interleaved_features> void -PostingIterator::doUnpack(uint32_t 
docId) +PostingIterator<interleaved_features>::doUnpack(uint32_t docId) { if (!_matchData.valid() || getUnpacked()) { return; @@ -62,11 +68,70 @@ PostingIterator::doUnpack(uint32_t docId) assert(docId == getDocId()); assert(_itr.valid()); assert(docId == _itr.getKey()); - datastore::EntryRef featureRef(_itr.getData()); + datastore::EntryRef featureRef(_itr.getData().get_features()); _featureStore.setupForUnpackFeatures(featureRef, _featureDecoder); _featureDecoder.unpackFeatures(_matchData, docId); setUnpacked(); } +template +class PostingIterator<false>; + } +namespace search::btree { + +template class BTreeNodeTT<uint32_t, + search::memoryindex::PostingListEntry<false>, + search::btree::NoAggregated, + BTreeDefaultTraits::INTERNAL_SLOTS>; + +template class BTreeLeafNode<uint32_t, + search::memoryindex::PostingListEntry<false>, + search::btree::NoAggregated, + BTreeDefaultTraits::LEAF_SLOTS>; + +template class BTreeNodeStore<uint32_t, + search::memoryindex::PostingListEntry<false>, + search::btree::NoAggregated, + BTreeDefaultTraits::INTERNAL_SLOTS, + BTreeDefaultTraits::LEAF_SLOTS>; + +template class BTreeIteratorBase<uint32_t, + search::memoryindex::PostingListEntry<false>, + search::btree::NoAggregated, + BTreeDefaultTraits::INTERNAL_SLOTS, + BTreeDefaultTraits::LEAF_SLOTS, + BTreeDefaultTraits::PATH_SIZE>; + +template class BTreeIterator<uint32_t, + search::memoryindex::PostingListEntry<false>, + search::btree::NoAggregated, + std::less<uint32_t>, + BTreeDefaultTraits>; + +template class BTree<uint32_t, + search::memoryindex::PostingListEntry<false>, + search::btree::NoAggregated, + std::less<uint32_t>, + BTreeDefaultTraits>; + +template class BTreeRoot<uint32_t, + search::memoryindex::PostingListEntry<false>, + search::btree::NoAggregated, + std::less<uint32_t>, + BTreeDefaultTraits>; + +template class BTreeRootBase<uint32_t, + search::memoryindex::PostingListEntry<false>, + search::btree::NoAggregated, + BTreeDefaultTraits::INTERNAL_SLOTS, + 
BTreeDefaultTraits::LEAF_SLOTS>; + +template class BTreeNodeAllocator<uint32_t, + search::memoryindex::PostingListEntry<false>, + search::btree::NoAggregated, + BTreeDefaultTraits::INTERNAL_SLOTS, + BTreeDefaultTraits::LEAF_SLOTS>; + +} diff --git a/searchlib/src/vespa/searchlib/memoryindex/posting_iterator.h b/searchlib/src/vespa/searchlib/memoryindex/posting_iterator.h index de337ef49f3..f029c837cf7 100644 --- a/searchlib/src/vespa/searchlib/memoryindex/posting_iterator.h +++ b/searchlib/src/vespa/searchlib/memoryindex/posting_iterator.h @@ -9,10 +9,15 @@ namespace search::memoryindex { /** * Search iterator for memory field index posting list. + * + * The template parameter specifies whether the wrapped posting list has interleaved features or not. */ +template <bool interleaved_features> class PostingIterator : public queryeval::RankedSearchIteratorBase { private: - FieldIndex::PostingList::ConstIterator _itr; + using FieldIndexType = FieldIndex<interleaved_features>; + using PostingListIteratorType = typename FieldIndexType::PostingList::ConstIterator; + PostingListIteratorType _itr; const FeatureStore &_featureStore; FeatureStore::DecodeContextCooked _featureDecoder; @@ -25,7 +30,7 @@ public: * @param packedIndex the field or field collection owning features. * @param matchData the match data to unpack features into. **/ - PostingIterator(FieldIndex::PostingList::ConstIterator itr, + PostingIterator(PostingListIteratorType itr, const FeatureStore &featureStore, uint32_t packedIndex, const fef::TermFieldMatchDataArray &matchData); diff --git a/searchlib/src/vespa/searchlib/memoryindex/posting_list_entry.h b/searchlib/src/vespa/searchlib/memoryindex/posting_list_entry.h new file mode 100644 index 00000000000..373de21e836 --- /dev/null +++ b/searchlib/src/vespa/searchlib/memoryindex/posting_list_entry.h @@ -0,0 +1,37 @@ +// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+ +# pragma once + +#include <vespa/vespalib/datastore/entryref.h> + +namespace search::memoryindex { + +/** + * Entry per document in memory index posting list. + */ +template <bool interleaved_features> +class PostingListEntry { + mutable datastore::EntryRef _features; // reference to compressed features + +public: + explicit PostingListEntry(datastore::EntryRef features) + : _features(features) + { + } + + PostingListEntry() + : _features() + { + } + + datastore::EntryRef get_features() const { return _features; } + + /* + * Reference moved features (used when compacting FeatureStore). + * The moved features must have the same content as the original + * features. + */ + void update_features(datastore::EntryRef features) const { _features = features; } +}; + +} diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fake_match_loop.cpp b/searchlib/src/vespa/searchlib/test/fakedata/fake_match_loop.cpp index 7892f9fa70b..1b7f794826d 100644 --- a/searchlib/src/vespa/searchlib/test/fakedata/fake_match_loop.cpp +++ b/searchlib/src/vespa/searchlib/test/fakedata/fake_match_loop.cpp @@ -32,7 +32,7 @@ public: { _tfmda.add(&_md); _md.setNeedNormalFeatures(posting.enable_unpack_normal_features()); - _md.setNeedCheapFeatures(posting.enable_unpack_cheap_features()); + _md.setNeedInterleavedFeatures(posting.enable_unpack_interleaved_features()); _itr.reset(posting.createIterator(_tfmda)); } ~IteratorState() {} diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp b/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp index 54c0aa866b4..a4996c931e2 100644 --- a/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp +++ b/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.cpp @@ -129,10 +129,10 @@ search::queryeval::SearchIterator * FakeMemTreeOcc:: createIterator(const fef::TermFieldMatchDataArray &matchData) const { - return new search::memoryindex::PostingIterator(_tree.begin(_allocator), - _mgr._featureStore, - _packedIndex, 
- matchData); + return new search::memoryindex::PostingIterator<false>(_tree.begin(_allocator), + _mgr._featureStore, + _packedIndex, + matchData); } @@ -261,13 +261,13 @@ FakeMemTreeOccMgr::flush() lastWord = wordIdx; if (i->getRemove()) { if (itr.valid() && itr.getKey() == docId) { - uint64_t bits = _featureStore.bitSize(fw->getPackedIndex(), EntryRef(itr.getData())); + uint64_t bits = _featureStore.bitSize(fw->getPackedIndex(), EntryRef(itr.getData().get_features())); _featureSizes[wordIdx] -= RefType::align((bits + 7) / 8) * 8; tree.remove(itr); } } else { if (!itr.valid() || docId < itr.getKey()) { - tree.insert(itr, docId, i->getFeatureRef().ref()); + tree.insert(itr, docId, PostingListEntryType(i->getFeatureRef())); } } } diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.h b/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.h index f0363500559..69114611fe6 100644 --- a/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.h +++ b/searchlib/src/vespa/searchlib/test/fakedata/fakememtreeocc.h @@ -9,18 +9,18 @@ #include <vespa/searchlib/bitcompression/compression.h> #include <vespa/searchlib/bitcompression/posocccompression.h> -namespace search { -namespace fakedata { +namespace search::fakedata { -class FakeMemTreeOccMgr : public FakeWord::RandomizedWriter -{ +class FakeMemTreeOccMgr : public FakeWord::RandomizedWriter { public: - typedef memoryindex::FieldIndex::PostingList Tree; - typedef Tree::NodeAllocatorType NodeAllocator; - typedef memoryindex::FeatureStore FeatureStore; - typedef datastore::EntryRef EntryRef; - typedef index::Schema Schema; - typedef bitcompression::PosOccFieldsParams PosOccFieldsParams; + // TODO: Create implementation for "interleaved features" posting list as well. 
+ using Tree = memoryindex::FieldIndex<false>::PostingList; + using PostingListEntryType = memoryindex::FieldIndex<false>::PostingListEntryType; + using NodeAllocator = Tree::NodeAllocatorType; + using FeatureStore = memoryindex::FeatureStore; + using EntryRef = datastore::EntryRef; + using Schema = index::Schema; + using PosOccFieldsParams = bitcompression::PosOccFieldsParams; vespalib::GenerationHandler _generationHandler; NodeAllocator _allocator; @@ -179,6 +179,4 @@ public: queryeval::SearchIterator *createIterator(const fef::TermFieldMatchDataArray &matchData) const override; }; -} // namespace fakedata - -} // namespace search +} diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakeposting.cpp b/searchlib/src/vespa/searchlib/test/fakedata/fakeposting.cpp index fed34c85be1..a72fa7703cd 100644 --- a/searchlib/src/vespa/searchlib/test/fakedata/fakeposting.cpp +++ b/searchlib/src/vespa/searchlib/test/fakedata/fakeposting.cpp @@ -50,7 +50,7 @@ FakePosting::l4SkipBitSize() const } bool -FakePosting::has_cheap_features() const +FakePosting::has_interleaved_features() const { return false; } @@ -62,7 +62,7 @@ FakePosting::enable_unpack_normal_features() const } bool -FakePosting::enable_unpack_cheap_features() const +FakePosting::enable_unpack_interleaved_features() const { return true; } diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakeposting.h b/searchlib/src/vespa/searchlib/test/fakedata/fakeposting.h index 2010345a2eb..3fcc2427880 100644 --- a/searchlib/src/vespa/searchlib/test/fakedata/fakeposting.h +++ b/searchlib/src/vespa/searchlib/test/fakedata/fakeposting.h @@ -62,11 +62,11 @@ public: virtual bool hasWordPositions() const = 0; - virtual bool has_cheap_features() const; + virtual bool has_interleaved_features() const; virtual bool enable_unpack_normal_features() const; - virtual bool enable_unpack_cheap_features() const; + virtual bool enable_unpack_interleaved_features() const; /* * Single posting list performance, without feature 
unpack. diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakeword.cpp b/searchlib/src/vespa/searchlib/test/fakedata/fakeword.cpp index e40cfd1748c..19080acaad3 100644 --- a/searchlib/src/vespa/searchlib/test/fakedata/fakeword.cpp +++ b/searchlib/src/vespa/searchlib/test/fakedata/fakeword.cpp @@ -397,7 +397,7 @@ FakeWord::validate(search::queryeval::SearchIterator *iterator, const fef::TermFieldMatchDataArray &matchData, uint32_t stride, bool unpack_normal_features, - bool unpack_cheap_features, + bool unpack_interleaved_features, bool verbose) const { iterator->initFullRange(); @@ -435,7 +435,7 @@ FakeWord::validate(search::queryeval::SearchIterator *iterator, for (size_t lfi = 0; lfi < matchData.size(); ++lfi) { if (matchData[lfi]->getDocId() != docId) continue; - if (unpack_cheap_features) { + if (unpack_interleaved_features) { assert(d->_collapsedDocWordFeatures._field_len == matchData[lfi]->getFieldLength()); assert(d->_collapsedDocWordFeatures._num_occs == matchData[lfi]->getNumOccs()); } else { @@ -477,7 +477,7 @@ bool FakeWord::validate(search::queryeval::SearchIterator *iterator, const fef::TermFieldMatchDataArray &matchData, bool unpack_normal_features, - bool unpack_cheap_features, + bool unpack_interleaved_features, bool verbose) const { iterator->initFullRange(); @@ -503,7 +503,7 @@ FakeWord::validate(search::queryeval::SearchIterator *iterator, for (size_t lfi = 0; lfi < matchData.size(); ++lfi) { if (matchData[lfi]->getDocId() != docId) continue; - if (unpack_cheap_features) { + if (unpack_interleaved_features) { assert(d->_collapsedDocWordFeatures._field_len == matchData[lfi]->getFieldLength()); assert(d->_collapsedDocWordFeatures._num_occs == matchData[lfi]->getNumOccs()); } else { @@ -589,7 +589,7 @@ bool FakeWord::validate(FieldReader &fieldReader, uint32_t wordNum, const fef::TermFieldMatchDataArray &matchData, - bool decode_cheap_features, + bool decode_interleaved_features, bool verbose) const { uint32_t docId = 0; @@ -621,7 +621,7 @@ 
FakeWord::validate(FieldReader &fieldReader, docId = features.doc_id(); assert(d != de); assert(d->_docId == docId); - if (decode_cheap_features) { + if (decode_interleaved_features) { assert(d->_collapsedDocWordFeatures._field_len == features.field_length()); assert(d->_collapsedDocWordFeatures._num_occs == features.num_occs()); } diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakeword.h b/searchlib/src/vespa/searchlib/test/fakedata/fakeword.h index 1b0e315f157..30f960c2d4c 100644 --- a/searchlib/src/vespa/searchlib/test/fakedata/fakeword.h +++ b/searchlib/src/vespa/searchlib/test/fakedata/fakeword.h @@ -241,14 +241,14 @@ public: const fef::TermFieldMatchDataArray &matchData, uint32_t stride, bool unpack_normal_features, - bool unpack_cheap_features, + bool unpack_interleaved_features, bool verbose) const; bool validate(search::queryeval::SearchIterator *iterator, const fef::TermFieldMatchDataArray &matchData, bool unpack_normal_features, - bool unpack_cheap_features, + bool unpack_interleaved_features, bool verbose) const; bool validate(search::queryeval::SearchIterator *iterator, @@ -258,7 +258,7 @@ public: validate(search::diskindex::FieldReader &fieldReader, uint32_t wordNum, const fef::TermFieldMatchDataArray &matchData, - bool decode_cheap_features, + bool decode_interleaved_features, bool verbose) const; void validate(const std::vector<uint32_t> &docIds) const; diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakezcfilterocc.cpp b/searchlib/src/vespa/searchlib/test/fakedata/fakezcfilterocc.cpp index f4189d0ce66..d6c8ffc5caf 100644 --- a/searchlib/src/vespa/searchlib/test/fakedata/fakezcfilterocc.cpp +++ b/searchlib/src/vespa/searchlib/test/fakedata/fakezcfilterocc.cpp @@ -173,7 +173,7 @@ FakeZcFilterOcc::setupT(const FakeWord &fw) params.set("docIdLimit", fw._docIdLimit); params.set("minChunkDocs", _posting_params._min_chunk_docs); // Control chunking params.set("minSkipDocs", _posting_params._min_skip_docs); // Control skip info - 
params.set("cheap_features", _posting_params._encode_cheap_features); + params.set("interleaved_features", _posting_params._encode_interleaved_features); writer.set_posting_list_params(params); auto &writeContext = writer.get_write_context(); search::ComprBuffer &cb = writeContext; @@ -278,7 +278,7 @@ FakeZcFilterOcc::validate_read(const FakeWord &fw) const assert(features.doc_id() == doc._docId); assert(features.elements().size() == check_features.elements().size()); assert(features.word_positions().size() == check_features.word_positions().size()); - if (_posting_params._encode_cheap_features) { + if (_posting_params._encode_interleaved_features) { assert(features.field_length() == doc._collapsedDocWordFeatures._field_len); assert(features.num_occs() == doc._collapsedDocWordFeatures._num_occs); } @@ -318,9 +318,9 @@ FakeZcFilterOcc::hasWordPositions() const } bool -FakeZcFilterOcc::has_cheap_features() const +FakeZcFilterOcc::has_interleaved_features() const { - return _posting_params._encode_cheap_features; + return _posting_params._encode_interleaved_features; } size_t @@ -725,7 +725,7 @@ class FakeZc4SkipPosOcc : public FakeZcFilterOcc search::index::PostingListCounts _counts; protected: bool _unpack_normal_features; - bool _unpack_cheap_features; + bool _unpack_interleaved_features; FakeZc4SkipPosOcc(const FakeWord &fw, const Zc4PostingParams &posting_params, const char *name_suffix); public: FakeZc4SkipPosOcc(const FakeWord &fw); @@ -734,7 +734,7 @@ public: bool hasWordPositions() const override; SearchIterator *createIterator(const TermFieldMatchDataArray &matchData) const override; bool enable_unpack_normal_features() const override { return _unpack_normal_features; } - bool enable_unpack_cheap_features() const override { return _unpack_cheap_features; } + bool enable_unpack_interleaved_features() const override { return _unpack_interleaved_features; } }; @@ -743,7 +743,7 @@ FakeZc4SkipPosOcc<bigEndian>::FakeZc4SkipPosOcc(const FakeWord &fw, const Zc4Pos : 
FakeZcFilterOcc(fw, bigEndian, posting_params, name_suffix), _counts(), _unpack_normal_features(true), - _unpack_cheap_features(true) + _unpack_interleaved_features(true) { setup(fw); _counts._bitLength = _compressedBits; @@ -784,10 +784,10 @@ createIterator(const TermFieldMatchDataArray &matchData) const { if (matchData.valid()) { assert(_unpack_normal_features == matchData[0]->needs_normal_features()); - assert(_unpack_cheap_features == matchData[0]->needs_cheap_features()); + assert(_unpack_interleaved_features == matchData[0]->needs_interleaved_features()); } else { assert(!_unpack_normal_features); - assert(!_unpack_cheap_features); + assert(!_unpack_interleaved_features); } return create_zc_posocc_iterator(bigEndian, _counts, Position(_compressed.first, 0), _compressedBits, _posting_params, _fieldsParams, matchData).release(); } @@ -821,7 +821,7 @@ public: : FakeZc4SkipPosOcc<true>(fw, Zc4PostingParams(force_skip, disable_chunking, fw._docIdLimit, false, true, true), ".zc4skipposoccbe.cf.ncu") { - _unpack_cheap_features = false; + _unpack_interleaved_features = false; } }; @@ -854,7 +854,7 @@ public: : FakeZc4SkipPosOcc<true>(fw, Zc4PostingParams(disable_skip, disable_chunking, fw._docIdLimit, false, true, true), ".zc4noskipposoccbe.cf.ncu") { - _unpack_cheap_features = false; + _unpack_interleaved_features = false; } }; diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakezcfilterocc.h b/searchlib/src/vespa/searchlib/test/fakedata/fakezcfilterocc.h index 398f537c67a..030c4b7e936 100644 --- a/searchlib/src/vespa/searchlib/test/fakedata/fakezcfilterocc.h +++ b/searchlib/src/vespa/searchlib/test/fakedata/fakezcfilterocc.h @@ -57,7 +57,7 @@ public: size_t bitSize() const override; bool hasWordPositions() const override; - bool has_cheap_features() const override; + bool has_interleaved_features() const override; size_t skipBitSize() const override; size_t l1SkipBitSize() const override; size_t l2SkipBitSize() const override; diff --git 
a/searchlib/src/vespa/searchlib/test/memoryindex/ordered_field_index_inserter.h b/searchlib/src/vespa/searchlib/test/memoryindex/ordered_field_index_inserter.h index c14c454bad6..c0ea7be0ce1 100644 --- a/searchlib/src/vespa/searchlib/test/memoryindex/ordered_field_index_inserter.h +++ b/searchlib/src/vespa/searchlib/test/memoryindex/ordered_field_index_inserter.h @@ -11,45 +11,38 @@ class OrderedFieldIndexInserter : public IOrderedFieldIndexInserter { std::stringstream _ss; bool _first; bool _verbose; - bool _show_cheap_features; + bool _show_interleaved_features; uint32_t _fieldId; - void - addComma() - { + void addComma() { if (!_first) { _ss << ","; } else { _first = false; } } + public: OrderedFieldIndexInserter() : _ss(), _first(true), _verbose(false), - _show_cheap_features(false), + _show_interleaved_features(false), _fieldId(0) { } - virtual void - setNextWord(const vespalib::stringref word) override - { + virtual void setNextWord(const vespalib::stringref word) override { addComma(); _ss << "w=" << word; } - void - setFieldId(uint32_t fieldId) - { + void setFieldId(uint32_t fieldId) { _fieldId = fieldId; } - virtual void - add(uint32_t docId, - const index::DocIdAndFeatures &features) override - { + virtual void add(uint32_t docId, + const index::DocIdAndFeatures &features) override { (void) features; addComma(); _ss << "a=" << docId; @@ -57,7 +50,7 @@ public: _ss << "("; auto wpi = features.word_positions().begin(); bool firstElement = true; - if (_show_cheap_features) { + if (_show_interleaved_features) { _ss << "fl=" << features.field_length() << ",occs=" << features.num_occs(); firstElement = false; @@ -85,9 +78,9 @@ public: } } - virtual void - remove(uint32_t docId) override - { + virtual datastore::EntryRef getWordRef() const override { return datastore::EntryRef(); } + + virtual void remove(uint32_t docId) override { addComma(); _ss << "r=" << docId; } @@ -99,22 +92,18 @@ public: _ss << "f=" << _fieldId; } - std::string - toStr() const - { + 
std::string toStr() const { return _ss.str(); } - void - reset() - { + void reset() { _ss.str(""); _first = true; _verbose = false; } void setVerbose() { _verbose = true; } - void set_show_cheap_features() { _show_cheap_features = true; } + void set_show_interleaved_features() { _show_interleaved_features = true; } }; } diff --git a/searchlib/src/vespa/searchlib/test/memoryindex/wrap_inserter.h b/searchlib/src/vespa/searchlib/test/memoryindex/wrap_inserter.h index eeb09898aa2..268bf834d21 100644 --- a/searchlib/src/vespa/searchlib/test/memoryindex/wrap_inserter.h +++ b/searchlib/src/vespa/searchlib/test/memoryindex/wrap_inserter.h @@ -12,7 +12,7 @@ namespace search::memoryindex::test { */ class WrapInserter { private: - OrderedFieldIndexInserter& _inserter; + IOrderedFieldIndexInserter& _inserter; public: WrapInserter(FieldIndexCollection& field_indexes, uint32_t field_id) @@ -20,7 +20,7 @@ public: { } - WrapInserter(FieldIndex& field_index) + WrapInserter(IFieldIndex& field_index) : _inserter(field_index.getInserter()) { } diff --git a/slobrok/src/tests/configure/configure.cpp b/slobrok/src/tests/configure/configure.cpp index 2783d0e3ebf..bf41b77ab05 100644 --- a/slobrok/src/tests/configure/configure.cpp +++ b/slobrok/src/tests/configure/configure.cpp @@ -80,7 +80,7 @@ struct SpecList bool compare(MirrorAPI &api, const char *pattern, SpecList expect) { - for (int i = 0; i < 250; ++i) { + for (int i = 0; i < 600; ++i) { SpecList actual(api.lookup(pattern)); if (actual == expect) { return true; diff --git a/staging_vespalib/src/vespa/vespalib/stllike/lrucache_map.h b/staging_vespalib/src/vespa/vespalib/stllike/lrucache_map.h index 128a7dc9424..07137263cf6 100644 --- a/staging_vespalib/src/vespa/vespalib/stllike/lrucache_map.h +++ b/staging_vespalib/src/vespa/vespalib/stllike/lrucache_map.h @@ -3,6 +3,7 @@ #include <vespa/vespalib/stllike/hashtable.h> #include <vespa/vespalib/stllike/hash_fun.h> +#include <vespa/vespalib/stllike/select.h> #include <vector> namespace 
vespalib { @@ -29,7 +30,7 @@ struct LruParam { typedef LinkedValue<V> LV; typedef std::pair< K, LV > value_type; - typedef std::_Select1st< value_type > select_key; + typedef vespalib::Select1st< value_type > select_key; typedef K Key; typedef V Value; typedef H Hash; diff --git a/tenant-base/pom.xml b/tenant-base/pom.xml index de710e6e1b8..3c48d22085e 100644 --- a/tenant-base/pom.xml +++ b/tenant-base/pom.xml @@ -217,14 +217,14 @@ </profile> <profile> - <id>functional-tests</id> + <id>system-tests</id> <build> <plugins> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-surefire-plugin</artifactId> <configuration> - <groups>ai.vespa.hosted.cd.FunctionalTest</groups> + <groups>ai.vespa.hosted.cd.SystemTest</groups> <excludedGroups>ai.vespa.hosted.cd.EmptyGroup</excludedGroups> </configuration> </plugin> @@ -233,14 +233,14 @@ </profile> <profile> - <id>upgrade-tests</id> + <id>staging-tests</id> <build> <plugins> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-surefire-plugin</artifactId> <configuration> - <groups>ai.vespa.hosted.cd.UpgradeTest</groups> + <groups>ai.vespa.hosted.cd.StagingTest</groups> <excludedGroups>ai.vespa.hosted.cd.EmptyGroup</excludedGroups> </configuration> </plugin> @@ -371,8 +371,8 @@ <artifactId>maven-surefire-plugin</artifactId> <configuration> <excludedGroups> - ai.vespa.hosted.cd.FunctionalTest, - ai.vespa.hosted.cd.UpgradeTest, + ai.vespa.hosted.cd.SystemTest, + ai.vespa.hosted.cd.StagingTest, ai.vespa.hosted.cd.ProductionTest </excludedGroups> </configuration> diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/StagingTest.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/StagingTest.java new file mode 100644 index 00000000000..40377da30ef --- /dev/null +++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/StagingTest.java @@ -0,0 +1,22 @@ +// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package ai.vespa.hosted.cd; + +/** + * Tests that assert continuity of behaviour for Vespa application deployments, through upgrades. + * + * These tests are run whenever a change is pushed to a Vespa application, and whenever the Vespa platform + * is upgraded, and before any deployments to production zones. When these tests fails, the tested change to + * the Vespa application is not rolled out. + * + * A typical upgrade test is to do some operations against a test deployment prior to upgrade, like feed and + * search for some documents, perhaps recording some metrics from the deployment, and then to upgrade it, + * repeat the exercise, and compare the results from pre and post upgrade. + * + * TODO Split in platform upgrades and application upgrades? + * + * @author jonmv + */ +public interface StagingTest { + // Want to verify documents are not damaged by upgrade. + // May want to verify metrics during upgrade. +} diff --git a/tenant-cd/src/main/java/ai/vespa/hosted/cd/SystemTest.java b/tenant-cd/src/main/java/ai/vespa/hosted/cd/SystemTest.java new file mode 100644 index 00000000000..c67d86fc8de --- /dev/null +++ b/tenant-cd/src/main/java/ai/vespa/hosted/cd/SystemTest.java @@ -0,0 +1,30 @@ +// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package ai.vespa.hosted.cd; + +/** + * Tests that compare the behaviour of a Vespa application deployment against a fixed specification. + * + * These tests are run whenever a change is pushed to a Vespa application, and whenever the Vespa platform + * is upgraded, and before any deployments to production zones. When these tests fails, the tested change to + * the Vespa application is not rolled out. + * + * A typical system test is to feed some documents, optionally verifying that the documents have been processed + * as expected, and then to see that queries give the expected results. 
Another common use is to verify integration + * with external services. + * + * @author jonmv + */ +public interface SystemTest { + // Want to feed some documents. + // Want to verify document processing and routing is as expected. + // Want to check recall on those documents. + // Want to verify queries give expected documents. + // Want to verify searchers. + // Want to verify updates. + // Want to verify deletion. + // May want to verify reprocessing. + // Must likely delete documents between tests. + // Must be able to feed documents, setting route. + // Must be able to search. + // Must be able to visit. +} diff --git a/vespa-testrunner-components/src/main/java/com/yahoo/vespa/hosted/testrunner/TestProfile.java b/vespa-testrunner-components/src/main/java/com/yahoo/vespa/hosted/testrunner/TestProfile.java index 8170af15535..018acb17387 100644 --- a/vespa-testrunner-components/src/main/java/com/yahoo/vespa/hosted/testrunner/TestProfile.java +++ b/vespa-testrunner-components/src/main/java/com/yahoo/vespa/hosted/testrunner/TestProfile.java @@ -6,8 +6,8 @@ package com.yahoo.vespa.hosted.testrunner; */ enum TestProfile { - SYSTEM_TEST("ai.vespa.hosted.cd.FunctionalTest, com.yahoo.vespa.tenant.systemtest.base.SystemTest", true), - STAGING_TEST("ai.vespa.hosted.cd.UpgradeTest, com.yahoo.vespa.tenant.systemtest.base.StagingTest", true), + SYSTEM_TEST("ai.vespa.hosted.cd.SystemTest, com.yahoo.vespa.tenant.systemtest.base.SystemTest", true), + STAGING_TEST("ai.vespa.hosted.cd.StagingTest, com.yahoo.vespa.tenant.systemtest.base.StagingTest", true), PRODUCTION_TEST("ai.vespa.hosted.cd.ProductionTest, com.yahoo.vespa.tenant.systemtest.base.ProductionTest", false); private final String group; diff --git a/vespa-testrunner-components/src/test/resources/pom.xml_system_tests b/vespa-testrunner-components/src/test/resources/pom.xml_system_tests index d4b5bd54404..86c36afd636 100644 --- a/vespa-testrunner-components/src/test/resources/pom.xml_system_tests +++ 
b/vespa-testrunner-components/src/test/resources/pom.xml_system_tests @@ -47,10 +47,10 @@ <dependenciesToScan> <dependency>com.yahoo.vespa.testrunner.test:main.jar</dependency> </dependenciesToScan> - <groups>ai.vespa.hosted.cd.FunctionalTest, com.yahoo.vespa.tenant.systemtest.base.SystemTest</groups> + <groups>ai.vespa.hosted.cd.SystemTest, com.yahoo.vespa.tenant.systemtest.base.SystemTest</groups> <excludedGroups>com.yahoo.vespa.tenant.systemtest.base.impl.EmptyExcludeGroup.class</excludedGroups> <excludes> - <exclude>ai.vespa.hosted.cd.FunctionalTest, com.yahoo.vespa.tenant.systemtest.base.SystemTest</exclude> + <exclude>ai.vespa.hosted.cd.SystemTest, com.yahoo.vespa.tenant.systemtest.base.SystemTest</exclude> </excludes> <reportsDirectory>${env.TEST_DIR}</reportsDirectory> <redirectTestOutputToFile>false</redirectTestOutputToFile> diff --git a/vespajlib/abi-spec.json b/vespajlib/abi-spec.json index b2b895040bc..3b733105d2e 100644 --- a/vespajlib/abi-spec.json +++ b/vespajlib/abi-spec.json @@ -104,14 +104,22 @@ "com.yahoo.data.access.ObjectTraverser" ], "attributes": [ - "public", - "final" + "public" ], "methods": [ "public void <init>(java.lang.StringBuilder, boolean)", "public void encode(com.yahoo.data.access.Inspector)", + "protected void encodeEMPTY()", + "protected void encodeBOOL(boolean)", + "protected void encodeLONG(long)", + "protected void encodeDOUBLE(double)", + "protected void encodeSTRING(java.lang.String)", + "protected void encodeDATA(byte[])", + "protected void encodeARRAY(com.yahoo.data.access.Inspector)", + "protected void encodeOBJECT(com.yahoo.data.access.Inspector)", "public void entry(int, com.yahoo.data.access.Inspector)", - "public void field(java.lang.String, com.yahoo.data.access.Inspector)" + "public void field(java.lang.String, com.yahoo.data.access.Inspector)", + "public java.lang.StringBuilder target()" ], "fields": [] }, @@ -124,7 +132,8 @@ ], "methods": [ "public void <init>()", - "public static java.lang.StringBuilder 
render(com.yahoo.data.access.Inspectable, java.lang.StringBuilder, boolean)" + "public static java.lang.StringBuilder render(com.yahoo.data.access.Inspectable, java.lang.StringBuilder, boolean)", + "public static java.lang.StringBuilder render(com.yahoo.data.access.Inspectable, com.yahoo.data.access.simple.JsonRender$StringEncoder)" ], "fields": [] }, diff --git a/vespajlib/src/main/java/com/yahoo/data/access/simple/JsonRender.java b/vespajlib/src/main/java/com/yahoo/data/access/simple/JsonRender.java index 253b0c60927..9f662c77c59 100644 --- a/vespajlib/src/main/java/com/yahoo/data/access/simple/JsonRender.java +++ b/vespajlib/src/main/java/com/yahoo/data/access/simple/JsonRender.java @@ -11,19 +11,25 @@ import com.yahoo.data.access.ObjectTraverser; * * @author arnej27959 */ -public final class JsonRender -{ +public final class JsonRender { + public static StringBuilder render(Inspectable value, StringBuilder target, - boolean compact) - { - StringEncoder enc = new StringEncoder(target, compact); - enc.encode(value.inspect()); - return target; + boolean compact) { + return render(value, new StringEncoder(target, compact)); + } + + /** + * Renders the given value to the target stringbuilder with a given encoder. + * This is useful to use an encoder where rendering of some value types is customized. + */ + public static StringBuilder render(Inspectable value, StringEncoder encoder) { + encoder.encode(value.inspect()); + return encoder.target(); } - public static final class StringEncoder implements ArrayTraverser, ObjectTraverser - { + public static class StringEncoder implements ArrayTraverser, ObjectTraverser { + private final StringBuilder out; private boolean head = true; private boolean compact; @@ -41,21 +47,21 @@ public final class JsonRender } } - private void encodeEMPTY() { + protected void encodeEMPTY() { out.append("null"); } - private void encodeBOOL(boolean value) { + protected void encodeBOOL(boolean value) { out.append(value ? 
"true" : "false"); } - private void encodeLONG(long value) { - out.append(String.valueOf(value)); + protected void encodeLONG(long value) { + out.append(value); } - private void encodeDOUBLE(double value) { + protected void encodeDOUBLE(double value) { if (Double.isFinite(value)) { - out.append(String.valueOf(value)); + out.append(value); } else { out.append("null"); } @@ -63,7 +69,7 @@ public final class JsonRender static final char[] hex = "0123456789ABCDEF".toCharArray(); - private void encodeSTRING(String value) { + protected void encodeSTRING(String value) { out.append('"'); for (char c : value.toCharArray()) { switch (c) { @@ -89,7 +95,7 @@ public final class JsonRender out.append('"'); } - private void encodeDATA(byte[] value) { + protected void encodeDATA(byte[] value) { out.append('"'); out.append("0x"); for (int pos = 0; pos < value.length; pos++) { @@ -99,14 +105,14 @@ public final class JsonRender out.append('"'); } - private void encodeARRAY(Inspector inspector) { + protected void encodeARRAY(Inspector inspector) { openScope("["); ArrayTraverser at = this; inspector.traverse(at); closeScope("]"); } - private void encodeOBJECT(Inspector inspector) { + protected void encodeOBJECT(Inspector inspector) { openScope("{"); ObjectTraverser ot = this; inspector.traverse(ot); @@ -164,5 +170,10 @@ public final class JsonRender out.append(' '); encodeValue(inspector); } + + /** Returns the target this is encoding values to */ + public StringBuilder target() { return out; } + } + } diff --git a/vespajlib/src/main/java/com/yahoo/lang/MutableInteger.java b/vespajlib/src/main/java/com/yahoo/lang/MutableInteger.java index a988a3f6fa2..e2da62b6098 100644 --- a/vespajlib/src/main/java/com/yahoo/lang/MutableInteger.java +++ b/vespajlib/src/main/java/com/yahoo/lang/MutableInteger.java @@ -24,6 +24,12 @@ public class MutableInteger { return value; } + /** Increments the value by 1 and returns the value of this *before* incrementing */ + public int next() { + value++; + 
return value - 1; + } + /** Adds the increment to the current value and returns the resulting value */ public int subtract(int increment) { value -= increment; diff --git a/vespajlib/src/main/java/com/yahoo/tensor/IndexedTensor.java b/vespajlib/src/main/java/com/yahoo/tensor/IndexedTensor.java index aca2bfc1b0f..02f54b5790a 100644 --- a/vespajlib/src/main/java/com/yahoo/tensor/IndexedTensor.java +++ b/vespajlib/src/main/java/com/yahoo/tensor/IndexedTensor.java @@ -210,7 +210,36 @@ public abstract class IndexedTensor implements Tensor { } @Override - public String toString() { return Tensor.toStandardString(this); } + public String toString() { + if (type.rank() == 0) return Tensor.toStandardString(this); + if (type.dimensions().stream().anyMatch(d -> d.size().isEmpty())) return Tensor.toStandardString(this); + + Indexes indexes = Indexes.of(dimensionSizes); + + StringBuilder b = new StringBuilder(type.toString()).append(":"); + for (int index = 0; index < size(); index++) { + indexes.next(); + + // start brackets + for (int i = 0; i < indexes.rightDimensionsWhichAreAtStart(); i++) + b.append("["); + + // value + if (type.valueType() == TensorType.Value.DOUBLE) + b.append(get(index)); + else if (type.valueType() == TensorType.Value.FLOAT) + b.append(get(index)); // TODO: Use getFloat + else + throw new IllegalStateException("Unexpected value type " + type.valueType()); + + // end bracket and comma + for (int i = 0; i < indexes.rightDimensionsWhichAreAtEnd(); i++) + b.append("]"); + if (index < size() - 1) + b.append(", "); + } + return b.toString(); + } @Override public boolean equals(Object other) { @@ -382,8 +411,10 @@ public abstract class IndexedTensor implements Tensor { DimensionSizes sizes() { return sizes; } + /** Sets a value by its right-adjacent traversal position */ public abstract void cellByDirectIndex(long index, double value); + /** Sets a value by its right-adjacent traversal position */ public abstract void cellByDirectIndex(long index, float 
value); } @@ -827,6 +858,27 @@ public abstract class IndexedTensor implements Tensor { public abstract void next(); + /** Returns the number of dimensions from the right which are currently at the start position (0) */ + int rightDimensionsWhichAreAtStart() { + int dimension = indexes.length - 1; + int atStartCount = 0; + while (dimension >= 0 && indexes[dimension] == 0) { + atStartCount++; + dimension--; + } + return atStartCount; + } + + /** Returns the number of dimensions from the right which are currently at the end position */ + int rightDimensionsWhichAreAtEnd() { + int dimension = indexes.length - 1; + int atEndCount = 0; + while (dimension >= 0 && indexes[dimension] == dimensionSizes().size(dimension) - 1) { + atEndCount++; + dimension--; + } + return atEndCount; + } } private final static class EmptyIndexes extends Indexes { diff --git a/vespajlib/src/main/java/com/yahoo/tensor/serialization/JsonFormat.java b/vespajlib/src/main/java/com/yahoo/tensor/serialization/JsonFormat.java index 52635905d72..c73ff03a0eb 100644 --- a/vespajlib/src/main/java/com/yahoo/tensor/serialization/JsonFormat.java +++ b/vespajlib/src/main/java/com/yahoo/tensor/serialization/JsonFormat.java @@ -1,6 +1,7 @@ // Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.tensor.serialization; +import com.yahoo.lang.MutableInteger; import com.yahoo.slime.ArrayTraverser; import com.yahoo.slime.Cursor; import com.yahoo.slime.Inspector; @@ -8,6 +9,7 @@ import com.yahoo.slime.JsonDecoder; import com.yahoo.slime.ObjectTraverser; import com.yahoo.slime.Slime; import com.yahoo.slime.Type; +import com.yahoo.tensor.IndexedTensor; import com.yahoo.tensor.Tensor; import com.yahoo.tensor.TensorAddress; import com.yahoo.tensor.TensorType; @@ -17,23 +19,37 @@ import java.util.Iterator; /** * Writes tensors on the JSON format used in Vespa tensor document fields: * A JSON map containing a 'cells' array. 
- * See http://docs.vespa.ai/documentation/reference/document-json-put-format.html#tensor + * See a http://docs.vespa.ai/documentation/reference/document-json-put-format.html#tensor + * + * @author bratseth */ -// TODO: We should probably move reading of this format from the document module to here public class JsonFormat { - /** Serializes the given tensor into JSON format */ + /** Serializes the given tensor value into JSON format */ public static byte[] encode(Tensor tensor) { Slime slime = new Slime(); Cursor root = slime.setObject(); - Cursor cellsArray = root.setArray("cells"); + encodeCells(tensor, root); + return com.yahoo.slime.JsonFormat.toJsonBytes(slime); + } + + /** Serializes the given tensor type and value into JSON format */ + public static byte[] encodeWithType(Tensor tensor) { + Slime slime = new Slime(); + Cursor root = slime.setObject(); + root.setString("type", tensor.type().toString()); + encodeCells(tensor, root); + return com.yahoo.slime.JsonFormat.toJsonBytes(slime); + } + + private static void encodeCells(Tensor tensor, Cursor rootObject) { + Cursor cellsArray = rootObject.setArray("cells"); for (Iterator<Tensor.Cell> i = tensor.cellIterator(); i.hasNext(); ) { Tensor.Cell cell = i.next(); Cursor cellObject = cellsArray.addObject(); encodeAddress(tensor.type(), cell.getKey(), cellObject.setObject("address")); cellObject.setDouble("value", cell.getValue()); } - return com.yahoo.slime.JsonFormat.toJsonBytes(slime); } private static void encodeAddress(TensorType type, TensorAddress address, Cursor addressObject) { @@ -42,14 +58,24 @@ public class JsonFormat { } /** Deserializes the given tensor from JSON format */ + // NOTE: This must be kept in sync with com.yahoo.document.json.readers.TensorReader in the document module public static Tensor decode(TensorType type, byte[] jsonTensorValue) { - Tensor.Builder tensorBuilder = Tensor.Builder.of(type); + Tensor.Builder builder = Tensor.Builder.of(type); Inspector root = new 
JsonDecoder().decode(new Slime(), jsonTensorValue).get(); - Inspector cells = root.field("cells"); + + if (root.field("cells").valid()) + decodeCells(root.field("cells"), builder); + else if (root.field("values").valid()) + decodeValues(root.field("values"), builder); + else if (builder.type().dimensions().stream().anyMatch(d -> d.isIndexed())) // sparse can be empty + throw new IllegalArgumentException("Expected a tensor value to contain either 'cells' or 'values'"); + return builder.build(); + } + + private static void decodeCells(Inspector cells, Tensor.Builder builder) { if ( cells.type() != Type.ARRAY) - throw new IllegalArgumentException("Excepted an array item named 'cells' at the top level"); - cells.traverse((ArrayTraverser) (__, cell) -> decodeCell(cell, tensorBuilder.cell())); - return tensorBuilder.build(); + throw new IllegalArgumentException("Excepted 'cells' to contain an array, not " + cells.type()); + cells.traverse((ArrayTraverser) (__, cell) -> decodeCell(cell, builder.cell())); } private static void decodeCell(Inspector cell, Tensor.Builder.CellBuilder cellBuilder) { @@ -64,4 +90,20 @@ public class JsonFormat { cellBuilder.value(value.asDouble()); } + private static void decodeValues(Inspector values, Tensor.Builder builder) { + if ( ! (builder instanceof IndexedTensor.BoundBuilder)) + throw new IllegalArgumentException("The 'values' field can only be used with dense tensors. 
" + + "Use 'cells' instead"); + if ( values.type() != Type.ARRAY) + throw new IllegalArgumentException("Excepted 'values' to contain an array, not " + values.type()); + + IndexedTensor.BoundBuilder indexedBuilder = (IndexedTensor.BoundBuilder)builder; + MutableInteger index = new MutableInteger(0); + values.traverse((ArrayTraverser) (__, value) -> { + if (value.type() != Type.LONG && value.type() != Type.DOUBLE) + throw new IllegalArgumentException("Excepted the values array to contain numbers, not " + value.type()); + indexedBuilder.cellByDirectIndex(index.next(), value.asDouble()); + }); + } + } diff --git a/vespajlib/src/test/java/com/yahoo/tensor/TensorParserTestCase.java b/vespajlib/src/test/java/com/yahoo/tensor/TensorParserTestCase.java index 63fe40565bd..1928971820c 100644 --- a/vespajlib/src/test/java/com/yahoo/tensor/TensorParserTestCase.java +++ b/vespajlib/src/test/java/com/yahoo/tensor/TensorParserTestCase.java @@ -23,37 +23,42 @@ public class TensorParserTestCase { @Test public void testDenseParsing() { - assertEquals(Tensor.Builder.of(TensorType.fromSpec("tensor()")).build(), - Tensor.from("tensor():[]")); - assertEquals(Tensor.Builder.of(TensorType.fromSpec("tensor(x[1])")).cell(1.0, 0).build(), - Tensor.from("tensor(x[1]):[1.0]")); - assertEquals(Tensor.Builder.of(TensorType.fromSpec("tensor(x[2])")).cell(1.0, 0).cell(2.0, 1).build(), - Tensor.from("tensor(x[2]):[1.0, 2.0]")); - assertEquals(Tensor.Builder.of(TensorType.fromSpec("tensor(x[2],y[3])")) + assertDense(Tensor.Builder.of(TensorType.fromSpec("tensor()")).build(), + "tensor():{0.0}"); + assertDense(Tensor.Builder.of(TensorType.fromSpec("tensor()")).cell(1.3).build(), + "tensor():{1.3}"); + assertDense(Tensor.Builder.of(TensorType.fromSpec("tensor(x[])")).cell(1.0, 0).build(), + "tensor(x[]):{{x:0}:1.0}"); + assertDense(Tensor.Builder.of(TensorType.fromSpec("tensor(x[1])")).cell(1.0, 0).build(), + "tensor(x[1]):[1.0]"); + 
assertDense(Tensor.Builder.of(TensorType.fromSpec("tensor(x[2])")).cell(1.0, 0).cell(2.0, 1).build(), + "tensor(x[2]):[1.0, 2.0]"); + assertDense(Tensor.Builder.of(TensorType.fromSpec("tensor(x[2],y[3])")) .cell(1.0, 0, 0) .cell(2.0, 0, 1) .cell(3.0, 0, 2) .cell(4.0, 1, 0) .cell(5.0, 1, 1) .cell(6.0, 1, 2).build(), - Tensor.from("tensor(x[2],y[3]):[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]")); - assertEquals(Tensor.Builder.of(TensorType.fromSpec("tensor(x[1],y[2],z[3])")) + "tensor(x[2],y[3]):[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]"); + assertDense(Tensor.Builder.of(TensorType.fromSpec("tensor(x[1],y[2],z[3])")) .cell(1.0, 0, 0, 0) .cell(2.0, 0, 0, 1) .cell(3.0, 0, 0, 2) .cell(4.0, 0, 1, 0) .cell(5.0, 0, 1, 1) .cell(6.0, 0, 1, 2).build(), - Tensor.from("tensor(x[1],y[2],z[3]):[[[1.0], [2.0]], [[3.0], [4.0]], [[5.0], [6.0]]]")); - assertEquals(Tensor.Builder.of(TensorType.fromSpec("tensor(x[3],y[2],z[1])")) + "tensor(x[1],y[2],z[3]):[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]"); + assertDense(Tensor.Builder.of(TensorType.fromSpec("tensor(x[3],y[2],z[1])")) .cell(1.0, 0, 0, 0) .cell(2.0, 0, 1, 0) .cell(3.0, 1, 0, 0) .cell(4.0, 1, 1, 0) .cell(5.0, 2, 0, 0) .cell(6.0, 2, 1, 0).build(), - Tensor.from("tensor(x[3],y[2],z[1]):[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]")); - assertEquals(Tensor.Builder.of(TensorType.fromSpec("tensor(x[3],y[2],z[1])")) + "tensor(x[3],y[2],z[1]):[[[1.0], [2.0]], [[3.0], [4.0]], [[5.0], [6.0]]]"); + assertEquals("Messy input", + Tensor.Builder.of(TensorType.fromSpec("tensor(x[3],y[2],z[1])")) .cell( 1.0, 0, 0, 0) .cell( 2.0, 0, 1, 0) .cell( 3.0, 1, 0, 0) @@ -61,6 +66,20 @@ public class TensorParserTestCase { .cell( 5.0, 2, 0, 0) .cell(-6.0, 2, 1, 0).build(), Tensor.from("tensor( x[3],y[2],z[1]) : [ [ [1.0, 2.0, 3.0] , [4.0, 5,-6.0] ] ]")); + assertEquals("Skipping syntactic sugar", + Tensor.Builder.of(TensorType.fromSpec("tensor(x[3],y[2],z[1])")) + .cell( 1.0, 0, 0, 0) + .cell( 2.0, 0, 1, 0) + .cell( 3.0, 1, 0, 0) + .cell( 4.0, 1, 1, 0) + .cell( 5.0, 2, 0, 0) + 
.cell(-6.0, 2, 1, 0).build(), + Tensor.from("tensor( x[3],y[2],z[1]) : [1.0, 2.0, 3.0 , 4.0, 5, -6.0]")); + } + + private void assertDense(Tensor expectedTensor, String denseFormat) { + assertEquals(denseFormat, expectedTensor, Tensor.from(denseFormat)); + assertEquals(denseFormat, expectedTensor.toString()); } @Test diff --git a/vespajlib/src/test/java/com/yahoo/tensor/TensorTestCase.java b/vespajlib/src/test/java/com/yahoo/tensor/TensorTestCase.java index c53db160806..3d5d8d1f5ae 100644 --- a/vespajlib/src/test/java/com/yahoo/tensor/TensorTestCase.java +++ b/vespajlib/src/test/java/com/yahoo/tensor/TensorTestCase.java @@ -89,9 +89,9 @@ public class TensorTestCase { @Test public void testCombineInDimensionIndexed() { - Tensor input = Tensor.from("tensor(input[]):{{input:0}:3, {input:1}:7}"); + Tensor input = Tensor.from("tensor(input[2]):{{input:0}:3, {input:1}:7}"); Tensor result = input.concat(11, "input"); - assertEquals("tensor(input[]):{{input:0}:3.0,{input:1}:7.0,{input:2}:11.0}", result.toString()); + assertEquals("tensor(input[3]):[3.0, 7.0, 11.0]", result.toString()); } /** All functions are more throughly tested in searchlib EvaluationTestCase */ diff --git a/vespajlib/src/test/java/com/yahoo/tensor/serialization/JsonFormatTestCase.java b/vespajlib/src/test/java/com/yahoo/tensor/serialization/JsonFormatTestCase.java index b466307d3b9..4c44cbbf5c7 100644 --- a/vespajlib/src/test/java/com/yahoo/tensor/serialization/JsonFormatTestCase.java +++ b/vespajlib/src/test/java/com/yahoo/tensor/serialization/JsonFormatTestCase.java @@ -33,7 +33,7 @@ public class JsonFormatTestCase { @Test public void testDenseTensor() { - Tensor.Builder builder = Tensor.Builder.of(TensorType.fromSpec("tensor(x{},y{})")); + Tensor.Builder builder = Tensor.Builder.of(TensorType.fromSpec("tensor(x[2],y[2])")); builder.cell().label("x", 0).label("y", 0).value(2.0); builder.cell().label("x", 0).label("y", 1).value(3.0); builder.cell().label("x", 1).label("y", 0).value(5.0); @@ -52,6 
+52,21 @@ public class JsonFormatTestCase { } @Test + public void testDenseTensorInDenseForm() { + Tensor.Builder builder = Tensor.Builder.of(TensorType.fromSpec("tensor(x[2],y[3])")); + builder.cell().label("x", 0).label("y", 0).value(2.0); + builder.cell().label("x", 0).label("y", 1).value(3.0); + builder.cell().label("x", 0).label("y", 2).value(4.0); + builder.cell().label("x", 1).label("y", 0).value(5.0); + builder.cell().label("x", 1).label("y", 1).value(6.0); + builder.cell().label("x", 1).label("y", 2).value(7.0); + Tensor expected = builder.build(); + String denseJson = "{\"values\":[2.0, 3.0, 4.0, 5.0, 6.0, 7.0]}"; + Tensor decoded = JsonFormat.decode(expected.type(), denseJson.getBytes(StandardCharsets.UTF_8)); + assertEquals(expected, decoded); + } + + @Test public void testTooManyCells() { TensorType x2 = TensorType.fromSpec("tensor(x[2])"); String json = "{\"cells\":[" + diff --git a/vespalib/src/tests/stllike/hashtable_test.cpp b/vespalib/src/tests/stllike/hashtable_test.cpp index 4948faf450f..877a5dddcb5 100644 --- a/vespalib/src/tests/stllike/hashtable_test.cpp +++ b/vespalib/src/tests/stllike/hashtable_test.cpp @@ -3,6 +3,7 @@ #include <vespa/vespalib/stllike/hashtable.hpp> #include <vespa/vespalib/stllike/hash_fun.h> +#include <vespa/vespalib/stllike/identity.h> #include <vespa/vespalib/testkit/testapp.h> #include <memory> #include <vector> @@ -63,7 +64,7 @@ TEST("require that hashtable can store pairs of <key, unique_ptr to value>") { } template<typename K> using set_hashtable = - hashtable<K, K, vespalib::hash<K>, std::equal_to<K>, std::_Identity<K>>; + hashtable<K, K, vespalib::hash<K>, std::equal_to<K>, Identity>; TEST("require that hashtable<int> can be copied") { set_hashtable<int> table(100); diff --git a/vespalib/src/vespa/vespalib/btree/btreeiterator.h b/vespalib/src/vespa/vespalib/btree/btreeiterator.h index de9637c00f1..7c247cd01da 100644 --- a/vespalib/src/vespa/vespalib/btree/btreeiterator.h +++ 
b/vespalib/src/vespa/vespalib/btree/btreeiterator.h @@ -302,8 +302,7 @@ protected: * * @param pathSize New tree height (number of levels of internal nodes) */ - void - clearPath(uint32_t pathSize); + VESPA_DLL_LOCAL void clearPath(uint32_t pathSize); public: bool @@ -396,8 +395,7 @@ public: /** * Setup iterator to be empty and not be associated with any tree. */ - void - setupEmpty(); + VESPA_DLL_LOCAL void setupEmpty(); /** * Move iterator to beyond last element in the current tree. diff --git a/vespalib/src/vespa/vespalib/stllike/hash_map.h b/vespalib/src/vespa/vespalib/stllike/hash_map.h index 0de03cb97ee..5eae4cea55e 100644 --- a/vespalib/src/vespa/vespalib/stllike/hash_map.h +++ b/vespalib/src/vespa/vespalib/stllike/hash_map.h @@ -3,6 +3,7 @@ #include "hashtable.h" #include "hash_fun.h" +#include "select.h" namespace vespalib { @@ -13,7 +14,7 @@ public: typedef std::pair<K, V> value_type; typedef K key_type; typedef V mapped_type; - using HashTable = hashtable< K, value_type, H, EQ, std::_Select1st< value_type >, M >; + using HashTable = hashtable< K, value_type, H, EQ, Select1st<value_type>, M >; private: HashTable _ht; public: diff --git a/vespalib/src/vespa/vespalib/stllike/hash_map.hpp b/vespalib/src/vespa/vespalib/stllike/hash_map.hpp index 2ca6b97748f..311a256be76 100644 --- a/vespalib/src/vespa/vespalib/stllike/hash_map.hpp +++ b/vespalib/src/vespa/vespalib/stllike/hash_map.hpp @@ -3,6 +3,7 @@ #include "hash_map_insert.hpp" #include "hashtable.hpp" +#include "select.h" namespace vespalib { @@ -68,11 +69,11 @@ hash_map<K, V, H, EQ, M>::getMemoryUsed() const #define VESPALIB_HASH_MAP_INSTANTIATE_H_E_M(K, V, H, E, M) \ template class vespalib::hash_map<K, V, H, E, M>; \ - template class vespalib::hashtable<K, std::pair<K,V>, H, E, std::_Select1st<std::pair<K,V>>, M>; \ - template vespalib::hashtable<K, std::pair<K,V>, H, E, std::_Select1st<std::pair<K,V>>, M>::insert_result \ - vespalib::hashtable<K, std::pair<K,V>, H, E, std::_Select1st<std::pair<K,V>>, 
M>::insert(std::pair<K,V> &&); \ - template vespalib::hashtable<K, std::pair<K,V>, H, E, std::_Select1st<std::pair<K,V>>, M>::insert_result \ - vespalib::hashtable<K, std::pair<K,V>, H, E, std::_Select1st<std::pair<K,V>>, M>::insertInternal(std::pair<K,V> &&); \ + template class vespalib::hashtable<K, std::pair<K,V>, H, E, vespalib::Select1st<std::pair<K,V>>, M>; \ + template vespalib::hashtable<K, std::pair<K,V>, H, E, vespalib::Select1st<std::pair<K,V>>, M>::insert_result \ + vespalib::hashtable<K, std::pair<K,V>, H, E, vespalib::Select1st<std::pair<K,V>>, M>::insert(std::pair<K,V> &&); \ + template vespalib::hashtable<K, std::pair<K,V>, H, E, vespalib::Select1st<std::pair<K,V>>, M>::insert_result \ + vespalib::hashtable<K, std::pair<K,V>, H, E, vespalib::Select1st<std::pair<K,V>>, M>::insertInternal(std::pair<K,V> &&); \ template class vespalib::Array<vespalib::hash_node<std::pair<K,V>>>; #define VESPALIB_HASH_MAP_INSTANTIATE_H_E(K, V, H, E) \ diff --git a/vespalib/src/vespa/vespalib/stllike/hash_set.h b/vespalib/src/vespa/vespalib/stllike/hash_set.h index 7a2db4735aa..919e5e8c47f 100644 --- a/vespalib/src/vespa/vespalib/stllike/hash_set.h +++ b/vespalib/src/vespa/vespalib/stllike/hash_set.h @@ -3,6 +3,7 @@ #include "hashtable.h" #include "hash_fun.h" +#include "identity.h" #include <initializer_list> namespace vespalib { @@ -11,7 +12,7 @@ template< typename K, typename H = vespalib::hash<K>, typename EQ = std::equal_t class hash_set { private: - using HashTable = hashtable< K, K, H, EQ, std::_Identity<K>, M>; + using HashTable = hashtable< K, K, H, EQ, Identity, M>; HashTable _ht; public: typedef typename HashTable::iterator iterator; diff --git a/vespalib/src/vespa/vespalib/stllike/hash_set.hpp b/vespalib/src/vespa/vespalib/stllike/hash_set.hpp index 814c96a3c85..19114798806 100644 --- a/vespalib/src/vespa/vespalib/stllike/hash_set.hpp +++ b/vespalib/src/vespa/vespalib/stllike/hash_set.hpp @@ -3,6 +3,7 @@ #include "hash_set_insert.hpp" #include "hashtable.hpp" 
+#include "identity.h" namespace vespalib { @@ -84,11 +85,11 @@ hash_set<K, H, EQ, M>::insert(K &&value) { #define VESPALIB_HASH_SET_INSTANTIATE(K) \ template class vespalib::hash_set<K>; \ - template class vespalib::hashtable<K, K, vespalib::hash<K>, std::equal_to<>, std::_Identity<K>>; \ + template class vespalib::hashtable<K, K, vespalib::hash<K>, std::equal_to<>, vespalib::Identity>; \ template class vespalib::Array<vespalib::hash_node<K>>; #define VESPALIB_HASH_SET_INSTANTIATE_H(K, H) \ template class vespalib::hash_set<K, H>; \ - template class vespalib::hashtable<K, K, H, std::equal_to<>, std::_Identity<K>>; \ + template class vespalib::hashtable<K, K, H, std::equal_to<>, vespalib::Identity>; \ template class vespalib::Array<vespalib::hash_node<K>>; diff --git a/vespalib/src/vespa/vespalib/stllike/identity.h b/vespalib/src/vespa/vespalib/stllike/identity.h new file mode 100644 index 00000000000..06019fb4690 --- /dev/null +++ b/vespalib/src/vespa/vespalib/stllike/identity.h @@ -0,0 +1,18 @@ +// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +#pragma once + +#include <utility> + +namespace vespalib { + +// Functor which returns its argument unchanged. +// Functionally identical to C++20's std::identity +// TODO remove and replace with std::identity once it is available. +struct Identity { + template <typename T> + constexpr T&& operator()(T&& v) const noexcept { + return std::forward<T>(v); + } +}; + +} diff --git a/vespalib/src/vespa/vespalib/stllike/select.h b/vespalib/src/vespa/vespalib/stllike/select.h new file mode 100644 index 00000000000..28bcd6a01fc --- /dev/null +++ b/vespalib/src/vespa/vespalib/stllike/select.h @@ -0,0 +1,17 @@ +// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+#pragma once + +namespace vespalib { + +// Convenience functor for extracting the first element of a std::pair (or compatible type) +template <typename Pair> +struct Select1st { + constexpr typename Pair::first_type& operator()(Pair& p) const noexcept { + return p.first; + } + constexpr const typename Pair::first_type& operator()(const Pair& p) const noexcept { + return p.first; + } +}; + +} |