diff options
76 files changed, 2171 insertions, 472 deletions
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/InfrastructureMetricSet.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/InfrastructureMetricSet.java new file mode 100644 index 00000000000..92156c959a0 --- /dev/null +++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/InfrastructureMetricSet.java @@ -0,0 +1,180 @@ +// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.model.admin.monitoring; + +import ai.vespa.metrics.*; + +import java.util.Collections; +import java.util.EnumSet; +import java.util.LinkedHashSet; +import java.util.Set; + +import static ai.vespa.metrics.Suffix.average; +import static ai.vespa.metrics.Suffix.count; +import static ai.vespa.metrics.Suffix.last; +import static ai.vespa.metrics.Suffix.max; +import static ai.vespa.metrics.Suffix.sum; + +/** + * Encapsulates vespa service metrics. + * + * @author yngveaasheim + */ +public class InfrastructureMetricSet { + + public static final MetricSet infrastructureMetricSet = new MetricSet("infrastructure", + getInfrastructureMetrics()); + + private static Set<Metric> getInfrastructureMetrics() { + Set<Metric> metrics = new LinkedHashSet<>(); + + metrics.addAll(getConfigServerMetrics()); + metrics.addAll(getControllerMetrics()); + metrics.addAll(getOtherMetrics()); + + return Collections.unmodifiableSet(metrics); + } + + private static Set<Metric> getConfigServerMetrics() { + Set<Metric> metrics = new LinkedHashSet<>(); + + addMetric(metrics, ConfigServerMetrics.REQUESTS.count()); + addMetric(metrics, ConfigServerMetrics.FAILED_REQUESTS.count()); + addMetric(metrics, ConfigServerMetrics.LATENCY, EnumSet.of(max, sum, count)); + addMetric(metrics, ConfigServerMetrics.CACHE_CONFIG_ELEMS.last()); + addMetric(metrics, ConfigServerMetrics.CACHE_CHECKSUM_ELEMS.last()); + addMetric(metrics, ConfigServerMetrics.HOSTS.last()); + addMetric(metrics, 
ConfigServerMetrics.DELAYED_RESPONSES.count()); + addMetric(metrics, ConfigServerMetrics.SESSION_CHANGE_ERRORS.count()); + + addMetric(metrics, ConfigServerMetrics.ZK_Z_NODES, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last. + addMetric(metrics, ConfigServerMetrics.ZK_AVG_LATENCY, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last. + addMetric(metrics, ConfigServerMetrics.ZK_MAX_LATENCY, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last. + addMetric(metrics, ConfigServerMetrics.ZK_CONNECTIONS, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last. + addMetric(metrics, ConfigServerMetrics.ZK_CONNECTION_LOST.count()); + addMetric(metrics, ConfigServerMetrics.ZK_RECONNECTED.count()); + addMetric(metrics, ConfigServerMetrics.ZK_SUSPENDED.count()); + addMetric(metrics, ConfigServerMetrics.ZK_OUTSTANDING_REQUESTS, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last. + + // Node repository metrics + addMetric(metrics, ConfigServerMetrics.NODES_NON_ACTIVE_FRACTION.last()); + addMetric(metrics, ConfigServerMetrics.CLUSTER_COST.last()); + addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_CPU.last()); + addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_MEMORY.last()); + addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_DISK.last()); + addMetric(metrics, ConfigServerMetrics.WANT_TO_REBOOT.max()); + addMetric(metrics, ConfigServerMetrics.WANT_TO_RESTART.max()); + addMetric(metrics, ConfigServerMetrics.WANT_TO_RETIRE.max()); + addMetric(metrics, ConfigServerMetrics.RETIRED.max()); + addMetric(metrics, ConfigServerMetrics.WANT_TO_CHANGE_VESPA_VERSION.max()); + addMetric(metrics, ConfigServerMetrics.HAS_WIRE_GUARD_KEY.last()); + addMetric(metrics, ConfigServerMetrics.WANT_TO_DEPROVISION.max()); + addMetric(metrics, ConfigServerMetrics.SUSPENDED.max()); + addMetric(metrics, ConfigServerMetrics.SOME_SERVICES_DOWN.max()); + addMetric(metrics, ConfigServerMetrics.NODE_FAILER_BAD_NODE.last()); + addMetric(metrics, 
ConfigServerMetrics.LOCK_ATTEMPT_LOCKED_LOAD, EnumSet.of(max,average)); + + addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_ALLOCATED_CAPACITY_CPU.average()); + addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_ALLOCATED_CAPACITY_MEM.average()); + addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_ALLOCATED_CAPACITY_DISK.average()); + addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_FREE_CAPACITY_CPU.max()); + addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_FREE_CAPACITY_MEM.max()); + addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_FREE_CAPACITY_DISK.max()); + addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_TOTAL_CAPACITY_CPU, EnumSet.of(max,average)); + addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_TOTAL_CAPACITY_DISK, EnumSet.of(max,average)); + addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_TOTAL_CAPACITY_MEM, EnumSet.of(max,average)); + addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_SKEW.last()); + addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_PENDING_REDEPLOYMENTS.last()); + addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_ACTIVE_HOSTS.max()); + addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DIRTY_HOSTS.max()); + addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_FAILED_HOSTS.max()); + addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_INACTIVE_HOSTS.max()); + addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_PROVISIONED_HOSTS.last()); + addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_READY_HOSTS.max()); + addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_RESERVED_HOSTS.max()); + addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_FAILED_NODES.max()); + + addMetric(metrics, ConfigServerMetrics.RPC_SERVER_WORK_QUEUE_SIZE.average()); + addMetric(metrics, ConfigServerMetrics.DEPLOYMENT_ACTIVATE_MILLIS.last()); + addMetric(metrics, ConfigServerMetrics.DEPLOYMENT_PREPARE_MILLIS.last()); + + addMetric(metrics, 
ConfigServerMetrics.LOCK_ATTEMPT_LOCKED_LOAD, EnumSet.of(max, average)); + addMetric(metrics, ConfigServerMetrics.MAINTENANCE_SUCCESS_FACTOR_DEVIATION.last()); + addMetric(metrics, ConfigServerMetrics.MAINTENANCE_DEPLOYMENT_FAILURE.count()); + addMetric(metrics, ConfigServerMetrics.MAINTENANCE_DEPLOYMENT_TRANSIENT_FAILURE.count()); + addMetric(metrics, ConfigServerMetrics.OVERCOMMITTED_HOSTS.max()); + addMetric(metrics, ConfigServerMetrics.SPARE_HOST_CAPACITY.last()); + addMetric(metrics, ConfigServerMetrics.THROTTLED_NODE_FAILURES.max()); + + // Container metrics that should be stored for the config-server + addMetric(metrics, ContainerMetrics.HANDLED_LATENCY.max()); + addMetric(metrics, ContainerMetrics.HANDLED_REQUESTS.count()); + addMetric(metrics, ContainerMetrics.HTTP_STATUS_2XX.count()); + addMetric(metrics, ContainerMetrics.HTTP_STATUS_4XX.count()); + addMetric(metrics, ContainerMetrics.HTTP_STATUS_5XX.count()); + addMetric(metrics, ContainerMetrics.JDISC_GC_MS.last()); + addMetric(metrics, ContainerMetrics.MEM_HEAP_USED.average()); + addMetric(metrics, ContainerMetrics.SERVER_NUM_REQUESTS.count()); + addMetric(metrics, ContainerMetrics.SERVER_STARTED_MILLIS.last()); + addMetric(metrics, ContainerMetrics.SERVER_TOTAL_SUCCESSFUL_RESPONSE_LATENCY.last()); + + return metrics; + } + + private static Set<Metric> getControllerMetrics() { + Set<Metric> metrics = new LinkedHashSet<>(); + + addMetric(metrics, ControllerMetrics.ATHENZ_REQUEST_ERROR.count()); + addMetric(metrics, ControllerMetrics.ARCHIVE_BUCKET_COUNT.last()); + addMetric(metrics, ControllerMetrics.BILLING_TENANTS.last()); + + addMetric(metrics, ControllerMetrics.DEPLOYMENT_ABORT.count()); + addMetric(metrics, ControllerMetrics.DEPLOYMENT_AVERAGE_DURATION, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last. 
+ addMetric(metrics, ControllerMetrics.DEPLOYMENT_CONVERGENCE_FAILURE.count()); + addMetric(metrics, ControllerMetrics.DEPLOYMENT_DEPLOYMENT_FAILURE.count()); + addMetric(metrics, ControllerMetrics.DEPLOYMENT_ERROR.count()); + addMetric(metrics, ControllerMetrics.DEPLOYMENT_FAILING_UPGRADES.last()); + addMetric(metrics, ControllerMetrics.DEPLOYMENT_FAILURE_PERCENTAGE.last()); + addMetric(metrics, ControllerMetrics.DEPLOYMENT_NODE_COUNT_BY_OS_VERSION.max()); + addMetric(metrics, ControllerMetrics.DEPLOYMENT_OS_CHANGE_DURATION.max()); + addMetric(metrics, ControllerMetrics.DEPLOYMENT_START.count()); + addMetric(metrics, ControllerMetrics.DEPLOYMENT_SUCCESS.count()); + addMetric(metrics, ControllerMetrics.DEPLOYMENT_TEST_FAILURE.count()); + addMetric(metrics, ControllerMetrics.DEPLOYMENT_WARNINGS.last()); + addMetric(metrics, ControllerMetrics.DEPLOYMENT_ENDPOINT_CERTIFICATE_TIMEOUT.count()); + addMetric(metrics, ControllerMetrics.DEPLOYMENT_BROKEN_SYSTEM_VERSION.last()); + + addMetric(metrics, ControllerMetrics.OPERATION_APPLICATION.last()); + addMetric(metrics, ControllerMetrics.OPERATION_CHANGEMANAGEMENT.last()); + addMetric(metrics, ControllerMetrics.OPERATION_CONFIGSERVER.last()); + addMetric(metrics, ControllerMetrics.OPERATION_CONTROLLER.last()); + addMetric(metrics, ControllerMetrics.OPERATION_FLAGS.last()); + addMetric(metrics, ControllerMetrics.OPERATION_OS.last()); + addMetric(metrics, ControllerMetrics.OPERATION_ROUTING.last()); + addMetric(metrics, ControllerMetrics.OPERATION_ZONE.last()); + + addMetric(metrics, ControllerMetrics.REMAINING_ROTATIONS.last()); + addMetric(metrics, ControllerMetrics.DNS_QUEUED_REQUESTS.last()); + addMetric(metrics, ControllerMetrics.ZMS_QUOTA_USAGE.last()); + addMetric(metrics, ControllerMetrics.COREDUMP_PROCESSED.count()); + + addMetric(metrics, ControllerMetrics.METERING_AGE_SECONDS.last()); + + return metrics; + } + + private static Set<Metric> getOtherMetrics() { + Set<Metric> metrics = new LinkedHashSet<>(); + + 
addMetric(metrics, LogdMetrics.LOGD_PROCESSED_LINES.count()); + + return metrics; + } + + private static void addMetric(Set<Metric> metrics, String nameWithSuffix) { + metrics.add(new Metric(nameWithSuffix)); + } + + private static void addMetric(Set<Metric> metrics, VespaMetrics metric, EnumSet<Suffix> suffixes) { + suffixes.forEach(suffix -> metrics.add(new Metric(metric.baseName() + "." + suffix.suffix()))); + } +} diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java index 8a2bae364a1..a0d866fb001 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java @@ -2,7 +2,6 @@ package com.yahoo.vespa.model.admin.monitoring; import ai.vespa.metrics.ClusterControllerMetrics; -import ai.vespa.metrics.ConfigServerMetrics; import ai.vespa.metrics.ContainerMetrics; import ai.vespa.metrics.DistributorMetrics; import ai.vespa.metrics.LogdMetrics; @@ -53,7 +52,6 @@ public class VespaMetricSet { metrics.addAll(getClusterControllerMetrics()); metrics.addAll(getSearchChainMetrics()); metrics.addAll(getContainerMetrics()); - metrics.addAll(getConfigServerMetrics()); metrics.addAll(getSentinelMetrics()); metrics.addAll(getOtherMetrics()); @@ -113,55 +111,6 @@ public class VespaMetricSet { return metrics; } - private static Set<Metric> getConfigServerMetrics() { - Set<Metric> metrics = new LinkedHashSet<>(); - - addMetric(metrics, ConfigServerMetrics.REQUESTS.count()); - addMetric(metrics, ConfigServerMetrics.FAILED_REQUESTS.count()); - addMetric(metrics, ConfigServerMetrics.LATENCY, EnumSet.of(max, sum, count)); - addMetric(metrics, ConfigServerMetrics.CACHE_CONFIG_ELEMS.last()); - addMetric(metrics, ConfigServerMetrics.CACHE_CHECKSUM_ELEMS.last()); - addMetric(metrics, ConfigServerMetrics.HOSTS.last()); - 
addMetric(metrics, ConfigServerMetrics.DELAYED_RESPONSES.count()); - addMetric(metrics, ConfigServerMetrics.SESSION_CHANGE_ERRORS.count()); - - addMetric(metrics, ConfigServerMetrics.ZK_Z_NODES.last()); - addMetric(metrics, ConfigServerMetrics.ZK_AVG_LATENCY.last()); - addMetric(metrics, ConfigServerMetrics.ZK_MAX_LATENCY.last()); - addMetric(metrics, ConfigServerMetrics.ZK_CONNECTIONS.last()); - addMetric(metrics, ConfigServerMetrics.ZK_OUTSTANDING_REQUESTS.last()); - - // Node repository metrics - addMetric(metrics, ConfigServerMetrics.NODES_NON_ACTIVE_FRACTION.last()); - addMetric(metrics, ConfigServerMetrics.CLUSTER_COST.last()); - addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_CPU.last()); - addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_MEMORY.last()); - addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_DISK.last()); - addMetric(metrics, ConfigServerMetrics.WANT_TO_REBOOT.max()); - addMetric(metrics, ConfigServerMetrics.WANT_TO_RESTART.max()); - addMetric(metrics, ConfigServerMetrics.RETIRED.max()); - addMetric(metrics, ConfigServerMetrics.WANT_TO_CHANGE_VESPA_VERSION.max()); - addMetric(metrics, ConfigServerMetrics.HAS_WIRE_GUARD_KEY.last()); - addMetric(metrics, ConfigServerMetrics.WANT_TO_DEPROVISION.max()); - addMetric(metrics, ConfigServerMetrics.SUSPENDED.max()); - addMetric(metrics, ConfigServerMetrics.SOME_SERVICES_DOWN.max()); - addMetric(metrics, ConfigServerMetrics.NODE_FAILER_BAD_NODE.last()); - addMetric(metrics, ConfigServerMetrics.LOCK_ATTEMPT_LOCKED_LOAD, EnumSet.of(max,average)); - - addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_ALLOCATED_CAPACITY_CPU.average()); - addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_ALLOCATED_CAPACITY_MEM.average()); - addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_ALLOCATED_CAPACITY_DISK.average()); - addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_FREE_CAPACITY_CPU.max()); - addMetric(metrics, 
ConfigServerMetrics.HOSTED_VESPA_DOCKER_FREE_CAPACITY_MEM.max()); - addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_FREE_CAPACITY_DISK.max()); - addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_TOTAL_CAPACITY_CPU, EnumSet.of(max,average)); - addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_TOTAL_CAPACITY_DISK, EnumSet.of(max,average)); - addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_TOTAL_CAPACITY_MEM, EnumSet.of(max,average)); - addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_SKEW.last()); - addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_PENDING_REDEPLOYMENTS.last()); - - return metrics; - } private static Set<Metric> getContainerMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/builder/PredefinedMetricSets.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/builder/PredefinedMetricSets.java index 597a2da0f2c..d0a5b1bbe43 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/builder/PredefinedMetricSets.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/builder/PredefinedMetricSets.java @@ -13,6 +13,7 @@ import static com.yahoo.vespa.model.admin.monitoring.NetworkMetrics.networkMetri import static com.yahoo.vespa.model.admin.monitoring.SystemMetrics.systemMetricSet; import static com.yahoo.vespa.model.admin.monitoring.DefaultVespaMetrics.defaultVespaMetricSet; import static com.yahoo.vespa.model.admin.monitoring.VespaMetricSet.vespaMetricSet; +import static com.yahoo.vespa.model.admin.monitoring.InfrastructureMetricSet.infrastructureMetricSet; /** * A data object for predefined metric sets. 
@@ -27,7 +28,8 @@ public class PredefinedMetricSets { vespaMetricSet, systemMetricSet, networkMetricSet, - autoscalingMetricSet + autoscalingMetricSet, + infrastructureMetricSet ); public static Map<String, MetricSet> get() { return sets; } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java index 34ea41384bc..bf8c96db614 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java @@ -303,7 +303,7 @@ public class ContentSearchCluster extends TreeConfigProducer<AnyConfigProducer> if (element == null) { searchNode = SearchNode.create(parent, "" + node.getDistributionKey(), node.getDistributionKey(), spec, clusterName, node, flushOnShutdown, tuning, resourceLimits, deployState.isHosted(), - fractionOfMemoryReserved, deployState.featureFlags()); + fractionOfMemoryReserved, redundancy, deployState.featureFlags()); searchNode.setHostResource(node.getHostResource()); searchNode.initService(deployState); @@ -312,7 +312,7 @@ public class ContentSearchCluster extends TreeConfigProducer<AnyConfigProducer> tls.initService(deployState); } else { searchNode = new SearchNode.Builder(""+node.getDistributionKey(), spec, clusterName, node, flushOnShutdown, - tuning, resourceLimits, fractionOfMemoryReserved) + tuning, resourceLimits, fractionOfMemoryReserved, redundancy) .build(deployState, parent, element.getXml()); tls = new TransactionLogServer.Builder(clusterName, syncTransactionLog).build(deployState, searchNode, element.getXml()); } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/search/NodeResourcesTuning.java b/config-model/src/main/java/com/yahoo/vespa/model/search/NodeResourcesTuning.java index 1ad99404823..02f15ed06cd 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/search/NodeResourcesTuning.java 
+++ b/config-model/src/main/java/com/yahoo/vespa/model/search/NodeResourcesTuning.java @@ -4,6 +4,7 @@ package com.yahoo.vespa.model.search; import com.yahoo.config.provision.NodeResources; import com.yahoo.vespa.config.search.core.ProtonConfig; import com.yahoo.vespa.model.Host; +import com.yahoo.vespa.model.content.Redundancy; import static java.lang.Long.min; import static java.lang.Long.max; @@ -27,13 +28,16 @@ public class NodeResourcesTuning implements ProtonConfig.Producer { private final NodeResources resources; private final int threadsPerSearch; private final double fractionOfMemoryReserved; + private final Redundancy redundancy; public NodeResourcesTuning(NodeResources resources, int threadsPerSearch, - double fractionOfMemoryReserved) { + double fractionOfMemoryReserved, + Redundancy redundancy) { this.resources = resources; this.threadsPerSearch = threadsPerSearch; this.fractionOfMemoryReserved = fractionOfMemoryReserved; + this.redundancy = redundancy; } @Override @@ -57,7 +61,7 @@ public class NodeResourcesTuning implements ProtonConfig.Producer { ProtonConfig.Documentdb dbCfg = builder.build(); if (dbCfg.mode() != ProtonConfig.Documentdb.Mode.Enum.INDEX) { long numDocs = (long)usableMemoryGb() * GB / MEMORY_COST_PER_DOCUMENT_STORE_ONLY; - builder.allocation.initialnumdocs(numDocs); + builder.allocation.initialnumdocs(numDocs/redundancy.readyCopies()); } } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/search/SearchNode.java b/config-model/src/main/java/com/yahoo/vespa/model/search/SearchNode.java index c3de655af92..f238d30308b 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/search/SearchNode.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/search/SearchNode.java @@ -21,6 +21,7 @@ import com.yahoo.vespa.model.admin.monitoring.Monitoring; import com.yahoo.vespa.model.application.validation.RestartConfigs; import com.yahoo.vespa.model.builder.xml.dom.VespaDomBuilder; import 
com.yahoo.vespa.model.content.ContentNode; +import com.yahoo.vespa.model.content.Redundancy; import com.yahoo.vespa.model.content.ResourceLimits; import com.yahoo.vespa.model.filedistribution.FileDistributionConfigProducer; import com.yahoo.vespa.model.filedistribution.FileDistributionConfigProvider; @@ -69,6 +70,7 @@ public class SearchNode extends AbstractService implements private final Optional<Tuning> tuning; private final Optional<ResourceLimits> resourceLimits; private final double fractionOfMemoryReserved; + private final Redundancy redundancy; public static class Builder extends VespaDomBuilder.DomConfigProducerBuilderBase<SearchNode> { @@ -80,10 +82,11 @@ public class SearchNode extends AbstractService implements private final Optional<Tuning> tuning; private final Optional<ResourceLimits> resourceLimits; private final double fractionOfMemoryReserved; + private final Redundancy redundancy; public Builder(String name, NodeSpec nodeSpec, String clusterName, ContentNode node, boolean flushOnShutdown, Optional<Tuning> tuning, Optional<ResourceLimits> resourceLimits, - double fractionOfMemoryReserved) { + double fractionOfMemoryReserved, Redundancy redundancy) { this.name = name; this.nodeSpec = nodeSpec; this.clusterName = clusterName; @@ -92,23 +95,26 @@ public class SearchNode extends AbstractService implements this.tuning = tuning; this.resourceLimits = resourceLimits; this.fractionOfMemoryReserved = fractionOfMemoryReserved; + this.redundancy = redundancy; } @Override - protected SearchNode doBuild(DeployState deployState, TreeConfigProducer<AnyConfigProducer> ancestor, Element producerSpec) { + protected SearchNode doBuild(DeployState deployState, TreeConfigProducer<AnyConfigProducer> ancestor, + Element producerSpec) { return SearchNode.create(ancestor, name, contentNode.getDistributionKey(), nodeSpec, clusterName, contentNode, flushOnShutdown, tuning, resourceLimits, deployState.isHosted(), - fractionOfMemoryReserved, deployState.featureFlags()); + 
fractionOfMemoryReserved, redundancy, deployState.featureFlags()); } } public static SearchNode create(TreeConfigProducer<?> parent, String name, int distributionKey, NodeSpec nodeSpec, String clusterName, AbstractService serviceLayerService, boolean flushOnShutdown, - Optional<Tuning> tuning, Optional<ResourceLimits> resourceLimits, boolean isHostedVespa, - double fractionOfMemoryReserved, ModelContext.FeatureFlags featureFlags) { + Optional<Tuning> tuning, Optional<ResourceLimits> resourceLimits, + boolean isHostedVespa, double fractionOfMemoryReserved, Redundancy redundancy, + ModelContext.FeatureFlags featureFlags) { SearchNode node = new SearchNode(parent, name, distributionKey, nodeSpec, clusterName, serviceLayerService, flushOnShutdown, - tuning, resourceLimits, isHostedVespa, fractionOfMemoryReserved); + tuning, resourceLimits, isHostedVespa, fractionOfMemoryReserved, redundancy); if (featureFlags.loadCodeAsHugePages()) { node.addEnvironmentVariable("VESPA_LOAD_CODE_AS_HUGEPAGES", true); } @@ -121,7 +127,7 @@ public class SearchNode extends AbstractService implements private SearchNode(TreeConfigProducer<?> parent, String name, int distributionKey, NodeSpec nodeSpec, String clusterName, AbstractService serviceLayerService, boolean flushOnShutdown, Optional<Tuning> tuning, Optional<ResourceLimits> resourceLimits, boolean isHostedVespa, - double fractionOfMemoryReserved) { + double fractionOfMemoryReserved, Redundancy redundancy) { super(parent, name); this.distributionKey = distributionKey; this.serviceLayerService = serviceLayerService; @@ -138,6 +144,7 @@ public class SearchNode extends AbstractService implements // Properties are set in DomSearchBuilder this.tuning = tuning; this.resourceLimits = resourceLimits; + this.redundancy = redundancy; setPropertiesElastic(clusterName, distributionKey); addEnvironmentVariable("OMP_NUM_THREADS", 1); } @@ -279,7 +286,7 @@ public class SearchNode extends AbstractService implements if (nodeResources.isPresent()) { var 
nodeResourcesTuning = new NodeResourcesTuning(nodeResources.get(), tuning.map(Tuning::threadsPerSearch).orElse(1), - fractionOfMemoryReserved); + fractionOfMemoryReserved, redundancy); nodeResourcesTuning.getConfig(builder); tuning.ifPresent(t -> t.getConfig(builder)); diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java index ef2767249a5..b9909214dfd 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java @@ -44,6 +44,7 @@ import org.junit.jupiter.api.Test; import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Optional; import java.util.OptionalInt; @@ -1525,7 +1526,7 @@ public class ContentClusterTest extends ContentBaseTest { i, i, i); } return services + - String.format(" </group>" + + String.format(Locale.US, " </group>" + " <tuning>" + " <cluster-controller>" + " <groups-allowed-down-ratio>%f</groups-allowed-down-ratio>" + diff --git a/config-model/src/test/java/com/yahoo/vespa/model/search/NodeResourcesTuningTest.java b/config-model/src/test/java/com/yahoo/vespa/model/search/NodeResourcesTuningTest.java index 8e719fa90c3..d5215ab5752 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/search/NodeResourcesTuningTest.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/search/NodeResourcesTuningTest.java @@ -6,9 +6,9 @@ import com.yahoo.config.provision.Flavor; import com.yahoo.config.provisioning.FlavorsConfig; import com.yahoo.vespa.config.search.core.ProtonConfig; import com.yahoo.vespa.model.container.ApplicationContainerCluster; +import com.yahoo.vespa.model.content.Redundancy; import org.junit.jupiter.api.Test; -import java.util.Arrays; import java.util.List; import static 
com.yahoo.vespa.model.Host.memoryOverheadGb; @@ -22,7 +22,6 @@ import static com.yahoo.vespa.model.search.NodeResourcesTuning.GB; public class NodeResourcesTuningTest { private static final double delta = 0.00001; - private static final double combinedFactor = 1 - 18.0/100; private static final double DEFAULT_MEMORY_GAIN = 0.08; @Test @@ -42,7 +41,7 @@ public class NodeResourcesTuningTest { assertEquals(0.7, memoryOverheadGb, delta); } - private ProtonConfig getProtonMemoryConfig(List<Pair<String, String>> sdAndMode, double gb) { + private ProtonConfig getProtonMemoryConfig(List<Pair<String, String>> sdAndMode, double gb, Redundancy redundancy) { ProtonConfig.Builder builder = new ProtonConfig.Builder(); for (Pair<String, String> sdMode : sdAndMode) { builder.documentdb.add(new ProtonConfig.Documentdb.Builder() @@ -50,24 +49,25 @@ public class NodeResourcesTuningTest { .configid("some/config/id/" + sdMode.getFirst()) .mode(ProtonConfig.Documentdb.Mode.Enum.valueOf(sdMode.getSecond()))); } - return configFromMemorySetting(gb, builder); + return configFromMemorySetting(gb, builder, redundancy); } - private void verify_that_initial_numdocs_is_dependent_of_mode() { - ProtonConfig cfg = getProtonMemoryConfig(Arrays.asList(new Pair<>("a", "INDEX"), new Pair<>("b", "STREAMING"), new Pair<>("c", "STORE_ONLY")), 24 + memoryOverheadGb); + private void verify_that_initial_numdocs_is_dependent_of_mode(int readyCopies) { + ProtonConfig cfg = getProtonMemoryConfig(List.of(new Pair<>("a", "INDEX"), new Pair<>("b", "STREAMING"), new Pair<>("c", "STORE_ONLY")), + 24 + memoryOverheadGb, new Redundancy(readyCopies+1,readyCopies+1, readyCopies,1, readyCopies)); assertEquals(3, cfg.documentdb().size()); assertEquals(1024, cfg.documentdb(0).allocation().initialnumdocs()); assertEquals("a", cfg.documentdb(0).inputdoctypename()); - assertEquals(24 * GB / 46, cfg.documentdb(1).allocation().initialnumdocs()); + assertEquals(24 * GB / (46 * readyCopies), 
cfg.documentdb(1).allocation().initialnumdocs()); assertEquals("b", cfg.documentdb(1).inputdoctypename()); - assertEquals(24 * GB / 46, cfg.documentdb(2).allocation().initialnumdocs()); + assertEquals(24 * GB / (46 * readyCopies), cfg.documentdb(2).allocation().initialnumdocs()); assertEquals("c", cfg.documentdb(2).inputdoctypename()); } @Test void require_that_initial_numdocs_is_dependent_of_mode_and_searchablecopies() { - verify_that_initial_numdocs_is_dependent_of_mode(); - + verify_that_initial_numdocs_is_dependent_of_mode(1); + verify_that_initial_numdocs_is_dependent_of_mode(2); } @Test @@ -227,9 +227,9 @@ public class NodeResourcesTuningTest { return getConfig(new FlavorsConfig.Flavor.Builder().minMainMemoryAvailableGb(memoryGb), fractionOfMemoryReserved); } - private static ProtonConfig configFromMemorySetting(double memoryGb, ProtonConfig.Builder builder) { + private static ProtonConfig configFromMemorySetting(double memoryGb, ProtonConfig.Builder builder, Redundancy redundancy) { return getConfig(new FlavorsConfig.Flavor.Builder() - .minMainMemoryAvailableGb(memoryGb), builder); + .minMainMemoryAvailableGb(memoryGb), builder, redundancy); } private static ProtonConfig configFromNumCoresSetting(double numCores) { @@ -255,7 +255,11 @@ public class NodeResourcesTuningTest { } private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder, ProtonConfig.Builder protonBuilder) { - return getConfig(flavorBuilder, protonBuilder,1); + return getConfig(flavorBuilder, protonBuilder, new Redundancy(1,1,1,1,1)); + } + private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder, ProtonConfig.Builder protonBuilder, + Redundancy redundancy) { + return getConfig(flavorBuilder, protonBuilder,1, redundancy); } private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder, ProtonConfig.Builder protonBuilder, double fractionOfMemoryReserved) { return getConfig(flavorBuilder, protonBuilder, 1, fractionOfMemoryReserved); 
@@ -263,13 +267,24 @@ public class NodeResourcesTuningTest { private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder, ProtonConfig.Builder protonBuilder, int numThreadsPerSearch) { - return getConfig(flavorBuilder, protonBuilder, numThreadsPerSearch, 0); + return getConfig(flavorBuilder, protonBuilder, numThreadsPerSearch, new Redundancy(1,1,1,1,1)); + } + + private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder, ProtonConfig.Builder protonBuilder, + int numThreadsPerSearch, Redundancy redundancy) { + return getConfig(flavorBuilder, protonBuilder, numThreadsPerSearch, 0, redundancy); } private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder, ProtonConfig.Builder protonBuilder, int numThreadsPerSearch, double fractionOfMemoryReserved) { + return getConfig(flavorBuilder, protonBuilder, numThreadsPerSearch, fractionOfMemoryReserved, + new Redundancy(1,1,1,1,1)); + } + private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder, ProtonConfig.Builder protonBuilder, + int numThreadsPerSearch, double fractionOfMemoryReserved, Redundancy redundancy) { flavorBuilder.name("my_flavor"); - NodeResourcesTuning tuning = new NodeResourcesTuning(new Flavor(new FlavorsConfig.Flavor(flavorBuilder)).resources(), numThreadsPerSearch, fractionOfMemoryReserved); + NodeResourcesTuning tuning = new NodeResourcesTuning(new Flavor(new FlavorsConfig.Flavor(flavorBuilder)).resources(), + numThreadsPerSearch, fractionOfMemoryReserved, redundancy); tuning.getConfig(protonBuilder); return new ProtonConfig(protonBuilder); } diff --git a/config-model/src/test/java/com/yahoo/vespa/model/search/test/SearchNodeTest.java b/config-model/src/test/java/com/yahoo/vespa/model/search/test/SearchNodeTest.java index f60441bac03..b357a3e6718 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/search/test/SearchNodeTest.java +++ 
b/config-model/src/test/java/com/yahoo/vespa/model/search/test/SearchNodeTest.java @@ -10,6 +10,7 @@ import com.yahoo.vespa.config.search.core.ProtonConfig; import com.yahoo.vespa.defaults.Defaults; import com.yahoo.vespa.model.Host; import com.yahoo.vespa.model.HostResource; +import com.yahoo.vespa.model.content.Redundancy; import com.yahoo.vespa.model.search.NodeSpec; import com.yahoo.vespa.model.search.SearchNode; import com.yahoo.vespa.model.search.TransactionLogServer; @@ -49,7 +50,7 @@ public class SearchNodeTest { private static SearchNode createSearchNode(MockRoot root, String name, int distributionKey, NodeSpec nodeSpec, boolean flushOnShutDown, boolean isHosted, ModelContext.FeatureFlags featureFlags) { return SearchNode.create(root, name, distributionKey, nodeSpec, "mycluster", null, flushOnShutDown, - Optional.empty(), Optional.empty(), isHosted, 0.0, featureFlags); + Optional.empty(), Optional.empty(), isHosted, 0.0, new Redundancy(1,1,1,1,1), featureFlags); } private static SearchNode createSearchNode(MockRoot root) { diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/zone/ZoneId.java b/config-provisioning/src/main/java/com/yahoo/config/provision/zone/ZoneId.java index 7c5c15e23e6..b95c0cce149 100644 --- a/config-provisioning/src/main/java/com/yahoo/config/provision/zone/ZoneId.java +++ b/config-provisioning/src/main/java/com/yahoo/config/provision/zone/ZoneId.java @@ -15,6 +15,17 @@ import java.util.Objects; */ public class ZoneId { + private static final ZoneId CONTROLLER = from(Environment.prod, RegionName.from("controller")); + + /** + * The ZoneId associated with the controller, distinct from all other zones in the system, but a constant across systems. + * + * <p>The controller may also be associated with a real zone, i.e. with a region defining the location like + * aws-us-east-1a. 
Because such a zone ID is different for different systems, and may clash with a prod zone in the + * same region and system, the virtual zone ID is often used.</p> + */ + public static ZoneId ofVirtualControllerZone() { return CONTROLLER; } + private final Environment environment; private final RegionName region; diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/Deployment.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/Deployment.java index 062133b6b6e..a62bf25bc4c 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/Deployment.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/Deployment.java @@ -1,6 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.config.server.deploy; +import ai.vespa.metrics.ConfigServerMetrics; import com.yahoo.concurrent.UncheckedTimeoutException; import com.yahoo.config.FileReference; import com.yahoo.config.application.api.DeployLogger; @@ -105,7 +106,7 @@ public class Deployment implements com.yahoo.config.provision.Deployment { if (prepared) return; PrepareParams params = this.params.get(); - try (ActionTimer timer = applicationRepository.timerFor(params.getApplicationId(), "deployment.prepareMillis")) { + try (ActionTimer timer = applicationRepository.timerFor(params.getApplicationId(), ConfigServerMetrics.DEPLOYMENT_PREPARE_MILLIS.baseName())) { this.configChangeActions = sessionRepository().prepareLocalSession(session, deployLogger, params, clock.instant()); this.prepared = true; } catch (Exception e) { @@ -126,7 +127,7 @@ public class Deployment implements com.yahoo.config.provision.Deployment { waitForResourcesOrTimeout(params, session, provisioner); ApplicationId applicationId = session.getApplicationId(); - try (ActionTimer timer = applicationRepository.timerFor(applicationId, "deployment.activateMillis")) { + try (ActionTimer timer = 
applicationRepository.timerFor(applicationId, ConfigServerMetrics.DEPLOYMENT_ACTIVATE_MILLIS.baseName())) { TimeoutBudget timeoutBudget = params.getTimeoutBudget(); timeoutBudget.assertNotTimedOut(() -> "Timeout exceeded when trying to activate '" + applicationId + "'"); diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationHandler.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationHandler.java index 61fe2f77224..bd6e0f90b54 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationHandler.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationHandler.java @@ -449,6 +449,7 @@ public class ApplicationHandler extends HttpHandler { Cursor serviceObject = serviceArray.addObject(); String hostName = serviceInfo.getHostName(); int statePort = ConfigConvergenceChecker.getStatePort(serviceInfo).get(); + serviceInfo.getProperty("clustername").ifPresent(clusterName -> serviceObject.setString("clusterName", clusterName)); serviceObject.setString("host", hostName); serviceObject.setLong("port", statePort); serviceObject.setString("type", serviceInfo.getServiceType()); diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/ApplicationHandlerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/ApplicationHandlerTest.java index 306ba6da6f9..7a0ab6d2a23 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/ApplicationHandlerTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/ApplicationHandlerTest.java @@ -668,7 +668,7 @@ public class ApplicationHandlerTest { { HttpServiceListResponse response = - new HttpServiceListResponse(new ServiceListResponse(Map.of(createServiceInfo(hostname, port), 3L), + new HttpServiceListResponse(new ServiceListResponse(Map.of(createServiceInfo(hostname, port, Optional.empty()), 3L), 3L, 3L), requestUrl); @@ -698,8 +698,8 @@ 
public class ApplicationHandlerTest { URI serviceUrl2 = URI.create("https://configserver/serviceconvergence/" + hostAndPort2); Map<ServiceInfo, Long> serviceInfos = new HashMap<>(); - serviceInfos.put(createServiceInfo(hostname, port), 4L); - serviceInfos.put(createServiceInfo(hostname2, port2), 3L); + serviceInfos.put(createServiceInfo(hostname, port, Optional.empty()), 4L); + serviceInfos.put(createServiceInfo(hostname2, port2, Optional.of("foo")), 3L); HttpServiceListResponse response = new HttpServiceListResponse(new ServiceListResponse(serviceInfos, @@ -712,18 +712,19 @@ public class ApplicationHandlerTest { " \"host\": \"" + hostname + "\",\n" + " \"port\": " + port + ",\n" + " \"type\": \"container\",\n" + - " \"url\": \"" + serviceUrl.toString() + "\",\n" + + " \"url\": \"" + serviceUrl + "\",\n" + " \"currentGeneration\":" + 4 + "\n" + " },\n" + " {\n" + + " \"clusterName\": \"foo\",\n" + " \"host\": \"" + hostname2 + "\",\n" + " \"port\": " + port2 + ",\n" + " \"type\": \"container\",\n" + - " \"url\": \"" + serviceUrl2.toString() + "\",\n" + + " \"url\": \"" + serviceUrl2 + "\",\n" + " \"currentGeneration\":" + 3 + "\n" + " }\n" + " ],\n" + - " \"url\": \"" + requestUrl.toString() + "\",\n" + + " \"url\": \"" + requestUrl + "\",\n" + " \"currentGeneration\": 3,\n" + " \"wantedGeneration\": 4,\n" + " \"converged\": false\n" + @@ -899,11 +900,12 @@ public class ApplicationHandlerTest { assertEquals(status, response.getStatus()); } - private ServiceInfo createServiceInfo(String hostname, int port) { + private ServiceInfo createServiceInfo(String hostname, int port, Optional<String> clusterName) { return new ServiceInfo("container", "container", List.of(new PortInfo(port, List.of("state"))), - Map.of(), + clusterName.map(name -> Map.of("clustername", name)) + .orElseGet(Map::of), "configId", hostname); } diff --git a/container-core/src/main/java/com/yahoo/container/jdisc/state/StateHandler.java 
b/container-core/src/main/java/com/yahoo/container/jdisc/state/StateHandler.java index af98e380f2a..33fa0bd7bab 100644 --- a/container-core/src/main/java/com/yahoo/container/jdisc/state/StateHandler.java +++ b/container-core/src/main/java/com/yahoo/container/jdisc/state/StateHandler.java @@ -290,7 +290,7 @@ public class StateHandler extends AbstractRequestHandler implements CapabilityRe Tuple latencySeconds = new Tuple(NULL_DIMENSIONS, "latencySeconds", null); for (Map.Entry<MetricDimensions, MetricSet> entry : snapshot) { MetricSet metricSet = entry.getValue(); - MetricValue val = metricSet.get(ContainerMetrics.SERVER_TOTAL_SUCCESFUL_RESPONSE_LATENCY.baseName()); + MetricValue val = metricSet.get(ContainerMetrics.SERVER_TOTAL_SUCCESSFUL_RESPONSE_LATENCY.baseName()); if (val instanceof GaugeMetric gauge) { latencySeconds.add(GaugeMetric.newInstance(gauge.getLast() / 1000, gauge.getMax() / 1000, diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/MetricDefinitions.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/MetricDefinitions.java index 327640cb7ed..2a382d22a68 100644 --- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/MetricDefinitions.java +++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/MetricDefinitions.java @@ -43,7 +43,7 @@ class MetricDefinitions { static final String NUM_SUCCESSFUL_WRITES = ContainerMetrics.SERVER_NUM_SUCCESSFUL_RESPONSE_WRITES.baseName(); static final String NUM_FAILED_WRITES = ContainerMetrics.SERVER_NUM_FAILED_RESPONSE_WRITES.baseName(); - static final String TOTAL_SUCCESSFUL_LATENCY = ContainerMetrics.SERVER_TOTAL_SUCCESFUL_RESPONSE_LATENCY.baseName(); + static final String TOTAL_SUCCESSFUL_LATENCY = ContainerMetrics.SERVER_TOTAL_SUCCESSFUL_RESPONSE_LATENCY.baseName(); static final String TOTAL_FAILED_LATENCY = ContainerMetrics.SERVER_TOTAL_FAILED_RESPONSE_LATENCY.baseName(); static final String TIME_TO_FIRST_BYTE = 
ContainerMetrics.SERVER_TIME_TO_FIRST_BYTE.baseName(); diff --git a/container-search/src/main/java/com/yahoo/search/yql/YqlParser.java b/container-search/src/main/java/com/yahoo/search/yql/YqlParser.java index 639e5b592c3..783cca0ca2d 100644 --- a/container-search/src/main/java/com/yahoo/search/yql/YqlParser.java +++ b/container-search/src/main/java/com/yahoo/search/yql/YqlParser.java @@ -409,12 +409,31 @@ public class YqlParser implements Parser { return fillWeightedSet(ast, args.get(1), new DotProductItem(getIndex(args.get(0)))); } + private ParsedDegree degreesFromArg(OperatorNode<ExpressionOperator> ast, boolean first) { + Object arg = null; + switch (ast.getOperator()) { + case LITERAL: + arg = ast.getArgument(0); + break; + case READ_FIELD: + arg = ast.getArgument(1); + break; + default: + throw newUnexpectedArgumentException(ast.getOperator(), + ExpressionOperator.READ_FIELD, ExpressionOperator.PROPREF); + } + if (arg instanceof Number n) { + return new ParsedDegree(n.doubleValue(), first, !first); + } + return ParsedDegree.fromString(arg.toString(), first, !first); + } + private Item buildGeoLocation(OperatorNode<ExpressionOperator> ast) { List<OperatorNode<ExpressionOperator>> args = ast.getArgument(1); Preconditions.checkArgument(args.size() == 4, "Expected 4 arguments, got %s.", args.size()); String field = fetchFieldName(args.get(0)); - var coord_1 = ParsedDegree.fromString(fetchLiteral(args.get(1)), true, false); - var coord_2 = ParsedDegree.fromString(fetchLiteral(args.get(2)), false, true); + var coord_1 = degreesFromArg(args.get(1), true); + var coord_2 = degreesFromArg(args.get(2), false); double radius = DistanceParser.parse(fetchLiteral(args.get(3))); var loc = new Location(); if (coord_1.isLatitude && coord_2.isLongitude) { diff --git a/container-search/src/test/java/com/yahoo/search/yql/YqlParserTestCase.java b/container-search/src/test/java/com/yahoo/search/yql/YqlParserTestCase.java index 33f840c7af0..911acf67daf 100644 --- 
a/container-search/src/test/java/com/yahoo/search/yql/YqlParserTestCase.java +++ b/container-search/src/test/java/com/yahoo/search/yql/YqlParserTestCase.java @@ -645,6 +645,8 @@ public class YqlParserTestCase { "Invalid geoLocation coordinates 'Latitude: 2.0 degrees' and 'Latitude: 5.0 degrees'")); assertParse("select foo from bar where geoLocation(workplace, -12, -34, \"-77 d\")", "GEO_LOCATION workplace:(2,-34000000,-12000000,-1,0,1,0,4201111954)"); + assertParse("select * from test_index where geoLocation(coordinate, 0.000010, 0.000010, \"10.000000 km\")", + "GEO_LOCATION coordinate:(2,10,10,90133,0,1,0,4294967294)"); } @Test diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/ConfigServerFlagsTarget.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/ConfigServerFlagsTarget.java index 5842ee3c3c0..585000cf22c 100644 --- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/ConfigServerFlagsTarget.java +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/ConfigServerFlagsTarget.java @@ -1,9 +1,11 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.hosted.controller.api.systemflags.v1; +import com.yahoo.config.provision.CloudName; import com.yahoo.config.provision.SystemName; import com.yahoo.config.provision.zone.ZoneId; import com.yahoo.vespa.athenz.api.AthenzIdentity; +import com.yahoo.vespa.flags.json.FlagData; import java.net.URI; import java.util.List; @@ -20,12 +22,14 @@ import static com.yahoo.vespa.hosted.controller.api.systemflags.v1.FlagsTarget.z */ class ConfigServerFlagsTarget implements FlagsTarget { private final SystemName system; + private final CloudName cloud; private final ZoneId zone; private final URI endpoint; private final AthenzIdentity identity; - ConfigServerFlagsTarget(SystemName system, ZoneId zone, URI endpoint, AthenzIdentity identity) { + ConfigServerFlagsTarget(SystemName system, CloudName cloud, ZoneId zone, URI endpoint, AthenzIdentity identity) { this.system = Objects.requireNonNull(system); + this.cloud = Objects.requireNonNull(cloud); this.zone = Objects.requireNonNull(zone); this.endpoint = Objects.requireNonNull(endpoint); this.identity = Objects.requireNonNull(identity); @@ -36,16 +40,32 @@ class ConfigServerFlagsTarget implements FlagsTarget { @Override public Optional<AthenzIdentity> athenzHttpsIdentity() { return Optional.of(identity); } @Override public String asString() { return String.format("%s.%s", system.value(), zone.value()); } - @Override public boolean equals(Object o) { + @Override + public FlagData partiallyResolveFlagData(FlagData data) { + return FlagsTarget.partialResolve(data, system, cloud, zone); + } + + @Override + public String toString() { + return "ConfigServerFlagsTarget{" + + "system=" + system + + ", cloud=" + cloud + + ", zone=" + zone + + ", endpoint=" + endpoint + + ", identity=" + identity + + '}'; + } + + @Override + public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; ConfigServerFlagsTarget that = (ConfigServerFlagsTarget) o; - return system == 
that.system && - Objects.equals(zone, that.zone) && - Objects.equals(endpoint, that.endpoint) && - Objects.equals(identity, that.identity); + return system == that.system && cloud.equals(that.cloud) && zone.equals(that.zone) && endpoint.equals(that.endpoint) && identity.equals(that.identity); } - @Override public int hashCode() { return Objects.hash(system, zone, endpoint, identity); } + @Override + public int hashCode() { + return Objects.hash(system, cloud, zone, endpoint, identity); + } } - diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/ControllerFlagsTarget.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/ControllerFlagsTarget.java index efeaf12de1c..043c6ea5963 100644 --- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/ControllerFlagsTarget.java +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/ControllerFlagsTarget.java @@ -1,8 +1,11 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.hosted.controller.api.systemflags.v1; +import com.yahoo.config.provision.CloudName; import com.yahoo.config.provision.SystemName; +import com.yahoo.config.provision.zone.ZoneId; import com.yahoo.vespa.athenz.api.AthenzIdentity; +import com.yahoo.vespa.flags.json.FlagData; import java.net.URI; import java.util.List; @@ -18,20 +21,44 @@ import static com.yahoo.vespa.hosted.controller.api.systemflags.v1.FlagsTarget.s */ class ControllerFlagsTarget implements FlagsTarget { private final SystemName system; + private final CloudName cloud; + private final ZoneId zone; - ControllerFlagsTarget(SystemName system) { this.system = Objects.requireNonNull(system); } + ControllerFlagsTarget(SystemName system, CloudName cloud, ZoneId zone) { + this.system = Objects.requireNonNull(system); + this.cloud = Objects.requireNonNull(cloud); + this.zone = Objects.requireNonNull(zone); + } @Override public List<String> flagDataFilesPrioritized() { return List.of(controllerFile(system), systemFile(system), defaultFile()); } @Override public URI endpoint() { return URI.create("https://localhost:4443/"); } // Note: Cannot use VIPs for controllers due to network configuration on AWS @Override public Optional<AthenzIdentity> athenzHttpsIdentity() { return Optional.empty(); } @Override public String asString() { return String.format("%s.controller", system.value()); } - @Override public boolean equals(Object o) { + @Override + public FlagData partiallyResolveFlagData(FlagData data) { + return FlagsTarget.partialResolve(data, system, cloud, zone); + } + + @Override + public String toString() { + return "ControllerFlagsTarget{" + + "system=" + system + + ", cloud=" + cloud + + ", zone=" + zone + + '}'; + } + + @Override + public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; ControllerFlagsTarget that = (ControllerFlagsTarget) o; - return system == that.system; + return system == that.system && 
cloud.equals(that.cloud) && zone.equals(that.zone); } - @Override public int hashCode() { return Objects.hash(system); } + @Override + public int hashCode() { + return Objects.hash(system, cloud, zone); + } } diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/FlagsTarget.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/FlagsTarget.java index 1c8e68ff378..bad53620c81 100644 --- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/FlagsTarget.java +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/FlagsTarget.java @@ -1,20 +1,32 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.controller.api.systemflags.v1; +import com.yahoo.config.provision.CloudName; import com.yahoo.config.provision.Environment; +import com.yahoo.config.provision.RegionName; import com.yahoo.config.provision.SystemName; import com.yahoo.config.provision.zone.ZoneApi; import com.yahoo.config.provision.zone.ZoneId; import com.yahoo.config.provision.zone.ZoneList; import com.yahoo.vespa.athenz.api.AthenzIdentity; +import com.yahoo.vespa.flags.FetchVector; +import com.yahoo.vespa.flags.FlagDefinition; +import com.yahoo.vespa.flags.Flags; +import com.yahoo.vespa.flags.json.FlagData; import com.yahoo.vespa.hosted.controller.api.integration.zone.ZoneRegistry; import java.net.URI; +import java.util.EnumSet; import java.util.HashSet; import java.util.List; import java.util.Optional; import java.util.Set; +import static com.yahoo.vespa.flags.FetchVector.Dimension.CLOUD; +import static com.yahoo.vespa.flags.FetchVector.Dimension.ENVIRONMENT; +import static com.yahoo.vespa.flags.FetchVector.Dimension.SYSTEM; +import static com.yahoo.vespa.flags.FetchVector.Dimension.ZONE_ID; + /** * Represents either configservers in a zone or controllers in a system. 
* @@ -38,24 +50,28 @@ public interface FlagsTarget { Optional<AthenzIdentity> athenzHttpsIdentity(); String asString(); + FlagData partiallyResolveFlagData(FlagData data); + static Set<FlagsTarget> getAllTargetsInSystem(ZoneRegistry registry, boolean reachableOnly) { - SystemName system = registry.system(); Set<FlagsTarget> targets = new HashSet<>(); ZoneList filteredZones = reachableOnly ? registry.zones().reachable() : registry.zones().all(); for (ZoneApi zone : filteredZones.zones()) { - targets.add(forConfigServer(registry, zone.getId())); + targets.add(forConfigServer(registry, zone)); } - targets.add(forController(system)); + targets.add(forController(registry.systemZone())); return targets; } - static FlagsTarget forController(SystemName systemName) { - return new ControllerFlagsTarget(systemName); + static FlagsTarget forController(ZoneApi controllerZone) { + return new ControllerFlagsTarget(controllerZone.getSystemName(), controllerZone.getCloudName(), controllerZone.getVirtualId()); } - static FlagsTarget forConfigServer(ZoneRegistry registry, ZoneId zoneId) { - return new ConfigServerFlagsTarget( - registry.system(), zoneId, registry.getConfigServerVipUri(zoneId), registry.getConfigServerHttpsIdentity(zoneId)); + static FlagsTarget forConfigServer(ZoneRegistry registry, ZoneApi zone) { + return new ConfigServerFlagsTarget(registry.system(), + zone.getCloudName(), + zone.getVirtualId(), + registry.getConfigServerVipUri(zone.getVirtualId()), + registry.getConfigServerHttpsIdentity(zone.getVirtualId())); } static String defaultFile() { return jsonFile("default"); } @@ -64,6 +80,25 @@ public interface FlagsTarget { static String zoneFile(SystemName system, ZoneId zone) { return jsonFile(system.value() + "." + zone.environment().value() + "." 
+ zone.region().value()); } static String controllerFile(SystemName system) { return jsonFile(system.value() + ".controller"); } + /** Partially resolve inter-zone dimensions, except those dimensions defined by the flag for a controller zone. */ + static FlagData partialResolve(FlagData data, SystemName system, CloudName cloud, ZoneId virtualZoneId) { + Set<FetchVector.Dimension> flagDimensions = + virtualZoneId.equals(ZoneId.ofVirtualControllerZone()) ? + Flags.getFlag(data.id()) + .map(FlagDefinition::getDimensions) + .map(Set::copyOf) + // E.g. testing: Assume unknown flag should resolve any and all dimensions below + .orElse(EnumSet.noneOf(FetchVector.Dimension.class)) : + EnumSet.noneOf(FetchVector.Dimension.class); + + var fetchVector = new FetchVector(); + if (!flagDimensions.contains(CLOUD)) fetchVector = fetchVector.with(CLOUD, cloud.value()); + if (!flagDimensions.contains(ENVIRONMENT)) fetchVector = fetchVector.with(ENVIRONMENT, virtualZoneId.environment().value()); + if (!flagDimensions.contains(SYSTEM)) fetchVector = fetchVector.with(SYSTEM, system.value()); + if (!flagDimensions.contains(ZONE_ID)) fetchVector = fetchVector.with(ZONE_ID, virtualZoneId.value()); + return fetchVector.isEmpty() ? 
data : data.partialResolve(fetchVector); + } + private static String jsonFile(String nameWithoutExtension) { return nameWithoutExtension + ".json"; } } diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchive.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchive.java index 60950341a42..1c547fea8ba 100644 --- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchive.java +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchive.java @@ -4,9 +4,17 @@ package com.yahoo.vespa.hosted.controller.api.systemflags.v1; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.node.ObjectNode; +import com.yahoo.component.Version; import com.yahoo.config.provision.ApplicationId; +import com.yahoo.config.provision.CloudName; +import com.yahoo.config.provision.ClusterSpec; +import com.yahoo.config.provision.Environment; +import com.yahoo.config.provision.HostName; import com.yahoo.config.provision.NodeType; import com.yahoo.config.provision.SystemName; +import com.yahoo.config.provision.TenantName; +import com.yahoo.config.provision.zone.ZoneApi; +import com.yahoo.config.provision.zone.ZoneId; import com.yahoo.text.JSON; import com.yahoo.vespa.flags.FetchVector; import com.yahoo.vespa.flags.FlagId; @@ -38,6 +46,9 @@ import java.util.zip.ZipEntry; import java.util.zip.ZipInputStream; import java.util.zip.ZipOutputStream; +import static com.yahoo.config.provision.CloudName.AWS; +import static com.yahoo.config.provision.CloudName.GCP; +import static com.yahoo.config.provision.CloudName.YAHOO; import static com.yahoo.yolean.Exceptions.uncheck; /** @@ -189,7 +200,10 @@ public class SystemFlagsDataArchive { if (rawData.isBlank()) { flagData = new FlagData(directoryDeducedFlagId); } else { 
- String normalizedRawData = normalizeJson(rawData); + Set<ZoneId> zones = systemDefinition == null ? + Set.of() : + systemDefinition.zones().all().zones().stream().map(ZoneApi::getVirtualId).collect(Collectors.toSet()); + String normalizedRawData = normalizeJson(rawData, zones); flagData = FlagData.deserialize(normalizedRawData); if (!directoryDeducedFlagId.equals(flagData.id())) { throw new IllegalArgumentException( @@ -217,41 +231,63 @@ public class SystemFlagsDataArchive { builder.addFile(filename, flagData); } - static String normalizeJson(String json) { + static String normalizeJson(String json, Set<ZoneId> zones) { JsonNode root = uncheck(() -> mapper.readTree(json)); removeCommentsRecursively(root); - verifyValues(root); + verifyValues(root, zones); return root.toString(); } - private static void verifyValues(JsonNode root) { + private static void verifyValues(JsonNode root, Set<ZoneId> zones) { var cursor = new JsonAccessor(root); cursor.get("rules").forEachArrayElement(rule -> rule.get("conditions").forEachArrayElement(condition -> { - var dimension = condition.get("dimension"); - if (dimension.isEqualTo(DimensionHelper.toWire(FetchVector.Dimension.APPLICATION_ID))) { - condition.get("values").forEachArrayElement(conditionValue -> { - String applicationIdString = conditionValue.asString() - .orElseThrow(() -> new IllegalArgumentException("Non-string application ID: " + conditionValue)); - // Throws exception if not recognized - ApplicationId.fromSerializedForm(applicationIdString); + FetchVector.Dimension dimension = DimensionHelper + .fromWire(condition.get("dimension") + .asString() + .orElseThrow(() -> new IllegalArgumentException("Invalid dimension in condition: " + condition))); + switch (dimension) { + case APPLICATION_ID -> validateStringValues(condition, ApplicationId::fromSerializedForm); + case CONSOLE_USER_EMAIL -> validateStringValues(condition, email -> {}); + case CLOUD -> validateStringValues(condition, cloud -> { + if (!Set.of(YAHOO, AWS, 
GCP).contains(CloudName.from(cloud))) + throw new IllegalArgumentException("Unknown cloud: " + cloud); }); - } else if (dimension.isEqualTo(DimensionHelper.toWire(FetchVector.Dimension.NODE_TYPE))) { - condition.get("values").forEachArrayElement(conditionValue -> { - String nodeTypeString = conditionValue.asString() - .orElseThrow(() -> new IllegalArgumentException("Non-string node type: " + conditionValue)); - // Throws exception if not recognized - NodeType.valueOf(nodeTypeString); + case CLUSTER_ID -> validateStringValues(condition, ClusterSpec.Id::from); + case CLUSTER_TYPE -> validateStringValues(condition, ClusterSpec.Type::from); + case ENVIRONMENT -> validateStringValues(condition, Environment::from); + case HOSTNAME -> validateStringValues(condition, HostName::of); + case NODE_TYPE -> validateStringValues(condition, NodeType::valueOf); + case SYSTEM -> validateStringValues(condition, system -> { + if (!Set.of(SystemName.cd, SystemName.main, SystemName.PublicCd, SystemName.Public).contains(SystemName.from(system))) + throw new IllegalArgumentException("Unknown system: " + system); + }); + case TENANT_ID -> validateStringValues(condition, TenantName::from); + case VESPA_VERSION -> validateStringValues(condition, versionString -> { + Version vespaVersion = Version.fromString(versionString); + if (vespaVersion.getMajor() < 8) + throw new IllegalArgumentException("Major Vespa version must be at least 8: " + versionString); + }); + case ZONE_ID -> validateStringValues(condition, zoneId -> { + if (!zones.contains(ZoneId.from(zoneId))) + throw new IllegalArgumentException("Unknown zone: " + zoneId); }); - } else if (dimension.isEqualTo(DimensionHelper.toWire(FetchVector.Dimension.CONSOLE_USER_EMAIL))) { - condition.get("values").forEachArrayElement(conditionValue -> conditionValue.asString() - .orElseThrow(() -> new IllegalArgumentException("Non-string email address: " + conditionValue))); - } else if 
(dimension.isEqualTo(DimensionHelper.toWire(FetchVector.Dimension.TENANT_ID))) { - condition.get("values").forEachArrayElement(conditionValue -> conditionValue.asString() - .orElseThrow(() -> new IllegalArgumentException("Non-string tenant ID: " + conditionValue))); } })); } + private static void validateStringValues(JsonAccessor condition, Consumer<String> valueValidator) { + condition.get("values").forEachArrayElement(conditionValue -> { + String value = conditionValue.asString() + .orElseThrow(() -> { + String dimension = condition.get("dimension").asString().orElseThrow(); + String type = condition.get("type").asString().orElseThrow(); + return new IllegalArgumentException("Non-string value in %s %s condition: %s".formatted( + dimension, type, conditionValue)); + }); + valueValidator.accept(value); + }); + } + private static void removeCommentsRecursively(JsonNode node) { if (node instanceof ObjectNode) { ObjectNode objectNode = (ObjectNode) node; diff --git a/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchiveTest.java b/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchiveTest.java index d010893f1d4..a24bed54a8a 100644 --- a/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchiveTest.java +++ b/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchiveTest.java @@ -54,15 +54,20 @@ public class SystemFlagsDataArchiveTest { @TempDir public File temporaryFolder; - private static final FlagsTarget mainControllerTarget = FlagsTarget.forController(SYSTEM); - private static final FlagsTarget cdControllerTarget = FlagsTarget.forController(SystemName.cd); + private static final FlagsTarget mainControllerTarget = createControllerTarget(SYSTEM); + private static final FlagsTarget cdControllerTarget = createControllerTarget(SystemName.cd); private static final FlagsTarget 
prodUsWestCfgTarget = createConfigserverTarget(Environment.prod, "us-west-1"); private static final FlagsTarget prodUsEast3CfgTarget = createConfigserverTarget(Environment.prod, "us-east-3"); private static final FlagsTarget devUsEast1CfgTarget = createConfigserverTarget(Environment.dev, "us-east-1"); + private static FlagsTarget createControllerTarget(SystemName system) { + return new ControllerFlagsTarget(system, CloudName.YAHOO, ZoneId.from(Environment.prod, RegionName.from("us-east-1"))); + } + private static FlagsTarget createConfigserverTarget(Environment environment, String region) { return new ConfigServerFlagsTarget( SYSTEM, + CloudName.YAHOO, ZoneId.from(environment, RegionName.from(region)), URI.create("https://cfg-" + region), new AthenzService("vespa.cfg-" + region)); @@ -177,102 +182,85 @@ public class SystemFlagsDataArchiveTest { " \"comment\": \"comment d\"\n" + " }\n" + " ]\n" + - "}"))); + "}", Set.of()))); } @Test - void normalize_json_fail_on_invalid_application() { - try { - SystemFlagsDataArchive.normalizeJson("{\n" + - " \"id\": \"foo\",\n" + - " \"rules\": [\n" + - " {\n" + - " \"conditions\": [\n" + - " {\n" + - " \"type\": \"whitelist\",\n" + - " \"dimension\": \"application\",\n" + - " \"values\": [ \"a.b.c\" ]\n" + - " }\n" + - " ],\n" + - " \"value\": true\n" + - " }\n" + - " ]\n" + - "}\n"); - fail(); - } catch (IllegalArgumentException e) { - assertEquals("Application ids must be on the form tenant:application:instance, but was a.b.c", e.getMessage()); - } + void normalize_json_succeed_on_valid_values() { + normalizeJson("application", "\"a:b:c\""); + normalizeJson("cloud", "\"yahoo\""); + normalizeJson("cloud", "\"aws\""); + normalizeJson("cloud", "\"gcp\""); + normalizeJson("cluster-id", "\"some-id\""); + normalizeJson("cluster-type", "\"admin\""); + normalizeJson("cluster-type", "\"container\""); + normalizeJson("cluster-type", "\"content\""); + normalizeJson("console-user-email", "\"name@domain.com\""); + 
normalizeJson("environment", "\"prod\""); + normalizeJson("environment", "\"staging\""); + normalizeJson("environment", "\"test\""); + normalizeJson("hostname", "\"2080046-v6-11.ostk.bm2.prod.gq1.yahoo.com\""); + normalizeJson("node-type", "\"tenant\""); + normalizeJson("node-type", "\"host\""); + normalizeJson("node-type", "\"config\""); + normalizeJson("node-type", "\"host\""); + normalizeJson("system", "\"main\""); + normalizeJson("system", "\"public\""); + normalizeJson("tenant", "\"vespa\""); + normalizeJson("vespa-version", "\"8.201.13\""); + normalizeJson("zone", "\"prod.us-west-1\"", Set.of(ZoneId.from("prod.us-west-1"))); } - @Test - void normalize_json_fail_on_invalid_node_type() { - try { - SystemFlagsDataArchive.normalizeJson("{\n" + - " \"id\": \"foo\",\n" + - " \"rules\": [\n" + - " {\n" + - " \"conditions\": [\n" + - " {\n" + - " \"type\": \"whitelist\",\n" + - " \"dimension\": \"node-type\",\n" + - " \"values\": [ \"footype\" ]\n" + - " }\n" + - " ],\n" + - " \"value\": true\n" + - " }\n" + - " ]\n" + - "}\n"); - fail(); - } catch (IllegalArgumentException e) { - assertEquals("No enum constant com.yahoo.config.provision.NodeType.footype", e.getMessage()); - } + private void normalizeJson(String dimension, String jsonValue) { + normalizeJson(dimension, jsonValue, Set.of()); } - @Test - void normalize_json_fail_on_invalid_email() { - try { - SystemFlagsDataArchive.normalizeJson("{\n" + - " \"id\": \"foo\",\n" + - " \"rules\": [\n" + - " {\n" + - " \"conditions\": [\n" + - " {\n" + - " \"type\": \"whitelist\",\n" + - " \"dimension\": \"console-user-email\",\n" + - " \"values\": [ 123 ]\n" + - " }\n" + - " ],\n" + - " \"value\": true\n" + - " }\n" + - " ]\n" + - "}\n"); - fail(); - } catch (IllegalArgumentException e) { - assertEquals("Non-string email address: 123", e.getMessage()); - } + private void normalizeJson(String dimension, String jsonValue, Set<ZoneId> zones) { + SystemFlagsDataArchive.normalizeJson(""" + { + "id": "foo", + "rules": [ + { + 
"conditions": [ + { + "type": "whitelist", + "dimension": "%s", + "values": [ %s ] + } + ], + "value": true + } + ] + } + """.formatted(dimension, jsonValue), zones); } @Test - void normalize_json_fail_on_invalid_tenant_id() { + void normalize_json_fail_on_invalid_values() { + failNormalizeJson("application", "\"a.b.c\"", "Application ids must be on the form tenant:application:instance, but was a.b.c"); + failNormalizeJson("cloud", "\"foo\"", "Unknown cloud: foo"); + // failNormalizeJson("cluster-id", ... any String is valid + failNormalizeJson("cluster-type", "\"foo\"", "Illegal cluster type 'foo'"); + failNormalizeJson("console-user-email", "123", "Non-string value in console-user-email whitelist condition: 123"); + failNormalizeJson("environment", "\"foo\"", "'foo' is not a valid environment identifier"); + failNormalizeJson("hostname", "\"not:a:hostname\"", "hostname must match '(([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9-]{0,61}[A-Za-z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9-]{0,61}[A-Za-z0-9])\\.?', but got: 'not:a:hostname'"); + failNormalizeJson("node-type", "\"footype\"", "No enum constant com.yahoo.config.provision.NodeType.footype"); + failNormalizeJson("system", "\"bar\"", "'bar' is not a valid system"); + failNormalizeJson("tenant", "123", "Non-string value in tenant whitelist condition: 123"); + failNormalizeJson("vespa-version", "\"not-a-version\"", "Invalid version component in 'not-a-version'"); + failNormalizeJson("zone", "\"dev.non-existing-zone\"", Set.of(ZoneId.from("prod.example-region")), "Unknown zone: dev.non-existing-zone"); + } + + private void failNormalizeJson(String dimension, String jsonValue, String expectedExceptionMessage) { + failNormalizeJson(dimension, jsonValue, Set.of(), expectedExceptionMessage); + } + + private void failNormalizeJson(String dimension, String jsonValue, Set<ZoneId> zones, String expectedExceptionMessage) { try { - SystemFlagsDataArchive.normalizeJson("{\n" + - " \"id\": \"foo\",\n" + - " \"rules\": [\n" + - " 
{\n" + - " \"conditions\": [\n" + - " {\n" + - " \"type\": \"whitelist\",\n" + - " \"dimension\": \"tenant\",\n" + - " \"values\": [ 123 ]\n" + - " }\n" + - " ],\n" + - " \"value\": true\n" + - " }\n" + - " ]\n" + - "}\n"); + normalizeJson(dimension, jsonValue, zones); fail(); - } catch (IllegalArgumentException e) { - assertEquals("Non-string tenant ID: 123", e.getMessage()); + } catch (RuntimeException e) { + assertEquals(expectedExceptionMessage, e.getMessage()); } } @@ -291,6 +279,11 @@ public class SystemFlagsDataArchiveTest { // Cannot use the standard registry mock as it's located in controller-server module ZoneRegistry registryMock = mock(ZoneRegistry.class); when(registryMock.system()).thenReturn(SystemName.main); + ZoneApi zoneApi = mock(ZoneApi.class); + when(zoneApi.getSystemName()).thenReturn(SystemName.main); + when(zoneApi.getCloudName()).thenReturn(CloudName.YAHOO); + when(zoneApi.getVirtualId()).thenReturn(ZoneId.ofVirtualControllerZone()); + when(registryMock.systemZone()).thenReturn(zoneApi); when(registryMock.getConfigServerVipUri(any())).thenReturn(URI.create("http://localhost:8080/")); when(registryMock.getConfigServerHttpsIdentity(any())).thenReturn(new AthenzService("domain", "servicename")); ZoneList zoneListMock = mock(ZoneList.class); @@ -333,7 +326,7 @@ public class SystemFlagsDataArchiveTest { @Override public SystemName getSystemName() { return SystemName.main; } @Override public ZoneId getId() { return zoneId; } - @Override public CloudName getCloudName() { throw new UnsupportedOperationException(); } + @Override public CloudName getCloudName() { return CloudName.YAHOO; } @Override public String getCloudNativeRegionName() { throw new UnsupportedOperationException(); } } diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/athenz/impl/AthenzClientFactoryImpl.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/athenz/impl/AthenzClientFactoryImpl.java index c88eb2f1b86..aa50f9d3a87 100644 
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/athenz/impl/AthenzClientFactoryImpl.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/athenz/impl/AthenzClientFactoryImpl.java @@ -1,6 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.controller.athenz.impl; +import ai.vespa.metrics.ControllerMetrics; import com.yahoo.component.annotation.Inject; import com.yahoo.jdisc.Metric; import com.yahoo.vespa.athenz.api.AthenzIdentity; @@ -22,7 +23,7 @@ import java.util.Map; */ public class AthenzClientFactoryImpl implements AthenzClientFactory { - private static final String METRIC_NAME = "athenz.request.error"; + private static final String METRIC_NAME = ControllerMetrics.ATHENZ_REQUEST_ERROR.baseName(); private static final String ATHENZ_SERVICE_DIMENSION = "athenz-service"; private static final String EXCEPTION_DIMENSION = "exception"; diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobMetrics.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobMetrics.java index b9bff5f777e..2924bb83104 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobMetrics.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobMetrics.java @@ -1,6 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.hosted.controller.deployment; +import ai.vespa.metrics.ControllerMetrics; import com.yahoo.jdisc.Metric; import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId; @@ -13,19 +14,19 @@ import java.util.Map; */ public class JobMetrics { - public static final String start = "deployment.start"; - public static final String nodeAllocationFailure = "deployment.nodeAllocationFailure"; - public static final String endpointCertificateTimeout = "deployment.endpointCertificateTimeout"; - public static final String deploymentFailure = "deployment.deploymentFailure"; - public static final String invalidApplication = "deployment.invalidApplication"; - public static final String convergenceFailure = "deployment.convergenceFailure"; - public static final String testFailure = "deployment.testFailure"; - public static final String noTests = "deployment.noTests"; - public static final String error = "deployment.error"; - public static final String abort = "deployment.abort"; - public static final String cancel = "deployment.cancel"; - public static final String success = "deployment.success"; - public static final String quotaExceeded = "deployment.quotaExceeded"; + public static final String start = ControllerMetrics.DEPLOYMENT_START.baseName(); + public static final String nodeAllocationFailure = ControllerMetrics.DEPLOYMENT_NODE_ALLOCATION_FAILURE.baseName(); + public static final String endpointCertificateTimeout = ControllerMetrics.DEPLOYMENT_ENDPOINT_CERTIFICATE_TIMEOUT.baseName(); + public static final String deploymentFailure = ControllerMetrics.DEPLOYMENT_DEPLOYMENT_FAILURE.baseName(); + public static final String invalidApplication = ControllerMetrics.DEPLOYMENT_INVALID_APPLICATION.baseName(); + public static final String convergenceFailure = ControllerMetrics.DEPLOYMENT_CONVERGENCE_FAILURE.baseName(); + public static final String testFailure = ControllerMetrics.DEPLOYMENT_TEST_FAILURE.baseName(); + public static final String noTests 
= ControllerMetrics.DEPLOYMENT_NO_TESTS.baseName(); + public static final String error = ControllerMetrics.DEPLOYMENT_ERROR.baseName(); + public static final String abort = ControllerMetrics.DEPLOYMENT_ABORT.baseName(); + public static final String cancel = ControllerMetrics.DEPLOYMENT_CANCEL.baseName(); + public static final String success = ControllerMetrics.DEPLOYMENT_SUCCESS.baseName(); + public static final String quotaExceeded = ControllerMetrics.DEPLOYMENT_QUOTA_EXCEEDED.baseName(); private final Metric metric; diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveAccessMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveAccessMaintainer.java index b2ed0941c8e..33a4802360e 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveAccessMaintainer.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveAccessMaintainer.java @@ -1,6 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.hosted.controller.maintenance; +import ai.vespa.metrics.ControllerMetrics; import com.yahoo.config.provision.TenantName; import com.yahoo.config.provision.zone.ZoneId; import com.yahoo.jdisc.Metric; @@ -24,7 +25,7 @@ import java.util.stream.Collectors; */ public class ArchiveAccessMaintainer extends ControllerMaintainer { - private static final String bucketCountMetricName = "archive.bucketCount"; + private static final String bucketCountMetricName = ControllerMetrics.ARCHIVE_BUCKET_COUNT.baseName(); private final CuratorArchiveBucketDb archiveBucketDb; private final ArchiveService archiveService; diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporter.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporter.java index 71f9c37577a..6a280e71e98 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporter.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporter.java @@ -1,6 +1,8 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.hosted.controller.maintenance; +import ai.vespa.metrics.ConfigServerMetrics; +import ai.vespa.metrics.ControllerMetrics; import com.yahoo.component.Version; import com.yahoo.config.application.api.DeploymentInstanceSpec; import com.yahoo.config.provision.ApplicationId; @@ -48,22 +50,22 @@ import java.util.stream.Collectors; */ public class MetricsReporter extends ControllerMaintainer { - public static final String TENANT_METRIC = "billing.tenants"; - public static final String DEPLOYMENT_FAIL_METRIC = "deployment.failurePercentage"; - public static final String DEPLOYMENT_AVERAGE_DURATION = "deployment.averageDuration"; - public static final String DEPLOYMENT_FAILING_UPGRADES = "deployment.failingUpgrades"; - public static final String DEPLOYMENT_BUILD_AGE_SECONDS = "deployment.buildAgeSeconds"; - public static final String DEPLOYMENT_WARNINGS = "deployment.warnings"; - public static final String DEPLOYMENT_OVERDUE_UPGRADE = "deployment.overdueUpgradeSeconds"; - public static final String OS_CHANGE_DURATION = "deployment.osChangeDuration"; - public static final String PLATFORM_CHANGE_DURATION = "deployment.platformChangeDuration"; - public static final String OS_NODE_COUNT = "deployment.nodeCountByOsVersion"; - public static final String PLATFORM_NODE_COUNT = "deployment.nodeCountByPlatformVersion"; - public static final String BROKEN_SYSTEM_VERSION = "deployment.brokenSystemVersion"; - public static final String REMAINING_ROTATIONS = "remaining_rotations"; - public static final String NAME_SERVICE_REQUESTS_QUEUED = "dns.queuedRequests"; + public static final String TENANT_METRIC = ControllerMetrics.BILLING_TENANTS.baseName(); + public static final String DEPLOYMENT_FAIL_METRIC = ControllerMetrics.DEPLOYMENT_FAILURE_PERCENTAGE.baseName(); + public static final String DEPLOYMENT_AVERAGE_DURATION = ControllerMetrics.DEPLOYMENT_AVERAGE_DURATION.baseName(); + public static final String DEPLOYMENT_FAILING_UPGRADES = 
ControllerMetrics.DEPLOYMENT_FAILING_UPGRADES.baseName(); + public static final String DEPLOYMENT_BUILD_AGE_SECONDS = ControllerMetrics.DEPLOYMENT_BUILD_AGE_SECONDS.baseName(); + public static final String DEPLOYMENT_WARNINGS = ControllerMetrics.DEPLOYMENT_WARNINGS.baseName(); + public static final String DEPLOYMENT_OVERDUE_UPGRADE = ControllerMetrics.DEPLOYMENT_OVERDUE_UPGRADE_SECONDS.baseName(); + public static final String OS_CHANGE_DURATION = ControllerMetrics.DEPLOYMENT_OS_CHANGE_DURATION.baseName(); + public static final String PLATFORM_CHANGE_DURATION = ControllerMetrics.DEPLOYMENT_PLATFORM_CHANGE_DURATION.baseName(); + public static final String OS_NODE_COUNT = ControllerMetrics.DEPLOYMENT_NODE_COUNT_BY_OS_VERSION.baseName(); + public static final String PLATFORM_NODE_COUNT = ControllerMetrics.DEPLOYMENT_NODE_COUNT_BY_PLATFORM_VERSION.baseName(); + public static final String BROKEN_SYSTEM_VERSION = ControllerMetrics.DEPLOYMENT_BROKEN_SYSTEM_VERSION.baseName(); + public static final String REMAINING_ROTATIONS = ControllerMetrics.REMAINING_ROTATIONS.baseName(); + public static final String NAME_SERVICE_REQUESTS_QUEUED = ControllerMetrics.DNS_QUEUED_REQUESTS.baseName(); public static final String OPERATION_PREFIX = "operation."; - public static final String ZMS_QUOTA_USAGE = "zms.quota.usage"; + public static final String ZMS_QUOTA_USAGE = ControllerMetrics.ZMS_QUOTA_USAGE.baseName(); private final Metric metric; private final Clock clock; diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java index 4824ccc576a..6294fc59b5e 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java @@ -987,9 
+987,7 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler { private HttpResponse generateToken(String tenant, String tokenid, HttpRequest request) { - // 'expiration=PT0S' for no expiration, no 'expiration' for default TTL. - Duration expiration = Optional.ofNullable(request.getProperty("expiration")) - .map(Duration::parse).orElse(DataplaneTokenService.DEFAULT_TTL); + var expiration = resolveExpiration(request).orElse(null); DataplaneToken token = controller.dataplaneTokenService().generateToken( TenantName.from(tenant), TokenId.of(tokenid), expiration, request.getJDiscRequest().getUserPrincipal()); Slime slime = new Slime(); @@ -1001,6 +999,22 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler { return new SlimeJsonResponse(slime); } + /** + * Specify 'expiration=none' for no expiration, no parameter or 'expiration=default' for default TTL. + * Use ISO-8601 format for timestamp or period, + * e.g 'expiration=PT1H' for 1 hour, 'expiration=2021-01-01T12:00:00Z' for a specific time. + */ + private Optional<Instant> resolveExpiration(HttpRequest r) { + var expirationParam = r.getProperty("expiration"); + var now = controller.clock().instant(); + if (expirationParam == null || expirationParam.equals("default")) + return Optional.of(now.plus(DataplaneTokenService.DEFAULT_TTL)); + if (expirationParam.equals("none")) return Optional.empty(); + return expirationParam.startsWith("P") + ? 
Optional.of(now.plus(Duration.parse(expirationParam))) + : Optional.of(Instant.parse(expirationParam)); + } + private HttpResponse deleteToken(String tenant, String tokenid, HttpRequest request) { String fingerprint = Optional.ofNullable(request.getProperty("fingerprint")).orElseThrow(() -> new IllegalArgumentException("Cannot delete token without fingerprint")); controller.dataplaneTokenService().deleteToken(TenantName.from(tenant), TokenId.of(tokenid), FingerPrint.of(fingerprint)); diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/dataplanetoken/DataplaneTokenService.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/dataplanetoken/DataplaneTokenService.java index 32872a01bce..385200a1624 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/dataplanetoken/DataplaneTokenService.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/dataplanetoken/DataplaneTokenService.java @@ -54,12 +54,11 @@ public class DataplaneTokenService { * * @param tenantName name of the tenant to connect the token to * @param tokenId The user generated name/id of the token - * @param ttl The time to live of the token. Use {@link Duration#ZERO} for no TTL. + * @param expiration Token expiration * @param principal The principal making the request * @return a DataplaneToken containing the secret generated token */ - public DataplaneToken generateToken(TenantName tenantName, TokenId tokenId, Duration ttl, Principal principal) { - Optional<Instant> expiration = ttl.isZero() ? 
Optional.empty() : Optional.ofNullable(controller.clock().instant().plus(ttl)); + public DataplaneToken generateToken(TenantName tenantName, TokenId tokenId, Instant expiration, Principal principal) { TokenDomain tokenDomain = TokenDomain.of("Vespa Cloud tenant data plane:%s".formatted(tenantName.value())); Token token = TokenGenerator.generateToken(tokenDomain, TOKEN_PREFIX, TOKEN_BYTES); TokenCheckHash checkHash = TokenCheckHash.of(token, CHECK_HASH_BYTES); @@ -67,7 +66,7 @@ public class DataplaneTokenService { FingerPrint.of(token.fingerprint().toDelimitedHexString()), checkHash.toHexString(), controller.clock().instant(), - expiration, + Optional.ofNullable(expiration), principal.getName()); CuratorDb curator = controller.curator(); @@ -92,7 +91,7 @@ public class DataplaneTokenService { // Return the data plane token including the secret token. return new DataplaneToken(tokenId, FingerPrint.of(token.fingerprint().toDelimitedHexString()), - token.secretTokenString(), expiration); + token.secretTokenString(), Optional.ofNullable(expiration)); } } diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsDeployer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsDeployer.java index abc888abccb..355f06fc753 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsDeployer.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsDeployer.java @@ -78,8 +78,12 @@ class SystemFlagsDeployer { return SystemFlagsDeployResult.merge(results); } - private SystemFlagsDeployResult deployFlags(FlagsTarget target, List<FlagData> flagData, boolean dryRun) { - Map<FlagId, FlagData> wantedFlagData = lookupTable(flagData); + private SystemFlagsDeployResult deployFlags(FlagsTarget target, List<FlagData> flagDataList, boolean dryRun) { + flagDataList = flagDataList.stream() + 
.map(target::partiallyResolveFlagData) + .filter(flagData -> !flagData.isEmpty()) + .toList(); + Map<FlagId, FlagData> wantedFlagData = lookupTable(flagDataList); Map<FlagId, FlagData> currentFlagData; List<FlagId> definedFlags; try { diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneApiMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneApiMock.java index 6fd44e09d8d..21fe1f66bc5 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneApiMock.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneApiMock.java @@ -36,8 +36,8 @@ public class ZoneApiMock implements ZoneApi { } } - public static ZoneApiMock fromId(String id) { - return from(ZoneId.from(id)); + public static ZoneApiMock fromId(String zoneId) { + return from(ZoneId.from(zoneId)); } public static ZoneApiMock from(Environment environment, RegionName region) { diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneRegistryMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneRegistryMock.java index e6a9014df94..63d479d4c6c 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneRegistryMock.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneRegistryMock.java @@ -161,7 +161,7 @@ public class ZoneRegistryMock extends AbstractComponent implements ZoneRegistry @Override public ZoneApi systemZone() { - return ZoneApiMock.fromId("prod.controller"); + return ZoneApiMock.newBuilder().withSystem(system).withVirtualId(ZoneId.ofVirtualControllerZone()).build(); } @Override @@ -180,7 +180,7 @@ public class ZoneRegistryMock extends AbstractComponent implements ZoneRegistry private ZoneApiMock systemAsZone() { return ZoneApiMock.newBuilder() .with(ZoneId.from("prod.us-east-1")) - 
.withVirtualId(ZoneId.from("prod.controller")) + .withVirtualId(ZoneId.ofVirtualControllerZone()) .build(); } diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/dataplanetoken/DataplaneTokenServiceTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/dataplanetoken/DataplaneTokenServiceTest.java index 9a8e43d1597..e148eac7365 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/dataplanetoken/DataplaneTokenServiceTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/dataplanetoken/DataplaneTokenServiceTest.java @@ -30,7 +30,7 @@ public class DataplaneTokenServiceTest { @Test void generates_and_persists_token() { - DataplaneToken dataplaneToken = dataplaneTokenService.generateToken(tenantName, tokenId, Duration.ofDays(100), principal); + DataplaneToken dataplaneToken = dataplaneTokenService.generateToken(tenantName, tokenId, tester.clock().instant().plus(Duration.ofDays(100)), principal); List<DataplaneTokenVersions> dataplaneTokenVersions = dataplaneTokenService.listTokens(tenantName); assertEquals(dataplaneToken.fingerPrint(), dataplaneTokenVersions.get(0).tokenVersions().get(0).fingerPrint()); assertEquals(dataplaneToken.expiration(), dataplaneTokenVersions.get(0).tokenVersions().get(0).expiration()); @@ -38,8 +38,8 @@ public class DataplaneTokenServiceTest { @Test void generating_new_token_appends() { - DataplaneToken dataplaneToken1 = dataplaneTokenService.generateToken(tenantName, tokenId, Duration.ofDays(1), principal); - DataplaneToken dataplaneToken2 = dataplaneTokenService.generateToken(tenantName, tokenId, Duration.ZERO, principal); + DataplaneToken dataplaneToken1 = dataplaneTokenService.generateToken(tenantName, tokenId, tester.clock().instant().plus(Duration.ofDays(1)), principal); + DataplaneToken dataplaneToken2 = dataplaneTokenService.generateToken(tenantName, tokenId, null, principal); 
assertNotEquals(dataplaneToken1.fingerPrint(), dataplaneToken2.fingerPrint()); List<DataplaneTokenVersions> dataplaneTokenVersions = dataplaneTokenService.listTokens(tenantName); @@ -54,8 +54,8 @@ public class DataplaneTokenServiceTest { @Test void delete_last_fingerprint_deletes_token() { - DataplaneToken dataplaneToken1 = dataplaneTokenService.generateToken(tenantName, tokenId, Duration.ZERO, principal); - DataplaneToken dataplaneToken2 = dataplaneTokenService.generateToken(tenantName, tokenId, Duration.ZERO, principal); + DataplaneToken dataplaneToken1 = dataplaneTokenService.generateToken(tenantName, tokenId, null, principal); + DataplaneToken dataplaneToken2 = dataplaneTokenService.generateToken(tenantName, tokenId, null, principal); dataplaneTokenService.deleteToken(tenantName, tokenId, dataplaneToken1.fingerPrint()); dataplaneTokenService.deleteToken(tenantName, tokenId, dataplaneToken2.fingerPrint()); assertEquals(List.of(), dataplaneTokenService.listTokens(tenantName)); @@ -63,8 +63,8 @@ public class DataplaneTokenServiceTest { @Test void deleting_nonexistent_fingerprint_throws() { - DataplaneToken dataplaneToken = dataplaneTokenService.generateToken(tenantName, tokenId, Duration.ZERO, principal); - DataplaneToken dataplaneToken2 = dataplaneTokenService.generateToken(tenantName, tokenId, Duration.ZERO, principal); + DataplaneToken dataplaneToken = dataplaneTokenService.generateToken(tenantName, tokenId, null, principal); + DataplaneToken dataplaneToken2 = dataplaneTokenService.generateToken(tenantName, tokenId, null, principal); dataplaneTokenService.deleteToken(tenantName, tokenId, dataplaneToken.fingerPrint()); // Token currently contains value of "dataplaneToken2" @@ -74,7 +74,7 @@ public class DataplaneTokenServiceTest { @Test void deleting_nonexistent_token_throws() { - DataplaneToken dataplaneToken = dataplaneTokenService.generateToken(tenantName, tokenId, Duration.ZERO, principal); + DataplaneToken dataplaneToken = 
dataplaneTokenService.generateToken(tenantName, tokenId, null, principal); dataplaneTokenService.deleteToken(tenantName, tokenId, dataplaneToken.fingerPrint()); // Token is created and deleted above, no longer exists diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsDeployResultTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsDeployResultTest.java index 36679e0dd91..d0d362abcfc 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsDeployResultTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsDeployResultTest.java @@ -20,9 +20,12 @@ import static org.assertj.core.api.Assertions.assertThat; * @author bjorncs */ public class SystemFlagsDeployResultTest { + private final ZoneApiMock prodUsWest1Zone = ZoneApiMock.fromId("prod.us-west-1"); + private final ZoneRegistryMock registry = new ZoneRegistryMock(SystemName.cd).setZones(prodUsWest1Zone); + @Test void changes_and_errors_are_present_in_wire_format() { - FlagsTarget controllerTarget = FlagsTarget.forController(SystemName.cd); + FlagsTarget controllerTarget = FlagsTarget.forController(registry.systemZone()); FlagId flagOne = new FlagId("flagone"); FlagId flagTwo = new FlagId("flagtwo"); SystemFlagsDeployResult result = new SystemFlagsDeployResult( @@ -41,10 +44,8 @@ public class SystemFlagsDeployResultTest { @Test void identical_errors_and_changes_from_multiple_targets_are_merged() { - ZoneApiMock prodUsWest1Zone = ZoneApiMock.fromId("prod.us-west-1"); - ZoneRegistryMock registry = new ZoneRegistryMock(SystemName.cd).setZones(prodUsWest1Zone); - FlagsTarget prodUsWest1Target = FlagsTarget.forConfigServer(registry, prodUsWest1Zone.getId()); - FlagsTarget controllerTarget = FlagsTarget.forController(SystemName.cd); + FlagsTarget prodUsWest1Target = FlagsTarget.forConfigServer(registry, 
prodUsWest1Zone); + FlagsTarget controllerTarget = FlagsTarget.forController(registry.systemZone()); FlagId flagOne = new FlagId("flagone"); FlagId flagTwo = new FlagId("flagtwo"); diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsDeployerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsDeployerTest.java index 50354639f6f..8ad64a08244 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsDeployerTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsDeployerTest.java @@ -2,7 +2,9 @@ package com.yahoo.vespa.hosted.controller.restapi.systemflags; import com.yahoo.config.provision.SystemName; +import com.yahoo.vespa.flags.FetchVector; import com.yahoo.vespa.flags.FlagId; +import com.yahoo.vespa.flags.Flags; import com.yahoo.vespa.flags.json.FlagData; import com.yahoo.vespa.hosted.controller.api.systemflags.v1.FlagsTarget; import com.yahoo.vespa.hosted.controller.api.systemflags.v1.SystemFlagsDataArchive; @@ -15,11 +17,16 @@ import java.io.UncheckedIOException; import java.nio.file.Files; import java.nio.file.Paths; import java.util.List; +import java.util.Optional; import java.util.Set; import static com.yahoo.vespa.hosted.controller.restapi.systemflags.SystemFlagsDeployResult.FlagDataChange; import static com.yahoo.vespa.hosted.controller.restapi.systemflags.SystemFlagsDeployResult.OperationError; +import static com.yahoo.yolean.Exceptions.uncheck; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; @@ -38,12 +45,12 @@ public class SystemFlagsDeployerTest 
{ private final ZoneApiMock prodUsEast3Zone = ZoneApiMock.fromId("prod.us-east-3"); private final ZoneRegistryMock registry = new ZoneRegistryMock(SYSTEM).setZones(prodUsWest1Zone, prodUsEast3Zone); - private final FlagsTarget controllerTarget = FlagsTarget.forController(SYSTEM); - private final FlagsTarget prodUsWest1Target = FlagsTarget.forConfigServer(registry, prodUsWest1Zone.getId()); - private final FlagsTarget prodUsEast3Target = FlagsTarget.forConfigServer(registry, prodUsEast3Zone.getId()); + private final FlagsTarget controllerTarget = FlagsTarget.forController(registry.systemZone()); + private final FlagsTarget prodUsWest1Target = FlagsTarget.forConfigServer(registry, prodUsWest1Zone); + private final FlagsTarget prodUsEast3Target = FlagsTarget.forConfigServer(registry, prodUsEast3Zone); @Test - void deploys_flag_data_to_targets() throws IOException { + void deploys_flag_data_to_targets() { FlagsClient flagsClient = mock(FlagsClient.class); when(flagsClient.listFlagData(controllerTarget)).thenReturn(List.of()); when(flagsClient.listFlagData(prodUsWest1Target)).thenReturn(List.of(flagData("existing-prod.us-west-1.json"))); @@ -74,7 +81,81 @@ public class SystemFlagsDeployerTest { } @Test - void dryrun_should_not_change_flags() throws IOException { + void deploys_partial_flag_data_to_targets() { + // default.json contains one rule with 2 conditions, one of which has a condition on the aws cloud. + // This condition IS resolved for a config server target, but NOT for a controller target, because FLAG_ID + // has the CLOUD dimension set. 
+ deployFlags(Optional.empty(), "partial/default.json", Optional.of("partial/put-controller.json"), true, PutType.CREATE, FetchVector.Dimension.CLOUD); + deployFlags(Optional.empty(), "partial/default.json", Optional.empty(), false, PutType.NONE, FetchVector.Dimension.CLOUD); + deployFlags(Optional.of("partial/initial.json"), "partial/default.json", Optional.of("partial/put-controller.json"), true, PutType.UPDATE, FetchVector.Dimension.CLOUD); + deployFlags(Optional.of("partial/initial.json"), "partial/default.json", Optional.empty(), false, PutType.DELETE, FetchVector.Dimension.CLOUD); + + // When the CLOUD dimension is NOT set on the dimension, the controller target will also resolve that dimension, and + // the result should be identical to the config server target. Let's also verify the config server target is unchanged. + deployFlags(Optional.empty(), "partial/default.json", Optional.empty(), true, PutType.NONE); + deployFlags(Optional.empty(), "partial/default.json", Optional.empty(), false, PutType.NONE); + deployFlags(Optional.of("partial/initial.json"), "partial/default.json", Optional.empty(), true, PutType.DELETE); + deployFlags(Optional.of("partial/initial.json"), "partial/default.json", Optional.empty(), false, PutType.DELETE); + } + + private enum PutType { + CREATE, + UPDATE, + DELETE, + NONE + } + + /** + * @param existingFlagDataPath path to flag data the target already has + * @param defaultFlagDataPath path to default json file + * @param putFlagDataPath path to flag data pushed to target, or empty if nothing should be pushed + * @param controller whether to target the controller, or config server + */ + private void deployFlags(Optional<String> existingFlagDataPath, + String defaultFlagDataPath, + Optional<String> putFlagDataPath, + boolean controller, + PutType putType, + FetchVector.Dimension... 
flagDimensions) { + List<FlagData> existingFlagData = existingFlagDataPath.map(SystemFlagsDeployerTest::flagData).map(List::of).orElse(List.of()); + FlagData defaultFlagData = flagData(defaultFlagDataPath); + FlagsTarget target = controller ? controllerTarget : prodUsWest1Target; + Optional<FlagData> putFlagData = putFlagDataPath.map(SystemFlagsDeployerTest::flagData); + + try (var replacer = Flags.clearFlagsForTesting()) { + Flags.defineStringFlag(FLAG_ID.toString(), "default", List.of("hakonhall"), "2023-07-27", "2123-07-27", "", "", flagDimensions); + + FlagsClient flagsClient = mock(FlagsClient.class); + when(flagsClient.listFlagData(target)).thenReturn(existingFlagData); + + SystemFlagsDataArchive archive = new SystemFlagsDataArchive.Builder() + .addFile("default.json", defaultFlagData) + .build(); + + SystemFlagsDeployer deployer = new SystemFlagsDeployer(flagsClient, SYSTEM, Set.of(target)); + + List<FlagDataChange> changes = deployer.deployFlags(archive, false).flagChanges(); + + putFlagData.ifPresentOrElse(flagData -> { + verify(flagsClient).putFlagData(target, flagData); + switch (putType) { + case CREATE -> assertThat(changes).containsOnly(FlagDataChange.created(FLAG_ID, target, flagData)); + case UPDATE -> assertThat(changes).containsOnly(FlagDataChange.updated(FLAG_ID, target, flagData, existingFlagData.get(0))); + case DELETE, NONE -> throw new IllegalStateException("Flag data put to the target, but change type is " + putType); + } + }, () -> { + verify(flagsClient, never()).putFlagData(eq(target), any()); + switch (putType) { + case DELETE -> assertThat(changes).containsOnly(FlagDataChange.deleted(FLAG_ID, target)); + case NONE -> assertEquals(changes, List.of()); + default -> throw new IllegalStateException("No flag data is expected to be put to the target but change type is " + putType); + } + }); + } + } + + @Test + void dryrun_should_not_change_flags() { FlagsClient flagsClient = mock(FlagsClient.class); 
when(flagsClient.listFlagData(controllerTarget)).thenReturn(List.of()); when(flagsClient.listDefinedFlags(controllerTarget)).thenReturn(List.of(new FlagId("my-flag"))); @@ -97,7 +178,7 @@ public class SystemFlagsDeployerTest { } @Test - void creates_error_entries_in_result_if_flag_data_operations_fail() throws IOException { + void creates_error_entries_in_result_if_flag_data_operations_fail() { FlagsClient flagsClient = mock(FlagsClient.class); UncheckedIOException exception = new UncheckedIOException(new IOException("I/O error message")); when(flagsClient.listFlagData(prodUsWest1Target)).thenThrow(exception); @@ -120,7 +201,7 @@ public class SystemFlagsDeployerTest { } @Test - void creates_error_entry_for_invalid_flag_archive() throws IOException { + void creates_error_entry_for_invalid_flag_archive() { FlagsClient flagsClient = mock(FlagsClient.class); FlagData defaultData = flagData("flags/my-flag/main.json"); SystemFlagsDataArchive archive = new SystemFlagsDataArchive.Builder() @@ -135,7 +216,7 @@ public class SystemFlagsDeployerTest { } @Test - void creates_error_entry_for_flag_data_of_undefined_flag() throws IOException { + void creates_error_entry_for_flag_data_of_undefined_flag() { FlagData prodUsEast3Data = flagData("flags/my-flag/main.prod.us-east-3.json"); FlagsClient flagsClient = mock(FlagsClient.class); when(flagsClient.listFlagData(prodUsEast3Target)) @@ -154,7 +235,7 @@ public class SystemFlagsDeployerTest { } @Test - void creates_warning_entry_for_existing_flag_data_for_undefined_flag() throws IOException { + void creates_warning_entry_for_existing_flag_data_for_undefined_flag() { FlagData prodUsEast3Data = flagData("flags/my-flag/main.prod.us-east-3.json"); FlagsClient flagsClient = mock(FlagsClient.class); when(flagsClient.listFlagData(prodUsEast3Target)) @@ -170,8 +251,8 @@ public class SystemFlagsDeployerTest { .containsOnly(OperationError.dataForUndefinedFlag(prodUsEast3Target, new FlagId("my-flag"))); } - private static FlagData 
flagData(String filename) throws IOException { - return FlagData.deserializeUtf8Json(Files.readAllBytes(Paths.get("src/test/resources/system-flags/" + filename))); + private static FlagData flagData(String filename) { + return FlagData.deserializeUtf8Json(uncheck(() -> Files.readAllBytes(Paths.get("src/test/resources/system-flags/" + filename)))); } } diff --git a/controller-server/src/test/resources/system-flags/partial/default.json b/controller-server/src/test/resources/system-flags/partial/default.json new file mode 100644 index 00000000000..881d4170c3b --- /dev/null +++ b/controller-server/src/test/resources/system-flags/partial/default.json @@ -0,0 +1,20 @@ +{ + "id" : "my-flag", + "rules" : [ + { + "conditions": [ + { + "type": "whitelist", + "dimension": "system", + "values": [ "main" ] + }, + { + "type": "whitelist", + "dimension": "cloud", + "values": [ "aws" ] + } + ], + "value" : "foo-value" + } + ] +}
\ No newline at end of file diff --git a/controller-server/src/test/resources/system-flags/partial/initial.json b/controller-server/src/test/resources/system-flags/partial/initial.json new file mode 100644 index 00000000000..a16ea583005 --- /dev/null +++ b/controller-server/src/test/resources/system-flags/partial/initial.json @@ -0,0 +1,15 @@ +{ + "id" : "my-flag", + "rules" : [ + { + "conditions": [ + { + "type": "whitelist", + "dimension": "application", + "values": [ "a:b:c" ] + } + ], + "value" : "bar-value" + } + ] +}
\ No newline at end of file diff --git a/controller-server/src/test/resources/system-flags/partial/put-controller.json b/controller-server/src/test/resources/system-flags/partial/put-controller.json new file mode 100644 index 00000000000..47aa0af47ce --- /dev/null +++ b/controller-server/src/test/resources/system-flags/partial/put-controller.json @@ -0,0 +1,15 @@ +{ + "id" : "my-flag", + "rules" : [ + { + "conditions": [ + { + "type": "whitelist", + "dimension": "cloud", + "values": [ "aws" ] + } + ], + "value" : "foo-value" + } + ] +}
\ No newline at end of file diff --git a/dependency-versions/pom.xml b/dependency-versions/pom.xml index f91ebab5c5d..5f75b042722 100644 --- a/dependency-versions/pom.xml +++ b/dependency-versions/pom.xml @@ -97,6 +97,7 @@ <junit.vespa.version>5.8.1</junit.vespa.version> <junit.platform.vespa.version>1.8.1</junit.platform.vespa.version> <junit4.vespa.version>4.13.2</junit4.vespa.version> + <lucene.vespa.version>9.7.0</lucene.vespa.version> <maven-archiver.vespa.version>3.6.0</maven-archiver.vespa.version> <maven-wagon.vespa.version>2.10</maven-wagon.vespa.version> <mimepull.vespa.version>1.9.6</mimepull.vespa.version> diff --git a/flags/src/main/java/com/yahoo/vespa/flags/FetchVector.java b/flags/src/main/java/com/yahoo/vespa/flags/FetchVector.java index c1877373ce2..5bcc1e67547 100644 --- a/flags/src/main/java/com/yahoo/vespa/flags/FetchVector.java +++ b/flags/src/main/java/com/yahoo/vespa/flags/FetchVector.java @@ -3,10 +3,13 @@ package com.yahoo.vespa.flags; import com.yahoo.vespa.flags.json.DimensionHelper; +import java.util.Collection; import java.util.EnumMap; import java.util.Map; import java.util.Objects; import java.util.Optional; +import java.util.Set; +import java.util.function.BiConsumer; import java.util.function.Consumer; /** @@ -20,20 +23,28 @@ public class FetchVector { * Note: If this enum is changed, you must also change {@link DimensionHelper}. */ public enum Dimension { - /** Value from TenantName::value, e.g. vespa-team */ - TENANT_ID, - /** Value from ApplicationId::serializedForm of the form tenant:applicationName:instance. */ APPLICATION_ID, - /** Node type from com.yahoo.config.provision.NodeType::name, e.g. tenant, host, confighost, controller, etc. */ - NODE_TYPE, + /** + * Cloud from com.yahoo.config.provision.CloudName::value, e.g. yahoo, aws, gcp. 
+ * + * <p><em>Eager resolution</em>: This dimension is resolved before putting the flag data to the config server + * or controller, unless controller and the flag has declared this dimension. + */ + CLOUD, + + /** Cluster ID from com.yahoo.config.provision.ClusterSpec.Id::value, e.g. cluster-controllers, logserver. */ + CLUSTER_ID, /** Cluster type from com.yahoo.config.provision.ClusterSpec.Type::name, e.g. content, container, admin */ CLUSTER_TYPE, - /** Cluster ID from com.yahoo.config.provision.ClusterSpec.Id::value, e.g. cluster-controllers, logserver. */ - CLUSTER_ID, + /** Email address of user - provided by auth0 in console. */ + CONSOLE_USER_EMAIL, + + /** Hosted Vespa environment from com.yahoo.config.provision.Environment::value, e.g. prod, staging, test. */ + ENVIRONMENT, /** * Fully qualified hostname. @@ -44,6 +55,18 @@ public class FetchVector { */ HOSTNAME, + /** Node type from com.yahoo.config.provision.NodeType::name, e.g. tenant, host, confighost, controller, etc. */ + NODE_TYPE, + + /** + * Hosted Vespa system from com.yahoo.config.provision.SystemName::value, e.g. main, cd, public, publiccd. + * <em>Eager resolution</em>, see {@link #CLOUD}. + */ + SYSTEM, + + /** Value from TenantName::value, e.g. vespa-team */ + TENANT_ID, + /** * Vespa version from Version::toFullString of the form Major.Minor.Micro. * @@ -53,14 +76,9 @@ public class FetchVector { */ VESPA_VERSION, - /** Email address of user - provided by auth0 in console. */ - CONSOLE_USER_EMAIL, - /** - * Zone from ZoneId::value of the form environment.region. - * - * <p>NOTE: There is seldom any need to set ZONE_ID, as all flags are set per zone anyways. The controller - * could PERHAPS use this where it handles multiple zones. + * Virtual zone ID from com.yahoo.config.provision.zone.ZoneId::value of the form environment.region, + * see com.yahoo.config.provision.zone.ZoneApi::getVirtualId. <em>Eager resolution</em>, see {@link #CLOUD}. 
*/ ZONE_ID } @@ -83,15 +101,13 @@ public class FetchVector { return Optional.ofNullable(map.get(dimension)); } - public Map<Dimension, String> toMap() { - return map; - } + public Map<Dimension, String> toMap() { return map; } public boolean isEmpty() { return map.isEmpty(); } - public boolean hasDimension(FetchVector.Dimension dimension) { - return map.containsKey(dimension); - } + public boolean hasDimension(FetchVector.Dimension dimension) { return map.containsKey(dimension);} + + public Set<Dimension> dimensions() { return map.keySet(); } /** * Returns a new FetchVector, identical to {@code this} except for its value in {@code dimension}. @@ -107,13 +123,28 @@ public class FetchVector { return makeFetchVector(vector -> vector.putAll(override.map)); } - private FetchVector makeFetchVector(Consumer<EnumMap<Dimension, String>> mapModifier) { - EnumMap<Dimension, String> mergedMap = new EnumMap<>(Dimension.class); + private FetchVector makeFetchVector(Consumer<Map<Dimension, String>> mapModifier) { + Map<Dimension, String> mergedMap = new EnumMap<>(Dimension.class); mergedMap.putAll(map); mapModifier.accept(mergedMap); return new FetchVector(mergedMap); } + public FetchVector without(Dimension dimension) { + return makeFetchVector(merged -> merged.remove(dimension)); + } + + public FetchVector without(Collection<Dimension> dimensions) { + return makeFetchVector(merged -> merged.keySet().removeAll(dimensions)); + } + + @Override + public String toString() { + return "FetchVector{" + + "map=" + map + + '}'; + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java index 326e8f2dcae..73283958cc7 100644 --- a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java +++ b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java @@ -21,7 +21,6 @@ import static com.yahoo.vespa.flags.FetchVector.Dimension.HOSTNAME; import static 
com.yahoo.vespa.flags.FetchVector.Dimension.NODE_TYPE; import static com.yahoo.vespa.flags.FetchVector.Dimension.TENANT_ID; import static com.yahoo.vespa.flags.FetchVector.Dimension.VESPA_VERSION; -import static com.yahoo.vespa.flags.FetchVector.Dimension.ZONE_ID; /** * Definitions of feature flags. @@ -53,23 +52,16 @@ public class Flags { List.of("hakonhall", "baldersheim"), "2023-03-06", "2023-08-05", "Drop caches on tenant hosts", "Takes effect on next tick", - ZONE_ID, // The application ID is the exclusive application ID associated with the host, // if any, or otherwise hosted-vespa:tenant-host:default. APPLICATION_ID, TENANT_ID, CLUSTER_ID, CLUSTER_TYPE); - public static final UnboundBooleanFlag SIMPLER_ACL = defineFeatureFlag( - "simpler-acl", true, - List.of("hakonhall"), "2023-07-04", "2023-08-04", - "Simplify ACL in hosted Vespa", - "Takes effect on the next fetch of ACL rules"); - public static final UnboundDoubleFlag DEFAULT_TERM_WISE_LIMIT = defineDoubleFlag( "default-term-wise-limit", 1.0, List.of("baldersheim"), "2020-12-02", "2023-12-31", "Default limit for when to apply termwise query evaluation", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundStringFlag QUERY_DISPATCH_POLICY = defineStringFlag( "query-dispatch-policy", "adaptive", @@ -77,83 +69,83 @@ public class Flags { "Select query dispatch policy, valid values are adaptive, round-robin, best-of-random-2," + " latency-amortized-over-requests, latency-amortized-over-time", "Takes effect at redeployment (requires restart)", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundStringFlag SUMMARY_DECODE_POLICY = defineStringFlag( "summary-decode-policy", "eager", List.of("baldersheim"), "2023-03-30", "2023-12-31", "Select summary decoding policy, valid values are eager and on-demand/ondemand.", "Takes effect at redeployment (requires restart)", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final 
UnboundStringFlag FEED_SEQUENCER_TYPE = defineStringFlag( "feed-sequencer-type", "THROUGHPUT", List.of("baldersheim"), "2020-12-02", "2023-12-31", "Selects type of sequenced executor used for feeding in proton, valid values are LATENCY, ADAPTIVE, THROUGHPUT", "Takes effect at redeployment (requires restart)", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundIntFlag MAX_UNCOMMITTED_MEMORY = defineIntFlag( "max-uncommitted-memory", 130000, List.of("geirst, baldersheim"), "2021-10-21", "2023-12-31", "Max amount of memory holding updates to an attribute before we do a commit.", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundStringFlag RESPONSE_SEQUENCER_TYPE = defineStringFlag( "response-sequencer-type", "ADAPTIVE", List.of("baldersheim"), "2020-12-02", "2023-12-31", "Selects type of sequenced executor used for mbus responses, valid values are LATENCY, ADAPTIVE, THROUGHPUT", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundIntFlag RESPONSE_NUM_THREADS = defineIntFlag( "response-num-threads", 2, List.of("baldersheim"), "2020-12-02", "2023-12-31", "Number of threads used for mbus responses, default is 2, negative number = numcores/4", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundBooleanFlag SKIP_COMMUNICATIONMANAGER_THREAD = defineFeatureFlag( "skip-communicationmanager-thread", false, List.of("baldersheim"), "2020-12-02", "2023-12-31", "Should we skip the communicationmanager thread", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundBooleanFlag SKIP_MBUS_REQUEST_THREAD = defineFeatureFlag( "skip-mbus-request-thread", false, List.of("baldersheim"), "2020-12-02", "2023-12-31", "Should we skip the mbus request thread", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final 
UnboundBooleanFlag SKIP_MBUS_REPLY_THREAD = defineFeatureFlag( "skip-mbus-reply-thread", false, List.of("baldersheim"), "2020-12-02", "2023-12-31", "Should we skip the mbus reply thread", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundBooleanFlag USE_ASYNC_MESSAGE_HANDLING_ON_SCHEDULE = defineFeatureFlag( "async-message-handling-on-schedule", false, List.of("baldersheim"), "2020-12-02", "2023-12-31", "Optionally deliver async messages in own thread", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundDoubleFlag FEED_CONCURRENCY = defineDoubleFlag( "feed-concurrency", 0.5, List.of("baldersheim"), "2020-12-02", "2023-12-31", "How much concurrency should be allowed for feed", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundDoubleFlag FEED_NICENESS = defineDoubleFlag( "feed-niceness", 0.0, List.of("baldersheim"), "2022-06-24", "2023-12-31", "How nice feeding shall be", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundIntFlag MBUS_JAVA_NUM_TARGETS = defineIntFlag( @@ -161,71 +153,71 @@ public class Flags { List.of("baldersheim"), "2022-07-05", "2023-12-31", "Number of rpc targets per service", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundIntFlag MBUS_CPP_NUM_TARGETS = defineIntFlag( "mbus-cpp-num-targets", 2, List.of("baldersheim"), "2022-07-05", "2023-12-31", "Number of rpc targets per service", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundIntFlag RPC_NUM_TARGETS = defineIntFlag( "rpc-num-targets", 2, List.of("baldersheim"), "2022-07-05", "2023-12-31", "Number of rpc targets per content node", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundIntFlag 
MBUS_JAVA_EVENTS_BEFORE_WAKEUP = defineIntFlag( "mbus-java-events-before-wakeup", 1, List.of("baldersheim"), "2022-07-05", "2023-12-31", "Number write events before waking up transport thread", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundIntFlag MBUS_CPP_EVENTS_BEFORE_WAKEUP = defineIntFlag( "mbus-cpp-events-before-wakeup", 1, List.of("baldersheim"), "2022-07-05", "2023-12-31", "Number write events before waking up transport thread", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundIntFlag RPC_EVENTS_BEFORE_WAKEUP = defineIntFlag( "rpc-events-before-wakeup", 1, List.of("baldersheim"), "2022-07-05", "2023-12-31", "Number write events before waking up transport thread", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundIntFlag MBUS_NUM_NETWORK_THREADS = defineIntFlag( "mbus-num-network-threads", 1, List.of("baldersheim"), "2022-07-01", "2023-12-31", "Number of threads used for mbus network", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundBooleanFlag SHARED_STRING_REPO_NO_RECLAIM = defineFeatureFlag( "shared-string-repo-no-reclaim", false, List.of("baldersheim"), "2022-06-14", "2023-12-31", "Controls whether we do track usage and reclaim unused enum values in shared string repo", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundBooleanFlag CONTAINER_DUMP_HEAP_ON_SHUTDOWN_TIMEOUT = defineFeatureFlag( "container-dump-heap-on-shutdown-timeout", false, List.of("baldersheim"), "2021-09-25", "2023-12-31", "Will trigger a heap dump during if container shutdown times out", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundBooleanFlag LOAD_CODE_AS_HUGEPAGES = defineFeatureFlag( "load-code-as-hugepages", false, List.of("baldersheim"), 
"2022-05-13", "2023-12-31", "Will try to map the code segment with huge (2M) pages", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundDoubleFlag CONTAINER_SHUTDOWN_TIMEOUT = defineDoubleFlag( "container-shutdown-timeout", 50.0, List.of("baldersheim"), "2021-09-25", "2023-12-31", "Timeout for shutdown of a jdisc container", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); // TODO: Move to a permanent flag public static final UnboundListFlag<String> ALLOWED_ATHENZ_PROXY_IDENTITIES = defineListFlag( @@ -240,28 +232,28 @@ public class Flags { "Allows replicas in up to N content groups to not be activated " + "for query visibility if they are out of sync with a majority of other replicas", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundDoubleFlag MIN_NODE_RATIO_PER_GROUP = defineDoubleFlag( "min-node-ratio-per-group", 0.0, List.of("geirst", "vekterli"), "2021-07-16", "2023-09-01", "Minimum ratio of nodes that have to be available (i.e. 
not Down) in any hierarchic content cluster group for the group to be Up", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundStringFlag SYSTEM_MEMORY_HIGH = defineStringFlag( "system-memory-high", "", List.of("baldersheim"), "2023-02-14", "2023-12-31", "The value to write to /sys/fs/cgroup/system.slice/memory.high, if non-empty.", "Takes effect on next tick.", - ZONE_ID, NODE_TYPE); + NODE_TYPE); public static final UnboundStringFlag SYSTEM_MEMORY_MAX = defineStringFlag( "system-memory-max", "", List.of("baldersheim"), "2023-02-14", "2023-12-31", "The value to write to /sys/fs/cgroup/system.slice/memory.max, if non-empty.", "Takes effect on next tick.", - ZONE_ID, NODE_TYPE); + NODE_TYPE); public static final UnboundBooleanFlag ENABLED_HORIZON_DASHBOARD = defineFeatureFlag( "enabled-horizon-dashboard", false, @@ -276,35 +268,35 @@ public class Flags { List.of("arnej"), "2021-11-12", "2023-12-31", "Whether C++ thread creation should ignore any requested stack size", "Triggers restart, takes effect immediately", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundBooleanFlag USE_V8_GEO_POSITIONS = defineFeatureFlag( "use-v8-geo-positions", true, List.of("arnej"), "2021-11-15", "2023-12-31", "Use Vespa 8 types and formats for geographical positions", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundIntFlag MAX_COMPACT_BUFFERS = defineIntFlag( "max-compact-buffers", 1, List.of("baldersheim", "geirst", "toregge"), "2021-12-15", "2023-12-31", "Upper limit of buffers to compact in a data store at the same time for each reason (memory usage, address space usage)", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundBooleanFlag USE_QRSERVER_SERVICE_NAME = defineFeatureFlag( "use-qrserver-service-name", false, List.of("arnej"), "2022-01-18", "2023-12-31", "Use backwards-compatible 
'qrserver' service name for containers with only 'search' API", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundBooleanFlag AVOID_RENAMING_SUMMARY_FEATURES = defineFeatureFlag( "avoid-renaming-summary-features", true, List.of("arnej"), "2022-01-15", "2023-12-31", "Tell backend about the original name of summary-features that were wrapped in a rankingExpression feature", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundBooleanFlag ENABLE_PROXY_PROTOCOL_MIXED_MODE = defineFeatureFlag( "enable-proxy-protocol-mixed-mode", true, @@ -318,7 +310,7 @@ public class Flags { List.of("arnej"), "2022-06-14", "2024-12-31", "Which algorithm to use for compressing log files. Valid values: empty string (default), gzip, zstd", "Takes effect immediately", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundBooleanFlag SEPARATE_METRIC_CHECK_CONFIG = defineFeatureFlag( "separate-metric-check-config", false, @@ -354,7 +346,7 @@ public class Flags { List.of("vekterli"), "2022-11-03", "2023-10-01", "Specifies which public key to use for core dump encryption.", "Takes effect on the next tick.", - ZONE_ID, NODE_TYPE, HOSTNAME); + NODE_TYPE, HOSTNAME); public static final UnboundBooleanFlag ENABLE_GLOBAL_PHASE = defineFeatureFlag( "enable-global-phase", true, @@ -364,11 +356,11 @@ public class Flags { APPLICATION_ID); public static final UnboundBooleanFlag NODE_ADMIN_TENANT_SERVICE_REGISTRY = defineFeatureFlag( - "node-admin-tenant-service-registry", false, - List.of("olaa"), "2023-04-12", "2023-08-01", + "node-admin-tenant-service-registry", true, + List.of("olaa"), "2023-04-12", "2023-08-07", "Whether AthenzCredentialsMaintainer in node-admin should create tenant service identity certificate", "Takes effect on next tick", - ZONE_ID, HOSTNAME, VESPA_VERSION, APPLICATION_ID + HOSTNAME, VESPA_VERSION, APPLICATION_ID ); public static final UnboundBooleanFlag 
ENABLE_CROWDSTRIKE = defineFeatureFlag( @@ -380,7 +372,7 @@ public class Flags { "allow-more-than-one-content-group-down", false, List.of("hmusum"), "2023-04-14", "2023-08-15", "Whether to enable possible configuration of letting more than one content group down", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundBooleanFlag RANDOMIZED_ENDPOINT_NAMES = defineFeatureFlag( "randomized-endpoint-names", false, List.of("andreer"), "2023-04-26", "2023-08-30", @@ -397,8 +389,7 @@ public class Flags { public static final UnboundBooleanFlag ENABLE_THE_ONE_THAT_SHOULD_NOT_BE_NAMED = defineFeatureFlag( "enable-the-one-that-should-not-be-named", false, List.of("hmusum"), "2023-05-08", "2023-08-15", "Whether to enable the one program that should not be named", - "Takes effect at next host-admin tick", - ZONE_ID); + "Takes effect at next host-admin tick"); public static final UnboundListFlag<String> WEIGHTED_ENDPOINT_RECORD_TTL = defineListFlag( "weighted-endpoint-record-ttl", List.of(), String.class, List.of("jonmv"), "2023-05-16", "2023-09-01", @@ -413,42 +404,40 @@ public class Flags { "will initiate a write-repair that evaluates the condition across all mutually inconsistent " + "replicas, with the newest document version (if any) being authoritative", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundBooleanFlag ENABLE_DATAPLANE_PROXY = defineFeatureFlag( "enable-dataplane-proxy", false, List.of("mortent", "olaa"), "2023-05-15", "2023-08-01", "Whether to enable dataplane proxy", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID + APPLICATION_ID ); public static final UnboundBooleanFlag ENABLE_NESTED_MULTIVALUE_GROUPING = defineFeatureFlag( "enable-nested-multivalue-grouping", false, List.of("baldersheim"), "2023-06-29", "2023-12-31", "Should we enable proper nested multivalue grouping", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + 
APPLICATION_ID); public static final UnboundBooleanFlag USE_RECONFIGURABLE_DISPATCHER = defineFeatureFlag( "use-reconfigurable-dispatcher", false, List.of("jonmv"), "2023-07-14", "2023-10-01", "Whether to set up a ReconfigurableDispatcher with config self-sub for backend nodes", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundBooleanFlag WRITE_CONFIG_SERVER_SESSION_DATA_AS_ONE_BLOB = defineFeatureFlag( "write-config-server-session-data-as-blob", false, List.of("hmuusm"), "2023-07-19", "2023-09-01", "Whether to write config server session data in one blob or as individual paths", - "Takes effect immediately", - ZONE_ID); + "Takes effect immediately"); public static final UnboundBooleanFlag READ_CONFIG_SERVER_SESSION_DATA_AS_ONE_BLOB = defineFeatureFlag( "read-config-server-session-data-as-blob", false, List.of("hmuusm"), "2023-07-19", "2023-09-01", "Whether to read config server session data from sesion data blob or from individual paths", - "Takes effect immediately", - ZONE_ID); + "Takes effect immediately"); public static final UnboundBooleanFlag USE_VESPA_USER_EVERYWHERE = defineFeatureFlag( "use-vespa-user-everywhere", false, @@ -538,6 +527,15 @@ public class Flags { * For instance, if APPLICATION is one of the dimensions here, you should make sure * APPLICATION is set to the ApplicationId in the fetch vector when fetching the RawFlag * from the FlagSource. + * SYSTEM, CLOUD, ENVIRONMENT, and ZONE_ID are special: These dimensions are resolved just + * before the flag data is published to a zone. This means there is never any need to set + * these dimensions when resolving a flag, and setting these dimensions just before resolving + * the flag will have no effect. + * There is one exception. If any of these dimensions are declared when defining a flag, + * then those dimensions are NOT resolved when published to the controllers. 
This allows + * the controller to resolve the flag to different values based on which cloud or zone + * it is operating on. Flags should NOT declare these dimensions unless they intend to + * use them in the controller in this way. * @param <T> The boxed type of the flag value, e.g. Boolean for flags guarding features. * @param <U> The type of the unbound flag, e.g. UnboundBooleanFlag. * @return An unbound flag with {@link FetchVector.Dimension#HOSTNAME HOSTNAME} and diff --git a/flags/src/main/java/com/yahoo/vespa/flags/JsonNodeRawFlag.java b/flags/src/main/java/com/yahoo/vespa/flags/JsonNodeRawFlag.java index 753f19a44f6..27852790186 100644 --- a/flags/src/main/java/com/yahoo/vespa/flags/JsonNodeRawFlag.java +++ b/flags/src/main/java/com/yahoo/vespa/flags/JsonNodeRawFlag.java @@ -6,6 +6,7 @@ import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import java.util.Collection; +import java.util.Objects; import java.util.concurrent.atomic.AtomicReference; import static com.yahoo.yolean.Exceptions.uncheck; @@ -60,6 +61,26 @@ public class JsonNodeRawFlag implements RawFlag { return jsonNode.toString(); } + @Override + public String toString() { + return "JsonNodeRawFlag{" + + "jsonNode=" + jsonNode + + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + JsonNodeRawFlag that = (JsonNodeRawFlag) o; + return jsonNode.equals(that.jsonNode); + } + + @Override + public int hashCode() { + return Objects.hash(jsonNode); + } + /** Initialize object mapper lazily */ private static ObjectMapper objectMapper() { // ObjectMapper is a heavy-weight object so we construct it only when we need it diff --git a/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java b/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java index b7e81f56599..18f5f5f860d 100644 --- a/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java +++ 
b/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java @@ -21,7 +21,6 @@ import static com.yahoo.vespa.flags.FetchVector.Dimension.HOSTNAME; import static com.yahoo.vespa.flags.FetchVector.Dimension.NODE_TYPE; import static com.yahoo.vespa.flags.FetchVector.Dimension.TENANT_ID; import static com.yahoo.vespa.flags.FetchVector.Dimension.VESPA_VERSION; -import static com.yahoo.vespa.flags.FetchVector.Dimension.ZONE_ID; /** * Definition for permanent feature flags @@ -43,19 +42,19 @@ public class PermanentFlags { "jvm-gc-options", "", "Sets default jvm gc options", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundIntFlag HEAP_SIZE_PERCENTAGE = defineIntFlag( "heap-size-percentage", 70, "Sets default jvm heap size percentage", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundDoubleFlag QUERY_DISPATCH_WARMUP = defineDoubleFlag( "query-dispatch-warmup", 5, "Warmup duration for query dispatcher", "Takes effect at redeployment (requires restart)", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundBooleanFlag FLEET_CANARY = defineFeatureFlag( "fleet-canary", false, @@ -126,13 +125,13 @@ public class PermanentFlags { "min-disk-throughput-mb-s", 0, "Minimum required disk throughput performance, 0 = default, Only when using remote disk", "Takes effect when node is provisioned", - ZONE_ID, APPLICATION_ID, TENANT_ID, CLUSTER_ID, CLUSTER_TYPE); + APPLICATION_ID, TENANT_ID, CLUSTER_ID, CLUSTER_TYPE); public static final UnboundIntFlag MIN_DISK_IOPS_K = defineIntFlag( "min-disk-iops-k", 0, "Minimum required disk I/O operations per second, unit is kilo, 0 = default, Only when using remote disk", "Takes effect when node is provisioned", - ZONE_ID, APPLICATION_ID, TENANT_ID, CLUSTER_ID, CLUSTER_TYPE); + APPLICATION_ID, TENANT_ID, CLUSTER_ID, CLUSTER_TYPE); public static final UnboundListFlag<String> DISABLED_HOST_ADMIN_TASKS = 
defineListFlag( "disabled-host-admin-tasks", List.of(), String.class, @@ -145,7 +144,7 @@ public class PermanentFlags { "docker-image-repo", "", "Override default docker image repo. Docker image version will be Vespa version.", "Takes effect on next deployment from controller", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); private static final String VERSION_QUALIFIER_REGEX = "[a-zA-Z0-9_-]+"; private static final Pattern QUALIFIER_PATTERN = Pattern.compile("^" + VERSION_QUALIFIER_REGEX + "$"); @@ -250,29 +249,28 @@ public class PermanentFlags { "A list of environment variables set for all services. " + "Each item should be on the form <ENV_VAR>=<VALUE>", "Takes effect on service restart", - ZONE_ID, APPLICATION_ID + APPLICATION_ID ); public static final UnboundStringFlag CONFIG_PROXY_JVM_ARGS = defineStringFlag( "config-proxy-jvm-args", "", "Sets jvm args for config proxy (added at the end of startup command, will override existing ones)", "Takes effect on restart of Docker container", - ZONE_ID, APPLICATION_ID + APPLICATION_ID ); // This must be set in a feature flag to avoid flickering between the new and old value during config server upgrade public static final UnboundDoubleFlag HOST_MEMORY = defineDoubleFlag( "host-memory", 0.6, "The memory in GB required by a host's management processes.", - "Takes effect immediately", - ZONE_ID + "Takes effect immediately" ); public static final UnboundBooleanFlag FORWARD_ISSUES_AS_ERRORS = defineFeatureFlag( "forward-issues-as-errors", true, "When the backend detects a problematic issue with a query, it will by default send it as an error message to the QRS, which adds it in an ErrorHit in the result. 
May be disabled using this flag.", "Takes effect immediately", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundBooleanFlag DEACTIVATE_ROUTING = defineFeatureFlag( "deactivate-routing", false, @@ -285,7 +283,7 @@ public class PermanentFlags { "ignored-http-user-agents", List.of(), String.class, "List of user agents to ignore (crawlers etc)", "Takes effect immediately.", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundListFlag<String> INCOMPATIBLE_VERSIONS = defineListFlag( "incompatible-versions", List.of("8"), String.class, @@ -308,7 +306,7 @@ public class PermanentFlags { "(logserver and clustercontroller clusters).", "Takes effect on next redeployment", value -> Set.of("any", "arm64", "x86_64").contains(value), - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundListFlag<String> CLOUD_ACCOUNTS = defineListFlag( "cloud-accounts", List.of(), String.class, @@ -320,7 +318,7 @@ public class PermanentFlags { "fail-deployment-for-files-with-unknown-extension", "FAIL", "Whether to log or fail for deployments when app has a file with unknown extension (valid values: LOG, FAIL)", "Takes effect at redeployment", - ZONE_ID, APPLICATION_ID); + APPLICATION_ID); public static final UnboundListFlag<String> DISABLED_DEPLOYMENT_ZONES = defineListFlag( "disabled-deployment-zones", List.of(), String.class, @@ -339,8 +337,7 @@ public class PermanentFlags { "config-server-session-expiry-time", 3600, "Expiry time in seconds for remote sessions (session in ZooKeeper). 
Default should be equal to session lifetime, " + "but can be lowered if there are incidents/bugs where one needs to delete sessions", - "Takes effect immediately", - ZONE_ID + "Takes effect immediately" ); public static final UnboundBooleanFlag NOTIFICATION_DISPATCH_FLAG = defineFeatureFlag( @@ -353,7 +350,7 @@ public class PermanentFlags { "keep-file-references-on-tenant-nodes", 30, "How many days to keep file references on tenant nodes (based on last modification time)", "Takes effect on restart of Docker container", - ZONE_ID, APPLICATION_ID + APPLICATION_ID ); public static final UnboundIntFlag ENDPOINT_CONNECTION_TTL = defineIntFlag( diff --git a/flags/src/main/java/com/yahoo/vespa/flags/json/DimensionHelper.java b/flags/src/main/java/com/yahoo/vespa/flags/json/DimensionHelper.java index ad1242aa7e9..5e5506b616b 100644 --- a/flags/src/main/java/com/yahoo/vespa/flags/json/DimensionHelper.java +++ b/flags/src/main/java/com/yahoo/vespa/flags/json/DimensionHelper.java @@ -15,15 +15,18 @@ public class DimensionHelper { private static final Map<FetchVector.Dimension, String> serializedDimensions = new HashMap<>(); static { - serializedDimensions.put(FetchVector.Dimension.ZONE_ID, "zone"); - serializedDimensions.put(FetchVector.Dimension.HOSTNAME, "hostname"); serializedDimensions.put(FetchVector.Dimension.APPLICATION_ID, "application"); - serializedDimensions.put(FetchVector.Dimension.NODE_TYPE, "node-type"); + serializedDimensions.put(FetchVector.Dimension.CLOUD, "cloud"); serializedDimensions.put(FetchVector.Dimension.CLUSTER_ID, "cluster-id"); serializedDimensions.put(FetchVector.Dimension.CLUSTER_TYPE, "cluster-type"); - serializedDimensions.put(FetchVector.Dimension.VESPA_VERSION, "vespa-version"); serializedDimensions.put(FetchVector.Dimension.CONSOLE_USER_EMAIL, "console-user-email"); + serializedDimensions.put(FetchVector.Dimension.ENVIRONMENT, "environment"); + serializedDimensions.put(FetchVector.Dimension.HOSTNAME, "hostname"); + 
serializedDimensions.put(FetchVector.Dimension.NODE_TYPE, "node-type"); + serializedDimensions.put(FetchVector.Dimension.SYSTEM, "system"); serializedDimensions.put(FetchVector.Dimension.TENANT_ID, "tenant"); + serializedDimensions.put(FetchVector.Dimension.VESPA_VERSION, "vespa-version"); + serializedDimensions.put(FetchVector.Dimension.ZONE_ID, "zone"); if (serializedDimensions.size() != FetchVector.Dimension.values().length) { throw new IllegalStateException(FetchVectorHelper.class.getName() + " is not in sync with " + diff --git a/flags/src/main/java/com/yahoo/vespa/flags/json/FlagData.java b/flags/src/main/java/com/yahoo/vespa/flags/json/FlagData.java index 19837e7dbe1..acda3b9db42 100644 --- a/flags/src/main/java/com/yahoo/vespa/flags/json/FlagData.java +++ b/flags/src/main/java/com/yahoo/vespa/flags/json/FlagData.java @@ -13,9 +13,10 @@ import com.yahoo.vespa.flags.json.wire.WireRule; import java.io.InputStream; import java.io.OutputStream; +import java.util.ArrayList; import java.util.List; +import java.util.Objects; import java.util.Optional; -import java.util.stream.Collectors; import java.util.stream.Stream; /** @@ -53,6 +54,27 @@ public class FlagData { public boolean isEmpty() { return rules.isEmpty() && defaultFetchVector.isEmpty(); } + public FlagData partialResolve(FetchVector fetchVector) { + // Note: As a result of partialResolve, there could be e.g. two identical rules, and the latter will always be ignored by resolve(). + // Consider deduping. Deduping is actually not specific to partialResolve and could be done e.g. at construction time. + + List<Rule> newRules = new ArrayList<>(); + for (var rule : rules) { + Optional<Rule> partialRule = rule.partialResolve(fetchVector); + if (partialRule.isPresent()) { + newRules.add(partialRule.get()); + if (partialRule.get().conditions().isEmpty()) { + // Any following rule will always be ignored during resolution. 
+ break; + } + } + } + + FetchVector newDefaultFetchVector = defaultFetchVector.without(fetchVector.dimensions()); + + return new FlagData(id, newDefaultFetchVector, newRules); + } + public Optional<RawFlag> resolve(FetchVector fetchVector) { return rules.stream() .filter(rule -> rule.match(defaultFetchVector.with(fetchVector))) @@ -91,6 +113,36 @@ public class FlagData { return wireFlagData; } + /** E.g. verify all RawFlag can be deserialized. */ + public void validate(Deserializer<?> deserializer) { + rules.stream() + .flatMap(rule -> rule.getValueToApply().map(Stream::of).orElse(null)) + .forEach(deserializer::deserialize); + + } + + @Override + public String toString() { + return "FlagData{" + + "id=" + id + + ", rules=" + rules + + ", defaultFetchVector=" + defaultFetchVector + + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + FlagData flagData = (FlagData) o; + return id.equals(flagData.id) && rules.equals(flagData.rules) && defaultFetchVector.equals(flagData.defaultFetchVector); + } + + @Override + public int hashCode() { + return Objects.hash(id, rules, defaultFetchVector); + } + public static FlagData deserializeUtf8Json(byte[] bytes) { return fromWire(WireFlagData.deserialize(bytes)); } @@ -138,13 +190,5 @@ public class FlagData { if (wireRules == null) return List.of(); return wireRules.stream().map(Rule::fromWire).toList(); } - - /** E.g. verify all RawFlag can be deserialized. 
*/ - public void validate(Deserializer<?> deserializer) { - rules.stream() - .flatMap(rule -> rule.getValueToApply().map(Stream::of).orElse(null)) - .forEach(deserializer::deserialize); - - } } diff --git a/flags/src/main/java/com/yahoo/vespa/flags/json/ListCondition.java b/flags/src/main/java/com/yahoo/vespa/flags/json/ListCondition.java index c4b2d9be117..483f6750a73 100644 --- a/flags/src/main/java/com/yahoo/vespa/flags/json/ListCondition.java +++ b/flags/src/main/java/com/yahoo/vespa/flags/json/ListCondition.java @@ -5,6 +5,7 @@ import com.yahoo.vespa.flags.FetchVector; import com.yahoo.vespa.flags.json.wire.WireCondition; import java.util.List; +import java.util.Objects; /** * @author hakonhall @@ -55,4 +56,27 @@ public abstract class ListCondition implements Condition { condition.values = values.isEmpty() ? null : values; return condition; } + + @Override + public String toString() { + return "ListCondition{" + + "type=" + type + + ", dimension=" + dimension + + ", values=" + values + + ", isWhitelist=" + isWhitelist + + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ListCondition that = (ListCondition) o; + return isWhitelist == that.isWhitelist && type == that.type && dimension == that.dimension && values.equals(that.values); + } + + @Override + public int hashCode() { + return Objects.hash(type, dimension, values, isWhitelist); + } } diff --git a/flags/src/main/java/com/yahoo/vespa/flags/json/RelationalCondition.java b/flags/src/main/java/com/yahoo/vespa/flags/json/RelationalCondition.java index 0efeb831f2c..749f6830870 100644 --- a/flags/src/main/java/com/yahoo/vespa/flags/json/RelationalCondition.java +++ b/flags/src/main/java/com/yahoo/vespa/flags/json/RelationalCondition.java @@ -5,6 +5,7 @@ import com.yahoo.component.Version; import com.yahoo.vespa.flags.FetchVector; import com.yahoo.vespa.flags.json.wire.WireCondition; +import java.util.Objects; 
import java.util.function.Predicate; /** @@ -75,4 +76,26 @@ public class RelationalCondition implements Condition { condition.predicate = relationalPredicate.toWire(); return condition; } + + @Override + public String toString() { + return "RelationalCondition{" + + "relationalPredicate=" + relationalPredicate + + ", predicate=" + predicate + + ", dimension=" + dimension + + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + RelationalCondition that = (RelationalCondition) o; + return relationalPredicate.equals(that.relationalPredicate) && predicate.equals(that.predicate) && dimension == that.dimension; + } + + @Override + public int hashCode() { + return Objects.hash(relationalPredicate, predicate, dimension); + } } diff --git a/flags/src/main/java/com/yahoo/vespa/flags/json/Rule.java b/flags/src/main/java/com/yahoo/vespa/flags/json/Rule.java index bddaf8c9e0e..127c2b4f9da 100644 --- a/flags/src/main/java/com/yahoo/vespa/flags/json/Rule.java +++ b/flags/src/main/java/com/yahoo/vespa/flags/json/Rule.java @@ -6,10 +6,11 @@ import com.yahoo.vespa.flags.JsonNodeRawFlag; import com.yahoo.vespa.flags.RawFlag; import com.yahoo.vespa.flags.json.wire.WireRule; +import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Objects; import java.util.Optional; -import java.util.stream.Collectors; /** * @author hakonhall @@ -45,6 +46,25 @@ public class Rule { .allMatch(condition -> !fetchVector.hasDimension(condition.dimension()) || condition.test(fetchVector)); } + /** + * Returns a copy of this rule without those conditions that can be resolved by the fetch vector. Returns empty + * if any of those conditions are false. 
+ */ + public Optional<Rule> partialResolve(FetchVector fetchVector) { + List<Condition> newConditions = new ArrayList<>(); + for (var condition : andConditions) { + if (fetchVector.hasDimension(condition.dimension())) { + if (!condition.test(fetchVector)) { + return Optional.empty(); + } + } else { + newConditions.add(condition); + } + } + + return Optional.of(new Rule(valueToApply, newConditions)); + } + public Optional<RawFlag> getValueToApply() { return valueToApply; } @@ -68,4 +88,25 @@ public class Rule { Optional<RawFlag> value = wireRule.value == null ? Optional.empty() : Optional.of(JsonNodeRawFlag.fromJsonNode(wireRule.value)); return new Rule(value, conditions); } + + @Override + public String toString() { + return "Rule{" + + "andConditions=" + andConditions + + ", valueToApply=" + valueToApply + + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Rule rule = (Rule) o; + return andConditions.equals(rule.andConditions) && valueToApply.equals(rule.valueToApply); + } + + @Override + public int hashCode() { + return Objects.hash(andConditions, valueToApply); + } } diff --git a/flags/src/test/java/com/yahoo/vespa/flags/json/FlagDataTest.java b/flags/src/test/java/com/yahoo/vespa/flags/json/FlagDataTest.java index c89b5883fd1..c7da1abe7e2 100644 --- a/flags/src/test/java/com/yahoo/vespa/flags/json/FlagDataTest.java +++ b/flags/src/test/java/com/yahoo/vespa/flags/json/FlagDataTest.java @@ -15,44 +15,45 @@ import static org.junit.jupiter.api.Assertions.assertTrue; * @author hakonhall */ public class FlagDataTest { - private final String json = "{\n" + - " \"id\": \"id1\",\n" + - " \"rules\": [\n" + - " {\n" + - " \"conditions\": [\n" + - " {\n" + - " \"type\": \"whitelist\",\n" + - " \"dimension\": \"hostname\",\n" + - " \"values\": [ \"host1\", \"host2\" ]\n" + - " },\n" + - " {\n" + - " \"type\": \"blacklist\",\n" + - " \"dimension\": \"application\",\n" + - 
" \"values\": [ \"app1\", \"app2\" ]\n" + - " }\n" + - " ],\n" + - " \"value\": true\n" + - " },\n" + - " {\n" + - " \"conditions\": [\n" + - " {\n" + - " \"type\": \"whitelist\",\n" + - " \"dimension\": \"zone\",\n" + - " \"values\": [ \"zone1\", \"zone2\" ]\n" + - " }\n" + - " ],\n" + - " \"value\": false\n" + - " }\n" + - " ],\n" + - " \"attributes\": {\n" + - " \"zone\": \"zone1\"\n" + - " }\n" + - "}"; + private final String json = """ + { + "id": "id1", + "rules": [ + { + "conditions": [ + { + "type": "whitelist", + "dimension": "hostname", + "values": [ "host1", "host2" ] + }, + { + "type": "blacklist", + "dimension": "application", + "values": [ "app1", "app2" ] + } + ], + "value": true + }, + { + "conditions": [ + { + "type": "whitelist", + "dimension": "zone", + "values": [ "zone1", "zone2" ] + } + ], + "value": false + } + ], + "attributes": { + "zone": "zone1" + } + }"""; private final FetchVector vector = new FetchVector(); @Test - void test() { + void testResolve() { // Second rule matches with the default zone matching verify(Optional.of("false"), vector); @@ -74,6 +75,143 @@ public class FlagDataTest { verify(Optional.empty(), vector.with(FetchVector.Dimension.ZONE_ID, "unknown zone")); } + @Test + void testPartialResolve() { + FlagData data = FlagData.deserialize(json); + assertEquals(data.partialResolve(vector), data); + assertEquals(data.partialResolve(vector.with(FetchVector.Dimension.APPLICATION_ID, "app1")), + FlagData.deserialize(""" + { + "id": "id1", + "rules": [ + { + "conditions": [ + { + "type": "whitelist", + "dimension": "zone", + "values": [ "zone1", "zone2" ] + } + ], + "value": false + } + ], + "attributes": { + "zone": "zone1" + } + }""")); + + assertEquals(data.partialResolve(vector.with(FetchVector.Dimension.APPLICATION_ID, "app1")), + FlagData.deserialize(""" + { + "id": "id1", + "rules": [ + { + "conditions": [ + { + "type": "whitelist", + "dimension": "zone", + "values": [ "zone1", "zone2" ] + } + ], + "value": false + } + ], 
+ "attributes": { + "zone": "zone1" + } + }""")); + + assertEquals(data.partialResolve(vector.with(FetchVector.Dimension.APPLICATION_ID, "app3")), + FlagData.deserialize(""" + { + "id": "id1", + "rules": [ + { + "conditions": [ + { + "type": "whitelist", + "dimension": "hostname", + "values": [ "host1", "host2" ] + } + ], + "value": true + }, + { + "conditions": [ + { + "type": "whitelist", + "dimension": "zone", + "values": [ "zone1", "zone2" ] + } + ], + "value": false + } + ], + "attributes": { + "zone": "zone1" + } + }""")); + + assertEquals(data.partialResolve(vector.with(FetchVector.Dimension.APPLICATION_ID, "app3") + .with(FetchVector.Dimension.HOSTNAME, "host1")), + FlagData.deserialize(""" + { + "id": "id1", + "rules": [ + { + "value": true + } + ], + "attributes": { + "zone": "zone1" + } + }""")); + + assertEquals(data.partialResolve(vector.with(FetchVector.Dimension.APPLICATION_ID, "app3") + .with(FetchVector.Dimension.HOSTNAME, "host3")), + FlagData.deserialize(""" + { + "id": "id1", + "rules": [ + { + "conditions": [ + { + "type": "whitelist", + "dimension": "zone", + "values": [ "zone1", "zone2" ] + } + ], + "value": false + } + ], + "attributes": { + "zone": "zone1" + } + }""")); + + assertEquals(data.partialResolve(vector.with(FetchVector.Dimension.APPLICATION_ID, "app3") + .with(FetchVector.Dimension.HOSTNAME, "host3") + .with(FetchVector.Dimension.ZONE_ID, "zone2")), + FlagData.deserialize(""" + { + "id": "id1", + "rules": [ + { + "value": false + } + ] + }""")); + + FlagData fullyResolved = data.partialResolve(vector.with(FetchVector.Dimension.APPLICATION_ID, "app3") + .with(FetchVector.Dimension.HOSTNAME, "host3") + .with(FetchVector.Dimension.ZONE_ID, "zone3")); + assertEquals(fullyResolved, FlagData.deserialize(""" + { + "id": "id1" + }""")); + assertTrue(fullyResolved.isEmpty()); + } + private void verify(Optional<String> expectedValue, FetchVector vector) { FlagData data = FlagData.deserialize(json); assertEquals("id1", 
data.id().toString()); diff --git a/lucene-linguistics/README.md b/lucene-linguistics/README.md new file mode 100644 index 00000000000..6329811e458 --- /dev/null +++ b/lucene-linguistics/README.md @@ -0,0 +1,93 @@ +# Vespa Lucene Linguistics + +Linguistics implementation based on Apache Lucene. +Features: +- a list of default analyzers per language; +- building custom analyzers through the configuration of the linguistics component; +- building custom analyzers in Java code and declaring them as `components`. + +## Development + +Build: +```shell +mvn clean test -U package +``` + +To compile configuration classes so that Intellij doesn't complain: +- right click on `pom.xml` +- then `Maven` +- then `Generate Sources and Update Folders` + +## Usage + +Add `<component>` to `services.xml` of your application package, e.g.: +```xml +<component id="com.yahoo.language.lucene.LuceneLinguistics" bundle="lucene-linguistics"> + <config name="com.yahoo.language.lucene.lucene-analysis"> + <configDir>linguistics</configDir> + <analysis> + <item key="en"> + <tokenizer> + <name>standard</name> + </tokenizer> + <tokenFilters> + <item> + <name>reverseString</name> + </item> + </tokenFilters> + </item> + </analysis> + </config> +</component> +``` +into `container` clusters that has `<document-processing/>` and/or `<search>` specified. + +And then package and deploy, e.g.: +```shell +(mvn clean -DskipTests=true -U package && vespa deploy -w 100) +``` + +### Configuration of Lucene Analyzers + +Read the Lucene docs of subclasses of: +- [TokenizerFactory](org.apache.lucene.analysis.TokenizerFactory), e.g. [StandardTokenizerFactory](https://lucene.apache.org/core/9_0_0/core/org/apache/lucene/analysis/standard/StandardTokenizerFactory.html) +- [CharFilterFactory](https://lucene.apache.org/core/9_0_0/core/org/apache/lucene/analysis/CharFilterFactory.html), e.g. 
[PatternReplaceCharFilterFactory](https://lucene.apache.org/core/8_1_1/analyzers-common/org/apache/lucene/analysis/pattern/PatternReplaceCharFilterFactory.html) +- [TokenFilterFactory](https://lucene.apache.org/core/8_1_1/analyzers-common/org/apache/lucene/analysis/util/TokenFilterFactory.html), e.g. [ReverseStringFilterFactory](https://lucene.apache.org/core/8_1_1/analyzers-common/org/apache/lucene/analysis/reverse/ReverseStringFilterFactory.html) + +E.g. tokenizer `StandardTokenizerFactory` has this config [snippet](https://lucene.apache.org/core/9_0_0/core/org/apache/lucene/analysis/standard/StandardTokenizerFactory.html): +```xml + <fieldType name="text_stndrd" class="solr.TextField" positionIncrementGap="100"> + <analyzer> + <tokenizer class="solr.StandardTokenizerFactory" maxTokenLength="255"/> + </analyzer> + </fieldType> +``` + +Then go to the [source code](https://github.com/apache/lucene/blob/17c13a76c87c6246f32dd7a78a26db04401ddb6e/lucene/core/src/java/org/apache/lucene/analysis/standard/StandardTokenizerFactory.java#L36) of the class on Github. +Copy value of the `public static final String NAME` into the `<name>` and observe the names used for configuring the tokenizer (in this case only `maxTokenLength`). +```xml +<tokenizer> + <name>standard</name> + <config> + <item key="maxTokenLength">255</item> + </config> +</tokenizer> +``` + +The `AnalyzerFactory` constructor logs the available analysis components. + +The analysis components are discovered through Java Service Provider Interface (SPI). +To add more analysis components it should be enough to put a Lucene analyzer dependency into your application package `pom.xml` +or register services and create classes directly in the application package. + +### Resource files + +The resource files are relative to the component config `configDir`. 
+ +## Inspiration + +These projects: +- [vespa-chinese-linguistics](https://github.com/vespa-engine/sample-apps/blob/master/examples/vespa-chinese-linguistics/src/main/java/com/qihoo/language/JiebaLinguistics.java). +- [OpenNlp Linguistics](https://github.com/vespa-engine/vespa/blob/50d7555bfe7bdaec86f8b31c4d316c9ba66bb976/opennlp-linguistics/src/main/java/com/yahoo/language/opennlp/OpenNlpLinguistics.java) +- [vespa-kuromoji-linguistics](https://github.com/yahoojapan/vespa-kuromoji-linguistics/tree/main) +- [Clojure library](https://github.com/dainiusjocas/lucene-text-analysis) to work with Lucene analyzers diff --git a/lucene-linguistics/abi-spec.json b/lucene-linguistics/abi-spec.json new file mode 100644 index 00000000000..6f31cf5a2e6 --- /dev/null +++ b/lucene-linguistics/abi-spec.json @@ -0,0 +1 @@ +{ }
\ No newline at end of file diff --git a/lucene-linguistics/pom.xml b/lucene-linguistics/pom.xml new file mode 100644 index 00000000000..929d33a0736 --- /dev/null +++ b/lucene-linguistics/pom.xml @@ -0,0 +1,108 @@ +<?xml version="1.0" encoding="UTF-8"?> +<project xmlns="http://maven.apache.org/POM/4.0.0" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> + <modelVersion>4.0.0</modelVersion> + <parent> + <groupId>com.yahoo.vespa</groupId> + <artifactId>parent</artifactId> + <version>8-SNAPSHOT</version> + <relativePath>../parent/pom.xml</relativePath> + </parent> + + <artifactId>lucene-linguistics</artifactId> + <packaging>container-plugin</packaging> + <version>8-SNAPSHOT</version> + + <properties> + <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding> + </properties> + + <dependencies> + <dependency> + <groupId>org.apache.lucene</groupId> + <artifactId>lucene-core</artifactId> + </dependency> + <dependency> + <groupId>org.apache.lucene</groupId> + <artifactId>lucene-analysis-common</artifactId> + </dependency> + <dependency> + <groupId>com.yahoo.vespa</groupId> + <artifactId>component</artifactId> + <version>${project.version}</version> + <scope>provided</scope> + </dependency> + <dependency> + <groupId>com.yahoo.vespa</groupId> + <artifactId>config-bundle</artifactId> + <version>${project.version}</version> + <scope>provided</scope> + </dependency> + <dependency> + <groupId>com.yahoo.vespa</groupId> + <artifactId>configdefinitions</artifactId> + <version>${project.version}</version> + </dependency> + <dependency> + <groupId>com.yahoo.vespa</groupId> + <artifactId>annotations</artifactId> + <version>${project.version}</version> + <scope>provided</scope> + </dependency> + <dependency> + <groupId>com.yahoo.vespa</groupId> + <artifactId>vespajlib</artifactId> + <version>${project.version}</version> + </dependency> + <dependency> + 
<groupId>com.yahoo.vespa</groupId> + <artifactId>linguistics</artifactId> + <version>${project.version}</version> + </dependency> + <dependency> + <groupId>com.google.inject</groupId> + <artifactId>guice</artifactId> + <classifier>no_aop</classifier> + <scope>provided</scope> + </dependency> + <dependency> + <groupId>junit</groupId> + <artifactId>junit</artifactId> + <scope>test</scope> + </dependency> + </dependencies> + + <build> + <plugins> + <plugin> + <groupId>com.yahoo.vespa</groupId> + <artifactId>bundle-plugin</artifactId> + <extensions>true</extensions> + <configuration> + <bundleType>CORE</bundleType> + <suppressWarningMissingImportPackages>true</suppressWarningMissingImportPackages> + </configuration> + </plugin> + <plugin> + <groupId>org.apache.maven.plugins</groupId> + <artifactId>maven-compiler-plugin</artifactId> + </plugin> + <plugin> + <groupId>com.yahoo.vespa</groupId> + <artifactId>abi-check-plugin</artifactId> + </plugin> + <plugin> + <groupId>com.yahoo.vespa</groupId> + <artifactId>config-class-plugin</artifactId> + <executions> + <execution> + <goals> + <goal>config-gen</goal> + </goals> + </execution> + </executions> + </plugin> + </plugins> + </build> +</project> diff --git a/lucene-linguistics/src/main/java/com/yahoo/language/lucene/AnalyzerFactory.java b/lucene-linguistics/src/main/java/com/yahoo/language/lucene/AnalyzerFactory.java new file mode 100644 index 00000000000..b7d3a618954 --- /dev/null +++ b/lucene-linguistics/src/main/java/com/yahoo/language/lucene/AnalyzerFactory.java @@ -0,0 +1,160 @@ +package com.yahoo.language.lucene; + +import com.yahoo.component.provider.ComponentRegistry; +import com.yahoo.language.Language; +import com.yahoo.language.process.StemMode; +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.CharFilterFactory; +import org.apache.lucene.analysis.TokenFilterFactory; +import org.apache.lucene.analysis.TokenizerFactory; +import org.apache.lucene.analysis.custom.CustomAnalyzer; +import 
org.apache.lucene.analysis.standard.StandardAnalyzer; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.HashMap; +import java.util.Map; +import java.util.logging.Logger; + +public class AnalyzerFactory { + private static final Logger log = Logger.getLogger(AnalyzerFactory.class.getName()); + + private final LuceneAnalysisConfig config; + + // Root config directory for all analysis components + private final Path configDir; + + // Registry of analyzers per language + // The idea is to create analyzers ONLY WHEN they are needed + // Analyzers are thread safe so no need to recreate them for every document + private final Map<String, Analyzer> languageAnalyzers = new HashMap<>(); + + private final Analyzer defaultAnalyzer = new StandardAnalyzer(); + + private final static String STANDARD_TOKENIZER = "standard"; + + private final ComponentRegistry<Analyzer> analyzerComponents; + private final DefaultAnalyzers defaultAnalyzers; + + public AnalyzerFactory(LuceneAnalysisConfig config, ComponentRegistry<Analyzer> analyzers) { + this.config = config; + this.configDir = config.configDir(); + this.analyzerComponents = analyzers; + this.defaultAnalyzers = DefaultAnalyzers.getInstance(); + log.info("Available in classpath char filters: " + CharFilterFactory.availableCharFilters()); + log.info("Available in classpath tokenizers: " + TokenizerFactory.availableTokenizers()); + log.info("Available in classpath token filters: " + TokenFilterFactory.availableTokenFilters()); + } + + /** + * Retrieves an analyzer with a given params. + * Sets up the analyzer if config is provided. + * Default analyzer is the `StandardAnalyzer`. 
+ * @param language + * @param stemMode + * @param removeAccents + * @return + */ + public Analyzer getAnalyzer(Language language, StemMode stemMode, boolean removeAccents) { + String analyzerKey = generateKey(language, stemMode, removeAccents); + + // If analyzer for language is already known + if (null != languageAnalyzers.get(analyzerKey)) { + return languageAnalyzers.get(analyzerKey); + } + if (null != config.analysis(analyzerKey)) { + return setAndReturn(analyzerKey, setUpAnalyzer(analyzerKey)); + } + if (null != analyzerComponents.getComponent(analyzerKey)) { + log.info("Analyzer for language=" + analyzerKey + " is from components."); + return setAndReturn(analyzerKey, analyzerComponents.getComponent(analyzerKey)); + } + if (null != defaultAnalyzers.get(language)) { + log.info("Analyzer for language=" + analyzerKey + " is from a list of default language analyzers."); + return setAndReturn(analyzerKey, defaultAnalyzers.get(language)); + } + // set the default analyzer for the language + log.info("StandardAnalyzer is used for language=" + analyzerKey); + return setAndReturn(analyzerKey, defaultAnalyzer); + } + + private Analyzer setAndReturn(String analyzerKey, Analyzer analyzer) { + languageAnalyzers.put(analyzerKey, analyzer); + return analyzer; + } + + // TODO: Would it make sense to combine language + stemMode + removeAccents to make + // a composite key so we can have more variations possible? 
+ private String generateKey(Language language, StemMode stemMode, boolean removeAccents) { + return language.languageCode(); + } + + private Analyzer setUpAnalyzer(String analyzerKey) { + try { + LuceneAnalysisConfig.Analysis analysis = config.analysis(analyzerKey); + log.info("Creating analyzer for: '" + analyzerKey + "' with config: " + analysis); + CustomAnalyzer.Builder builder = CustomAnalyzer.builder(configDir); + builder = withTokenizer(builder, analysis); + builder = addCharFilters(builder, analysis); + builder = addTokenFilters(builder, analysis); + return builder.build(); + } catch (Exception e) { + // Failing to set up the Analyzer, should blow up during testing and VAP should not be deployed. + // Most likely cause for problems is that a specified resource is not available in VAP. + // Unit tests should catch such problems and prevent the VAP being deployed. + log.severe("Failed to build analyzer: '" + + analyzerKey + + "', with configuration: '" + + config.analysis(analyzerKey) + + "' with exception: '" + + e.getMessage() + "'" ); + throw new RuntimeException(e); + } + } + + private CustomAnalyzer.Builder withTokenizer(CustomAnalyzer.Builder builder, + LuceneAnalysisConfig.Analysis analysis) throws IOException { + if (null == analysis) { + // By default we use the "standard" tokenizer + return builder.withTokenizer(STANDARD_TOKENIZER, new HashMap<>()); + } + String tokenizerName = analysis.tokenizer().name(); + Map<String, String> conf = analysis.tokenizer().conf(); + return builder.withTokenizer(tokenizerName, toModifiable(conf)); + } + + private CustomAnalyzer.Builder addCharFilters(CustomAnalyzer.Builder builder, + LuceneAnalysisConfig.Analysis analysis) throws IOException { + if (null == analysis) { + // by default there are no char filters + return builder; + } + for (LuceneAnalysisConfig.Analysis.CharFilters charFilter : analysis.charFilters()) { + builder.addCharFilter(charFilter.name(), toModifiable(charFilter.conf())); + } + return builder; + 
} + + private CustomAnalyzer.Builder addTokenFilters(CustomAnalyzer.Builder builder, + LuceneAnalysisConfig.Analysis analysis) throws IOException { + if (null == analysis) { + // by default no token filters are added + return builder; + } + for (LuceneAnalysisConfig.Analysis.TokenFilters tokenFilter : analysis.tokenFilters()) { + builder.addTokenFilter(tokenFilter.name(), toModifiable(tokenFilter.conf())); + } + return builder; + } + + /** + * A config map coming from the Vespa ConfigInstance is immutable while CustomAnalyzer builders + * mutates the map to mark that a param was consumed. Immutable maps can't be mutated! + * To overcome this conflict we can wrap the ConfigInstance map in a new HashMap. + * @param map + * @return Mutable Map + */ + private Map<String, String> toModifiable(Map<String, String> map) { + return new HashMap<>(map); + } +} diff --git a/lucene-linguistics/src/main/java/com/yahoo/language/lucene/DefaultAnalyzers.java b/lucene-linguistics/src/main/java/com/yahoo/language/lucene/DefaultAnalyzers.java new file mode 100644 index 00000000000..955e18474f7 --- /dev/null +++ b/lucene-linguistics/src/main/java/com/yahoo/language/lucene/DefaultAnalyzers.java @@ -0,0 +1,110 @@ +package com.yahoo.language.lucene; + +import com.yahoo.language.Language; +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.ar.ArabicAnalyzer; +import org.apache.lucene.analysis.bg.BulgarianAnalyzer; +import org.apache.lucene.analysis.bn.BengaliAnalyzer; +import org.apache.lucene.analysis.ca.CatalanAnalyzer; +import org.apache.lucene.analysis.ckb.SoraniAnalyzer; +import org.apache.lucene.analysis.cz.CzechAnalyzer; +import org.apache.lucene.analysis.da.DanishAnalyzer; +import org.apache.lucene.analysis.de.GermanAnalyzer; +import org.apache.lucene.analysis.el.GreekAnalyzer; +import org.apache.lucene.analysis.en.EnglishAnalyzer; +import org.apache.lucene.analysis.es.SpanishAnalyzer; +import org.apache.lucene.analysis.et.EstonianAnalyzer; +import 
org.apache.lucene.analysis.eu.BasqueAnalyzer; +import org.apache.lucene.analysis.fa.PersianAnalyzer; +import org.apache.lucene.analysis.fi.FinnishAnalyzer; +import org.apache.lucene.analysis.fr.FrenchAnalyzer; +import org.apache.lucene.analysis.ga.IrishAnalyzer; +import org.apache.lucene.analysis.gl.GalicianAnalyzer; +import org.apache.lucene.analysis.hi.HindiAnalyzer; +import org.apache.lucene.analysis.hu.HungarianAnalyzer; +import org.apache.lucene.analysis.hy.ArmenianAnalyzer; +import org.apache.lucene.analysis.id.IndonesianAnalyzer; +import org.apache.lucene.analysis.it.ItalianAnalyzer; +import org.apache.lucene.analysis.lt.LithuanianAnalyzer; +import org.apache.lucene.analysis.lv.LatvianAnalyzer; +import org.apache.lucene.analysis.ne.NepaliAnalyzer; +import org.apache.lucene.analysis.nl.DutchAnalyzer; +import org.apache.lucene.analysis.no.NorwegianAnalyzer; +import org.apache.lucene.analysis.pt.PortugueseAnalyzer; +import org.apache.lucene.analysis.ro.RomanianAnalyzer; +import org.apache.lucene.analysis.ru.RussianAnalyzer; +import org.apache.lucene.analysis.sr.SerbianAnalyzer; +import org.apache.lucene.analysis.sv.SwedishAnalyzer; +import org.apache.lucene.analysis.ta.TamilAnalyzer; +import org.apache.lucene.analysis.te.TeluguAnalyzer; +import org.apache.lucene.analysis.th.ThaiAnalyzer; +import org.apache.lucene.analysis.tr.TurkishAnalyzer; + +import java.util.Map; + +import static java.util.Map.entry; + +public class DefaultAnalyzers { + + private static DefaultAnalyzers INSTANCE; + private final Map<Language, Analyzer> analyzerClasses; + + private DefaultAnalyzers() { + analyzerClasses = Map.ofEntries( + entry(Language.ARABIC, new ArabicAnalyzer()), + entry(Language.BULGARIAN, new BulgarianAnalyzer()), + entry(Language.BENGALI, new BengaliAnalyzer()), + // analyzerClasses.put(Language.BRASILIAN, new BrazilianAnalyzer()) + entry(Language.CATALAN, new CatalanAnalyzer()), + // cjk analyzer? 
+ entry(Language.KURDISH, new SoraniAnalyzer()), + entry(Language.CZECH, new CzechAnalyzer()), + entry(Language.DANISH, new DanishAnalyzer()), + entry(Language.GERMAN, new GermanAnalyzer()), + entry(Language.GREEK, new GreekAnalyzer()), + entry(Language.ENGLISH, new EnglishAnalyzer()), + entry(Language.SPANISH, new SpanishAnalyzer()), + entry(Language.ESTONIAN, new EstonianAnalyzer()), + entry(Language.BASQUE, new BasqueAnalyzer()), + entry(Language.PERSIAN, new PersianAnalyzer()), + entry(Language.FINNISH, new FinnishAnalyzer()), + entry(Language.FRENCH, new FrenchAnalyzer()), + entry(Language.IRISH, new IrishAnalyzer()), + entry(Language.GALICIAN, new GalicianAnalyzer()), + entry(Language.HINDI, new HindiAnalyzer()), + entry(Language.HUNGARIAN, new HungarianAnalyzer()), + entry(Language.ARMENIAN, new ArmenianAnalyzer()), + entry(Language.INDONESIAN, new IndonesianAnalyzer()), + entry(Language.ITALIAN, new ItalianAnalyzer()), + entry(Language.LITHUANIAN, new LithuanianAnalyzer()), + entry(Language.LATVIAN, new LatvianAnalyzer()), + entry(Language.NEPALI, new NepaliAnalyzer()), + entry(Language.DUTCH, new DutchAnalyzer()), + entry(Language.NORWEGIAN_BOKMAL, new NorwegianAnalyzer()), + entry(Language.PORTUGUESE, new PortugueseAnalyzer()), + entry(Language.ROMANIAN, new RomanianAnalyzer()), + entry(Language.RUSSIAN, new RussianAnalyzer()), + entry(Language.SERBIAN, new SerbianAnalyzer()), + entry(Language.SWEDISH, new SwedishAnalyzer()), + entry(Language.TAMIL, new TamilAnalyzer()), + entry(Language.TELUGU, new TeluguAnalyzer()), + entry(Language.THAI, new ThaiAnalyzer()), + entry(Language.TURKISH, new TurkishAnalyzer()) + ); + } + + public static DefaultAnalyzers getInstance() { + if (INSTANCE == null) { + INSTANCE = new DefaultAnalyzers(); + } + return INSTANCE; + } + + public Analyzer get(Language language) { + return analyzerClasses.get(language); + } + + public Analyzer get(String languageCode) { + return 
analyzerClasses.get(Language.fromLanguageTag(languageCode)); + } +} diff --git a/lucene-linguistics/src/main/java/com/yahoo/language/lucene/LuceneLinguistics.java b/lucene-linguistics/src/main/java/com/yahoo/language/lucene/LuceneLinguistics.java new file mode 100644 index 00000000000..b5c5ba47ab6 --- /dev/null +++ b/lucene-linguistics/src/main/java/com/yahoo/language/lucene/LuceneLinguistics.java @@ -0,0 +1,82 @@ +package com.yahoo.language.lucene; + +import com.google.inject.Inject; +import com.yahoo.component.provider.ComponentRegistry; +import com.yahoo.language.Linguistics; +import com.yahoo.language.process.*; +import com.yahoo.language.simple.SimpleLinguistics; +import org.apache.lucene.analysis.Analyzer; + +import java.util.ArrayList; +import java.util.logging.Logger; + +/** + * Factory of Lucene based linguistics processor. + * As described in the Linguistics docstring + * > the tokenizer should typically stem, transform and normalize + * The Stemmer, Transformer, Normalizer, and Segmenter implementations are mostly NOOP. + * + * TODO: docs for all available analysis components. + * TODO: some registry for available language Analyzers. 
+ */ +public class LuceneLinguistics extends SimpleLinguistics { + + private static final Logger log = Logger.getLogger(LuceneLinguistics.class.getName()); + private final Normalizer normalizer; + private final Transformer transformer; + private final Tokenizer tokenizer; + private final Stemmer stemmer; + private final Segmenter segmenter; + private final LuceneAnalysisConfig config; + + @Inject + public LuceneLinguistics(LuceneAnalysisConfig config, ComponentRegistry<Analyzer> analyzers) { + log.info("Creating LuceneLinguistics with: " + config); + this.config = config; + this.tokenizer = new LuceneTokenizer(config, analyzers); + // NOOP stemmer + this.stemmer = (word, stemMode, language) -> { + ArrayList<StemList> stemLists = new ArrayList<>(); + StemList stems = new StemList(); + stems.add(word); + stemLists.add(stems); + return stemLists; + }; + // Segmenter that just wraps a tokenizer + this.segmenter = (string, language) -> { + ArrayList<String> segments = new ArrayList<>(); + Iterable<Token> tokens = tokenizer.tokenize(string, language, StemMode.NONE, false); + tokens.forEach(token -> segments.add(token.getTokenString())); + return segments; + }; + // NOOP normalizer + this.normalizer = (string) -> string; + // NOOP transformer + this.transformer = (string, language) -> string; + } + + @Override + public Stemmer getStemmer() { return stemmer; } + + @Override + public Tokenizer getTokenizer() { return tokenizer; } + + @Override + public Normalizer getNormalizer() { return normalizer; } + + @Override + public Transformer getTransformer() { return transformer; } + + @Override + public Segmenter getSegmenter() { return segmenter; } + + public LuceneAnalysisConfig getConfig() { + return config; + } + + @Override + public boolean equals(Linguistics other) { + return (other instanceof LuceneLinguistics) + // Config actually determines if Linguistics are equal + && config.equals(((LuceneLinguistics) other).getConfig()); } +} diff --git 
a/lucene-linguistics/src/main/java/com/yahoo/language/lucene/LuceneTokenizer.java b/lucene-linguistics/src/main/java/com/yahoo/language/lucene/LuceneTokenizer.java new file mode 100644 index 00000000000..0cde849fd6e --- /dev/null +++ b/lucene-linguistics/src/main/java/com/yahoo/language/lucene/LuceneTokenizer.java @@ -0,0 +1,68 @@
package com.yahoo.language.lucene;

import com.yahoo.component.provider.ComponentRegistry;
import com.yahoo.language.Language;
import com.yahoo.language.process.*;
import com.yahoo.language.simple.SimpleToken;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;

/**
 * Tokenizer backed by per-language Lucene analyzers resolved via {@link AnalyzerFactory}.
 */
public class LuceneTokenizer implements Tokenizer {

    private static final Logger log = Logger.getLogger(LuceneTokenizer.class.getName());

    // Dummy value, just to stuff the Lucene interface.
    private final static String FIELD_NAME = "F";

    private final AnalyzerFactory analyzerFactory;

    public LuceneTokenizer(LuceneAnalysisConfig config) {
        this(config, new ComponentRegistry<>());
    }

    public LuceneTokenizer(LuceneAnalysisConfig config, ComponentRegistry<Analyzer> analyzers) {
        this.analyzerFactory = new AnalyzerFactory(config, analyzers);
    }

    /**
     * Tokenizes the input with the analyzer resolved for the given language.
     * Returns an empty list for empty input.
     */
    @Override
    public Iterable<Token> tokenize(String input, Language language, StemMode stemMode, boolean removeAccents) {
        if (input.isEmpty()) return List.of();

        List<Token> tokens = textToTokens(input, analyzerFactory.getAnalyzer(language, stemMode, removeAccents));
        log.log(Level.FINEST, "Tokenized '" + language + "' text='" + input + "' into: n=" + tokens.size() + ", tokens=" + tokens);
        return tokens;
    }

    private List<Token> textToTokens(String text, Analyzer analyzer) {
        List<Token> tokens = new ArrayList<>();
        // try-with-resources: the original closed the stream only on the success path,
        // leaking it whenever reset()/incrementToken() threw. TokenStream is Closeable
        // and its contract requires close() even on failure.
        try (TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, text)) {
            CharTermAttribute charTermAttribute = tokenStream.addAttribute(CharTermAttribute.class);
            OffsetAttribute offsetAttribute = tokenStream.addAttribute(OffsetAttribute.class);
            tokenStream.reset();
            while (tokenStream.incrementToken()) {
                // TODO: is SimpleToken good enough? Maybe a custom implementation.
                // TODO: what to do with cases when multiple tokens are inserted into the position?
                String originalString = text.substring(offsetAttribute.startOffset(), offsetAttribute.endOffset());
                String tokenString = charTermAttribute.toString();
                tokens.add(new SimpleToken(originalString, tokenString)
                        .setType(TokenType.ALPHABETIC)
                        .setOffset(offsetAttribute.startOffset())
                        .setScript(TokenScript.UNKNOWN));
            }
            tokenStream.end();
        } catch (IOException e) {
            throw new RuntimeException("Failed to analyze: " + text, e);
        }
        return tokens;
    }
}
diff --git a/lucene-linguistics/src/main/java/com/yahoo/language/lucene/package-info.java b/lucene-linguistics/src/main/java/com/yahoo/language/lucene/package-info.java new file mode 100644 index 00000000000..14330723224 --- /dev/null +++ b/lucene-linguistics/src/main/java/com/yahoo/language/lucene/package-info.java @@ -0,0 +1,4 @@
@ExportPackage
package com.yahoo.language.lucene;

import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/lucene-linguistics/src/main/resources/configdefinitions/lucene-analysis.def b/lucene-linguistics/src/main/resources/configdefinitions/lucene-analysis.def new file mode 100644 index 00000000000..e4b5037dcbe --- /dev/null +++ b/lucene-linguistics/src/main/resources/configdefinitions/lucene-analysis.def @@ -0,0 +1,14 @@
package=com.yahoo.language.lucene

# The schema ("type") for an application specified config type
# See
# - https://docs.vespa.ai/en/reference/config-files.html

configDir path
analysis{}.tokenizer.name string default=standard
analysis{}.tokenizer.conf{} string

analysis{}.charFilters[].name string
analysis{}.charFilters[].conf{} string
analysis{}.tokenFilters[].name string
analysis{}.tokenFilters[].conf{} string
diff --git a/lucene-linguistics/src/test/java/com/yahoo/language/lucene/LuceneTokenizerTest.java b/lucene-linguistics/src/test/java/com/yahoo/language/lucene/LuceneTokenizerTest.java new file mode 100644 index 00000000000..568f295b39d --- /dev/null +++ 
b/lucene-linguistics/src/test/java/com/yahoo/language/lucene/LuceneTokenizerTest.java @@ -0,0 +1,139 @@ +package com.yahoo.language.lucene; + +import com.yahoo.component.provider.ComponentRegistry; +import com.yahoo.config.FileReference; +import com.yahoo.language.Language; +import com.yahoo.language.process.StemMode; +import com.yahoo.language.process.Token; +import org.junit.Test; + +import java.io.File; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import static org.junit.Assert.assertEquals; + +public class LuceneTokenizerTest { + + @Test + public void testTokenizer() { + String text = "This is my Text"; + var tokenizer = new LuceneTokenizer(new LuceneAnalysisConfig + .Builder() + .configDir(FileReference.mockFileReferenceForUnitTesting(new File("."))) + .build()); + Iterable<Token> tokens = tokenizer + .tokenize(text, Language.ENGLISH, StemMode.ALL, true); + assertEquals(List.of("my", "text"), tokenStrings(tokens)); + } + + @Test + public void testLithuanianTokenizer() { + String text = "Žalgirio mūšio data yra 1410 metai"; + var tokenizer = new LuceneTokenizer(new LuceneAnalysisConfig + .Builder() + .configDir(FileReference.mockFileReferenceForUnitTesting(new File("."))) + .build()); + Iterable<Token> tokens = tokenizer + .tokenize(text, Language.LITHUANIAN, StemMode.ALL, true); + assertEquals(List.of("žalgir", "mūš", "dat", "1410", "met"), tokenStrings(tokens)); + } + + private void assertToken(String tokenString, Iterator<Token> tokens) { + Token t = tokens.next(); + assertEquals(tokenString, t.getTokenString()); + } + + private List<Token> iterableToList(Iterable<Token> tokens) { + List<Token> tokenList = new ArrayList<>(); + tokens.forEach(tokenList::add); + return tokenList; + } + + private List<String> tokenStrings(Iterable<Token> tokens) { + List<String> tokenList = new ArrayList<>(); + tokens.forEach(token -> { + tokenList.add(token.getTokenString()); + }); + return tokenList; + } + + @Test 
+ public void testAnalyzerConfiguration() { + String languageCode = Language.ENGLISH.languageCode(); + LuceneAnalysisConfig enConfig = new LuceneAnalysisConfig.Builder() + .configDir(FileReference.mockFileReferenceForUnitTesting(new File("."))) + .analysis( + Map.of(languageCode, + new LuceneAnalysisConfig + .Analysis + .Builder() + .tokenFilters(List.of( + new LuceneAnalysisConfig + .Analysis + .TokenFilters + .Builder() + .name("englishMinimalStem"), + new LuceneAnalysisConfig + .Analysis + .TokenFilters + .Builder() + .name("uppercase")))) + ).build(); + LuceneLinguistics linguistics = new LuceneLinguistics(enConfig, new ComponentRegistry<>()); + Iterable<Token> tokens = linguistics + .getTokenizer() + .tokenize("Dogs and cats", Language.ENGLISH, StemMode.ALL, false); + assertEquals(List.of("DOG", "AND", "CAT"), tokenStrings(tokens)); + } + + @Test + public void testEnglishStemmerAnalyzerConfiguration() { + String languageCode = Language.ENGLISH.languageCode(); + LuceneAnalysisConfig enConfig = new LuceneAnalysisConfig.Builder() + .configDir(FileReference.mockFileReferenceForUnitTesting(new File("."))) + .analysis( + Map.of(languageCode, + new LuceneAnalysisConfig.Analysis.Builder().tokenFilters(List.of( + new LuceneAnalysisConfig + .Analysis + .TokenFilters + .Builder() + .name("englishMinimalStem")))) + ).build(); + LuceneLinguistics linguistics = new LuceneLinguistics(enConfig, new ComponentRegistry<>()); + Iterable<Token> tokens = linguistics + .getTokenizer() + .tokenize("Dogs and Cats", Language.ENGLISH, StemMode.ALL, false); + assertEquals(List.of("Dog", "and", "Cat"), tokenStrings(tokens)); + } + + @Test + public void testStemmerWithStopWords() { + String languageCode = Language.ENGLISH.languageCode(); + LuceneAnalysisConfig enConfig = new LuceneAnalysisConfig.Builder() + .configDir(FileReference.mockFileReferenceForUnitTesting(new File("."))) + .analysis( + Map.of(languageCode, + new LuceneAnalysisConfig.Analysis.Builder().tokenFilters(List.of( + new 
LuceneAnalysisConfig + .Analysis + .TokenFilters + .Builder() + .name("englishMinimalStem"), + new LuceneAnalysisConfig + .Analysis + .TokenFilters + .Builder() + .name("stop") + .conf("words", "stopwords.txt")))) + ).build(); + LuceneLinguistics linguistics = new LuceneLinguistics(enConfig, new ComponentRegistry<>()); + Iterable<Token> tokens = linguistics + .getTokenizer() + .tokenize("Dogs and Cats", Language.ENGLISH, StemMode.ALL, false); + assertEquals(List.of("Dog", "Cat"), tokenStrings(tokens)); + } +} diff --git a/lucene-linguistics/src/test/resources/stopwords.txt b/lucene-linguistics/src/test/resources/stopwords.txt new file mode 100644 index 00000000000..e8c07838bf5 --- /dev/null +++ b/lucene-linguistics/src/test/resources/stopwords.txt @@ -0,0 +1 @@ +and diff --git a/metrics/src/main/java/ai/vespa/metrics/ConfigServerMetrics.java b/metrics/src/main/java/ai/vespa/metrics/ConfigServerMetrics.java index 9936b4612c5..ca028547171 100644 --- a/metrics/src/main/java/ai/vespa/metrics/ConfigServerMetrics.java +++ b/metrics/src/main/java/ai/vespa/metrics/ConfigServerMetrics.java @@ -27,8 +27,11 @@ public enum ConfigServerMetrics implements VespaMetrics { MAINTENANCE_DEPLOYMENT_TRANSIENT_FAILURE("maintenanceDeployment.transientFailure", Unit.OPERATION, "Number of maintenance deployments that failed with a transient failure"), MAINTENANCE_DEPLOYMENT_FAILURE("maintenanceDeployment.failure", Unit.OPERATION, "Number of maintenance deployments that failed with a permanent failure"), + MAINTENANCE_SUCCESS_FACTOR_DEVIATION("maintenance.successFactorDeviation", Unit.FRACTION, "Configserver: Maintenance Success Factor Deviation"), + MAINTENANCE_DURATION("maintenance.duration", Unit.MILLISECOND, "Configserver: Maintenance Duration"), + // ZooKeeper related metrics - ZK_CONNECTIONS_LOST("configserver.zkConnectionLost", Unit.CONNECTION, "Number of ZooKeeper connections lost"), + ZK_CONNECTION_LOST("configserver.zkConnectionLost", Unit.CONNECTION, "Number of ZooKeeper 
connections lost"), ZK_RECONNECTED("configserver.zkReconnected", Unit.CONNECTION, "Number of ZooKeeper reconnections"), ZK_CONNECTED("configserver.zkConnected", Unit.NODE, "Number of ZooKeeper nodes connected"), ZK_SUSPENDED("configserver.zkSuspended", Unit.NODE, "Number of ZooKeeper nodes suspended"), @@ -107,9 +110,41 @@ public enum ConfigServerMetrics implements VespaMetrics { HOSTED_VESPA_DOCKER_ALLOCATED_CAPACITY_CPU("hostedVespa.docker.allocatedCapacityCpu", Unit.VCPU, "Total number of allocated VCPUs on tenant hosts managed by hosted Vespa in a zone"), HOSTED_VESPA_DOCKER_ALLOCATED_CAPACITY_MEM("hostedVespa.docker.allocatedCapacityMem", Unit.GIGABYTE, "Total amount of allocated memory on tenant hosts managed by hosted Vespa in a zone"), HOSTED_VESPA_DOCKER_ALLOCATED_CAPACITY_DISK("hostedVespa.docker.allocatedCapacityDisk", Unit.GIGABYTE, "Total amount of allocated disk space on tenant hosts managed by hosted Vespa in a zone"), - HOSTED_VESPA_BREAKFIXED_HOSTS("hostedVespa.breakfixedHosts", Unit.HOST, "Number of hosts managed that are breakfixed in a zone"), HOSTED_VESPA_PENDING_REDEPLOYMENTS("hostedVespa.pendingRedeployments", Unit.TASK, "The number of hosted Vespa re-deployments pending"), - HOSTED_VESPA_DOCKER_SKEW("hostedVespa.docker.skew", Unit.FRACTION, "A number in the range 0..1 indicating how well allocated resources are balanced with availability on hosts"); + HOSTED_VESPA_DOCKER_SKEW("hostedVespa.docker.skew", Unit.FRACTION, "A number in the range 0..1 indicating how well allocated resources are balanced with availability on hosts"), + + HOSTED_VESPA_ACTIVE_HOSTS("hostedVespa.activeHosts", Unit.HOST, "The number of managed hosts that are in state \"active\""), + HOSTED_VESPA_BREAKFIXED_HOSTS("hostedVespa.breakfixedHosts", Unit.HOST, "The number of managed hosts that are in state \"breakfixed\""), + HOSTED_VESPA_DEPROVISIONED_HOSTS("hostedVespa.deprovisionedHosts", Unit.HOST, "The number of managed hosts that are in state \"deprovisioned\""), + 
HOSTED_VESPA_DIRTY_HOSTS("hostedVespa.dirtyHosts", Unit.HOST, "The number of managed hosts that are in state \"dirty\""), + HOSTED_VESPA_FAILED_HOSTS("hostedVespa.failedHosts", Unit.HOST, "The number of managed hosts that are in state \"failed\""), + HOSTED_VESPA_INACTIVE_HOSTS("hostedVespa.inactiveHosts", Unit.HOST, "The number of managed hosts that are in state \"inactive\""), + HOSTED_VESPA_PARKED_HOSTS("hostedVespa.parkedHosts", Unit.HOST, "The number of managed hosts that are in state \"parked\""), + HOSTED_VESPA_PROVISIONED_HOSTS("hostedVespa.provisionedHosts", Unit.HOST, "The number of managed hosts that are in state \"provisioned\""), + HOSTED_VESPA_READY_HOSTS("hostedVespa.readyHosts", Unit.HOST, "The number of managed hosts that are in state \"ready\""), + HOSTED_VESPA_RESERVED_HOSTS("hostedVespa.reservedHosts", Unit.HOST, "The number of managed hosts that are in state \"reserved\""), + + HOSTED_VESPA_ACTIVE_NODES("hostedVespa.activeNodes", Unit.HOST, "The number of managed nodes that are in state \"active\""), + HOSTED_VESPA_BREAKFIXED_NODES("hostedVespa.breakfixedNodes", Unit.HOST, "The number of managed nodes that are in state \"breakfixed\""), + HOSTED_VESPA_DEPROVISIONED_NODES("hostedVespa.deprovisionedNodes", Unit.HOST, "The number of managed nodes that are in state \"deprovisioned\""), + HOSTED_VESPA_DIRTY_NODES("hostedVespa.dirtyNodes", Unit.HOST, "The number of managed nodes that are in state \"dirty\""), + HOSTED_VESPA_FAILED_NODES("hostedVespa.failedNodes", Unit.HOST, "The number of managed nodes that are in state \"failed\""), + HOSTED_VESPA_INACTIVE_NODES("hostedVespa.inactiveNodes", Unit.HOST, "The number of managed nodes that are in state \"inactive\""), + HOSTED_VESPA_PARKED_NODES("hostedVespa.parkedNodes", Unit.HOST, "The number of managed nodes that are in state \"parked\""), + HOSTED_VESPA_PROVISIONED_NODES("hostedVespa.provisionedNodes", Unit.HOST, "The number of managed nodes that are in state \"provisioned\""), + 
HOSTED_VESPA_READY_NODES("hostedVespa.readyNodes", Unit.HOST, "The number of managed nodes that are in state \"ready\""), + HOSTED_VESPA_RESERVED_NODES("hostedVespa.reservedNodes", Unit.HOST, "The number of managed nodes that are in state \"reserved\""), + + + OVERCOMMITTED_HOSTS("overcommittedHosts", Unit.HOST, "The number of hosts with over-committed resources"), + SPARE_HOST_CAPACITY("spareHostCapacity", Unit.HOST, "The number of spare hosts"), + THROTTLED_HOST_FAILURES("throttledHostFailures", Unit.HOST, "Number of host failures stopped due to throttling"), + THROTTLED_NODE_FAILURES("throttledNodeFailures", Unit.HOST, "Number of node failures stopped due to throttling"), + NODE_FAIL_THROTTLING("nodeFailThrottling", Unit.BINARY, "Metric indicating when node failure throttling is active. The value 1 means active, 0 means inactive"), + + DEPLOYMENT_PREPARE_MILLIS("deployment.prepareMillis", Unit.MILLISECOND, "Duration of deployment preparations"), + DEPLOYMENT_ACTIVATE_MILLIS("deployment.activateMillis", Unit.MILLISECOND, "Duration of deployment activations"); + private final String name; private final Unit unit; diff --git a/metrics/src/main/java/ai/vespa/metrics/ContainerMetrics.java b/metrics/src/main/java/ai/vespa/metrics/ContainerMetrics.java index ab3fb9b6197..98bd6230762 100644 --- a/metrics/src/main/java/ai/vespa/metrics/ContainerMetrics.java +++ b/metrics/src/main/java/ai/vespa/metrics/ContainerMetrics.java @@ -196,7 +196,7 @@ public enum ContainerMetrics implements VespaMetrics { SERVER_NUM_SUCCESSFUL_RESPONSE_WRITES("serverNumSuccessfulResponseWrites", Unit.REQUEST, "Number of successful response writes"), SERVER_NUM_FAILED_RESPONSE_WRITES("serverNumFailedResponseWrites", Unit.REQUEST, "Number of failed response writes"), - SERVER_TOTAL_SUCCESFUL_RESPONSE_LATENCY("serverTotalSuccessfulResponseLatency", Unit.MILLISECOND, "Total duration for execution of successful responses"), + 
SERVER_TOTAL_SUCCESSFUL_RESPONSE_LATENCY("serverTotalSuccessfulResponseLatency", Unit.MILLISECOND, "Total duration for execution of successful responses"), SERVER_TOTAL_FAILED_RESPONSE_LATENCY("serverTotalFailedResponseLatency", Unit.MILLISECOND, "Total duration for execution of failed responses"), SERVER_TIME_TO_FIRST_BYTE("serverTimeToFirstByte", Unit.MILLISECOND, "Time from request has been received by the server until the first byte is returned to the client"), diff --git a/metrics/src/main/java/ai/vespa/metrics/ControllerMetrics.java b/metrics/src/main/java/ai/vespa/metrics/ControllerMetrics.java new file mode 100644 index 00000000000..4770fe51830 --- /dev/null +++ b/metrics/src/main/java/ai/vespa/metrics/ControllerMetrics.java @@ -0,0 +1,82 @@ +package ai.vespa.metrics; + +/** + * @author yngveaasheim + */ +public enum ControllerMetrics implements VespaMetrics { + + ATHENZ_REQUEST_ERROR("athenz.request.error", Unit.REQUEST, "Controller: Athenz request error"), + ARCHIVE_BUCKET_COUNT("archive.bucketCount", Unit.BUCKET, "Controller: Archive bucket count"), + + DEPLOYMENT_START("deployment.start", Unit.DEPLOYMENT, "The number of started deployment jobs"), + DEPLOYMENT_NODE_ALLOCATION_FAILURE("deployment.nodeAllocationFailure", Unit.DEPLOYMENT, "The number of deployments failed due to node allocation failures"), + DEPLOYMENT_ENDPOINT_CERTIFICATE_TIMEOUT("deployment.endpointCertificateTimeout", Unit.DEPLOYMENT, "The number of deployments failed due to timeout acquiring endpoint certificate"), + DEPLOYMENT_DEPLOYMENT_FAILURE("deployment.deploymentFailure", Unit.DEPLOYMENT, "The number of deployments that failed"), + DEPLOYMENT_INVALID_APPLICATION("deployment.invalidApplication", Unit.DEPLOYMENT, "Deployments with invalid application package"), + DEPLOYMENT_CONVERGENCE_FAILURE("deployment.convergenceFailure", Unit.DEPLOYMENT, "The number of deployments with convergence failure"), + DEPLOYMENT_TEST_FAILURE("deployment.testFailure", Unit.DEPLOYMENT, "The number of 
test deployments with test failure"), + DEPLOYMENT_NO_TESTS("deployment.noTests", Unit.DEPLOYMENT, "Deployments with no tests"), + DEPLOYMENT_ERROR("deployment.error", Unit.DEPLOYMENT, "Deployments with error"), + DEPLOYMENT_ABORT("deployment.abort", Unit.DEPLOYMENT, "Deployments that were aborted"), + DEPLOYMENT_CANCEL("deployment.cancel", Unit.DEPLOYMENT, "Deployments that were canceled"), + DEPLOYMENT_SUCCESS("deployment.success", Unit.DEPLOYMENT, "Successful deployments"), + DEPLOYMENT_QUOTA_EXCEEDED("deployment.quotaExceeded", Unit.DEPLOYMENT, "Deployments stopped due to exceeding quota"), + BILLING_TENANTS("billing.tenants", Unit.TENANT, "Billing tenants"), + DEPLOYMENT_FAILURE_PERCENTAGE("deployment.failurePercentage", Unit.PERCENTAGE, "Deployment: Failure percentage"), + DEPLOYMENT_AVERAGE_DURATION("deployment.averageDuration", Unit.SECOND, "Deployment duration"), + DEPLOYMENT_FAILING_UPGRADES("deployment.failingUpgrades", Unit.DEPLOYMENT, "Deployment: Failing upgrades"), + DEPLOYMENT_BUILD_AGE_SECONDS("deployment.buildAgeSeconds", Unit.SECOND, "Deployment: The age of a build deployed"), + DEPLOYMENT_WARNINGS("deployment.warnings", Unit.ITEM, "The number of application related warnings during deployments"), + DEPLOYMENT_OVERDUE_UPGRADE_SECONDS("deployment.overdueUpgradeSeconds", Unit.SECOND, "Deployment: Overdue upgrade period"), + DEPLOYMENT_OS_CHANGE_DURATION("deployment.osChangeDuration", Unit.SECOND, "Deployment: OS change duration"), + DEPLOYMENT_PLATFORM_CHANGE_DURATION("deployment.platformChangeDuration", Unit.SECOND, "Deployment: Platform change duration"), + DEPLOYMENT_NODE_COUNT_BY_OS_VERSION("deployment.nodeCountByOsVersion", Unit.NODE, "Deployment: Node count by OS version"), + DEPLOYMENT_NODE_COUNT_BY_PLATFORM_VERSION("deployment.nodeCountByPlatformVersion", Unit.NODE, "Deployment: Node count by platform version"), + DEPLOYMENT_BROKEN_SYSTEM_VERSION("deployment.brokenSystemVersion", Unit.BINARY, "Deployment: Value 1 for broken system versions, 
0 if not"), + REMAINING_ROTATIONS("remaining_rotations", Unit.ROTATION, "Remaining rotations"), + DNS_QUEUED_REQUESTS("dns.queuedRequests", Unit.REQUEST, "Queued DNS requests"), + ZMS_QUOTA_USAGE("zms.quota.usage", Unit.FRACTION, "ZMS Quota usage per resource type"), + COREDUMP_PROCESSED("coredump.processed", Unit.FAILURE,"Controller: Core dumps processed"), + + // Metrics per API, metrics names generated in ControllerMaintainer/MetricsReporter + OPERATION_APPLICATION("operation.application", Unit.REQUEST, "Controller: Requests for /application API"), + OPERATION_CHANGEMANAGEMENT("operation.changemanagement", Unit.REQUEST, "Controller: Requests for /changemanagement API"), + OPERATION_CONFIGSERVER("operation.configserver", Unit.REQUEST, "Controller: Requests for /configserver API"), + OPERATION_CONTROLLER("operation.controller", Unit.REQUEST, "Controller: Requests for /controller API"), + OPERATION_FLAGS("operation.flags", Unit.REQUEST, "Controller: Requests for /flags API"), + OPERATION_OS("operation.os", Unit.REQUEST, "Controller: Requests for /os API"), + OPERATION_ROUTING("operation.routing", Unit.REQUEST, "Controller: Requests for /routing API"), + OPERATION_ZONE("operation.zone", Unit.REQUEST, "Controller: Requests for /zone API"), + + // Metering metrics - not used - TODO: remove from controller code. 
+ METERING_AGE_SECONDS("metering.age.seconds", Unit.SECOND, "Controller: Metering age seconds"), + METERING_COST_HOURLY("metering.cost.hourly", Unit.DOLLAR_PER_HOUR, "Controller: Metering cost hourly"), + METERING_DISK_GB("metering.diskGB", Unit.GIGABYTE, "Controller: Metering disk GB"), + METERING_MEMORY_GB("metering.memoryGB", Unit.GIGABYTE, "Controller: Metering memory GB"), + METERING_VCPU("metering.vcpu", Unit.VCPU, "Controller: Metering VCPU"), + METERING_LAST_REPORTED("metering_last_reported", Unit.SECONDS_SINCE_EPOCH, "Controller: Metering last reported"), + METERING_TOTAL_REPORTED("metering_total_reported", Unit.ITEM, "Controller: Metering total reported (sum of resources)"); + + private final String name; + private final Unit unit; + private final String description; + + ControllerMetrics(String name, Unit unit, String description) { + this.name = name; + this.unit = unit; + this.description = description; + } + + public String baseName() { + return name; + } + + public Unit unit() { + return unit; + } + + public String description() { + return description; + } + +} diff --git a/metrics/src/main/java/ai/vespa/metrics/Unit.java b/metrics/src/main/java/ai/vespa/metrics/Unit.java index d514b9e9839..48b4e72891f 100644 --- a/metrics/src/main/java/ai/vespa/metrics/Unit.java +++ b/metrics/src/main/java/ai/vespa/metrics/Unit.java @@ -10,6 +10,7 @@ public enum Unit { BYTE(BaseUnit.BYTE, "A collection of 8 bits"), BYTE_PER_SECOND(BaseUnit.BYTE, BaseUnit.SECOND, "A unit of storage capable of holding 8 bits"), CONNECTION(BaseUnit.CONNECTION, "A link used for communication between a client and a server"), + DEPLOYMENT(BaseUnit.DEPLOYMENT, "A deployment on hosted Vespa"), DOCUMENT(BaseUnit.DOCUMENT, "Vespa document, a collection of fields defined in a schema file"), DOCUMENTID(BaseUnit.DOCUMENTID, "A unique document identifier"), DOLLAR_PER_HOUR(BaseUnit.DOLLAR, BaseUnit.HOUR, "Total current cost of the cluster in $/hr"), @@ -36,10 +37,13 @@ public enum Unit { 
REQUEST(BaseUnit.REQUEST, "A request sent from a client to a server"), RESPONSE(BaseUnit.RESPONSE, "A response from a server to a client, typically as a response to a request"), RESTART(BaseUnit.RESTART, "A service or node restarts"), + ROTATION(BaseUnit.ROTATION, "Routing rotation"), SCORE(BaseUnit.SCORE, "Relevance score for a document"), SECOND(BaseUnit.SECOND, "Time span of 1 second"), + SECONDS_SINCE_EPOCH(BaseUnit.SECONDS_SINCE_EPOCH,"Seconds since Unix Epoch"), SESSION(BaseUnit.SESSION, "A set of operations taking place during one connection or as part of a higher level operation"), TASK(BaseUnit.TASK, "Piece of work executed by a server, e.g. to perform back-ground data maintenance"), + TENANT(BaseUnit.TENANT, "Tenant that owns zero or more applications in a managed Vespa system"), THREAD(BaseUnit.THREAD, "Computer thread for executing e.g. tasks, operations or queries"), VCPU(BaseUnit.VCPU,"Virtual CPU"), @@ -84,6 +88,7 @@ public enum Unit { BUCKET("bucket"), BYTE("byte"), CONNECTION("connection"), + DEPLOYMENT("deployment"), DOCUMENT("document"), DOCUMENTID("documentid"), DOLLAR("dollar"), @@ -108,10 +113,13 @@ public enum Unit { REQUEST("request"), RESPONSE("response"), RESTART("restart"), + ROTATION("routing rotation"), SCORE("score"), SECOND("second", "s"), + SECONDS_SINCE_EPOCH("seconds since epoch"), SESSION("session"), TASK("task"), + TENANT("tenant"), THREAD("thread"), VCPU("vcpu"), VERSION("version"), diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java index a80f07acba2..864566f119e 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/Node.java @@ -616,8 +616,8 @@ public final class Node implements Nodelike { } /** Returns the ACL for the node (trusted nodes, networks and ports) */ - public NodeAcl acl(NodeList allNodes, LoadBalancers 
loadBalancers, Zone zone, boolean simplerAcl) { - return NodeAcl.from(this, allNodes, loadBalancers, zone, simplerAcl); + public NodeAcl acl(NodeList allNodes, LoadBalancers loadBalancers, Zone zone) { + return NodeAcl.from(this, allNodes, loadBalancers, zone); } @Override diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java index 13a6c35e9a7..602314bed96 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java @@ -220,11 +220,11 @@ public class NodeRepository extends AbstractComponent { * @param host node for which to generate ACLs * @return the list of node ACLs */ - public List<NodeAcl> getChildAcls(Node host, boolean simplerAcl) { + public List<NodeAcl> getChildAcls(Node host) { if ( ! host.type().isHost()) throw new IllegalArgumentException("Only hosts have children"); NodeList allNodes = nodes().list(); return allNodes.childrenOf(host) - .mapToList(childNode -> childNode.acl(allNodes, loadBalancers, zone, simplerAcl)); + .mapToList(childNode -> childNode.acl(allNodes, loadBalancers, zone)); } /** Removes this application: all nodes are set dirty. */ diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java index c207e3c7ecc..32b59319a88 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java @@ -22,7 +22,7 @@ public class Autoscaler { /** What resource difference is worth a reallocation? 
*/ private static final double resourceIncreaseWorthReallocation = 0.03; /** The load increase headroom (as a fraction) we should have before needing to scale up, to decide to scale down */ - static final double headroomRequiredToScaleDown = 0.1; + static final double headroomRequiredToScaleDown = 0.15; private final NodeRepository nodeRepository; private final AllocationOptimizer allocationOptimizer; diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java index 585a7f341b5..5673b2d74ea 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java @@ -1,6 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.provision.maintenance; +import ai.vespa.metrics.ConfigServerMetrics; import com.yahoo.concurrent.UncheckedTimeoutException; import com.yahoo.config.provision.Deployer; import com.yahoo.config.provision.Deployment; @@ -45,10 +46,10 @@ public class NodeFailer extends NodeRepositoryMaintainer { private static final Logger log = Logger.getLogger(NodeFailer.class.getName()); /** Metric for number of hosts that we want to fail, but cannot due to throttling */ - static final String throttledHostFailuresMetric = "throttledHostFailures"; + static final String throttledHostFailuresMetric = ConfigServerMetrics.THROTTLED_HOST_FAILURES.baseName(); /** Metric for number of nodes that we want to fail, but cannot due to throttling */ - static final String throttledNodeFailuresMetric = "throttledNodeFailures"; + static final String throttledNodeFailuresMetric = ConfigServerMetrics.THROTTLED_NODE_FAILURES.baseName(); /** Metric that indicates whether throttling is active where 1 means active and 0 means inactive */ 
static final String throttlingActiveMetric = "nodeFailThrottling"; diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainer.java index dcdcbf09175..da05656fcee 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainer.java @@ -1,6 +1,7 @@ // Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.provision.maintenance; +import ai.vespa.metrics.ConfigServerMetrics; import com.yahoo.config.provision.ApplicationId; import com.yahoo.config.provision.ClusterSpec; import com.yahoo.config.provision.Deployer; @@ -75,7 +76,7 @@ public class SpareCapacityMaintainer extends NodeRepositoryMaintainer { CapacityChecker capacityChecker = new CapacityChecker(allNodes); List<Node> overcommittedHosts = capacityChecker.findOvercommittedHosts(); - metric.set("overcommittedHosts", overcommittedHosts.size(), null); + metric.set(ConfigServerMetrics.OVERCOMMITTED_HOSTS.baseName(), overcommittedHosts.size(), null); retireOvercommitedHosts(allNodes, overcommittedHosts); boolean success = true; @@ -93,7 +94,7 @@ public class SpareCapacityMaintainer extends NodeRepositoryMaintainer { success = false; } } - metric.set("spareHostCapacity", spareHostCapacity, null); + metric.set(ConfigServerMetrics.SPARE_HOST_CAPACITY.baseName(), spareHostCapacity, null); } return success ? 
1.0 : 0.0; } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/NodeAcl.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/NodeAcl.java index 7df19c97659..d0e72cea8fc 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/NodeAcl.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/NodeAcl.java @@ -45,12 +45,12 @@ public record NodeAcl(Node node, this.trustedUdpPorts = ImmutableSet.copyOf(Objects.requireNonNull(trustedUdpPorts, "trustedUdpPorts must be non-null")); } - public static NodeAcl from(Node node, NodeList allNodes, LoadBalancers loadBalancers, Zone zone, boolean simplerAcl) { + public static NodeAcl from(Node node, NodeList allNodes, LoadBalancers loadBalancers, Zone zone) { Set<TrustedNode> trustedNodes = new TreeSet<>(Comparator.comparing(TrustedNode::hostname)); Set<Integer> trustedPorts = new LinkedHashSet<>(); Set<Integer> trustedUdpPorts = new LinkedHashSet<>(); Set<String> trustedNetworks = new LinkedHashSet<>(); - IP.Space ipSpace = simplerAcl ? 
IP.Space.of(zone, node.cloudAccount()) : (ip, account) -> true; + IP.Space ipSpace = IP.Space.of(zone, node.cloudAccount()); // For all cases below, trust: // - SSH: If the host has one container, and it is using the host's network namespace, diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodeAclResponse.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodeAclResponse.java index 784f8f82d14..6fe14715355 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodeAclResponse.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodeAclResponse.java @@ -4,7 +4,6 @@ package com.yahoo.vespa.hosted.provision.restapi; import com.yahoo.container.jdisc.HttpRequest; import com.yahoo.restapi.SlimeJsonResponse; import com.yahoo.slime.Cursor; -import com.yahoo.vespa.flags.Flags; import com.yahoo.vespa.hosted.provision.Node; import com.yahoo.vespa.hosted.provision.NodeRepository; import com.yahoo.vespa.hosted.provision.node.NodeAcl; @@ -34,9 +33,8 @@ public class NodeAclResponse extends SlimeJsonResponse { Node node = nodeRepository.nodes().node(hostname) .orElseThrow(() -> new NotFoundException("No node with hostname '" + hostname + "'")); - boolean simplerAcl = Flags.SIMPLER_ACL.bindTo(nodeRepository.flagSource()).value(); - List<NodeAcl> acls = aclsForChildren ? nodeRepository.getChildAcls(node, simplerAcl) : - List.of(node.acl(nodeRepository.nodes().list(), nodeRepository.loadBalancers(), nodeRepository.zone(), simplerAcl)); + List<NodeAcl> acls = aclsForChildren ? 
nodeRepository.getChildAcls(node) : + List.of(node.acl(nodeRepository.nodes().list(), nodeRepository.loadBalancers(), nodeRepository.zone())); Cursor trustedNodesArray = object.setArray("trustedNodes"); acls.forEach(nodeAcl -> toSlime(nodeAcl, trustedNodesArray)); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java index 091060413a9..d33857d1a1e 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java @@ -88,7 +88,7 @@ public class AutoscalingTest { fixture.tester().clock().advance(Duration.ofDays(7)); fixture.loader().applyCpuLoad(0.1f, 10); fixture.tester().assertResources("Scaling cpu down since usage has gone down significantly", - 6, 1, 1.1, 9.6, 381.5, + 6, 1, 1.1, 9.8, 390.2, fixture.autoscale()); } @@ -666,7 +666,7 @@ public class AutoscalingTest { fixture.tester().clock().advance(Duration.ofHours(12 * 3 + 1)); fixture.loader().applyCpuLoad(0.02, 5); fixture.tester().assertResources("Scaling down since enough time has passed", - 3, 1, 1.0, 26, 111.5, + 5, 1, 1.0, 12.3, 50.7, fixture.autoscale()); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingUsingBcpGroupInfoTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingUsingBcpGroupInfoTest.java index 0a3e8024a97..379dbb27d87 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingUsingBcpGroupInfoTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingUsingBcpGroupInfoTest.java @@ -85,7 +85,7 @@ public class AutoscalingUsingBcpGroupInfoTest { fixture.store(new BcpGroupInfo(100, 1.1, 0.3)); fixture.loader().addCpuMeasurements(0.7f, 10); 
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info", - 3, 3, 10.5, 42.2, 185.8, + 3, 3, 10.5, 43.2, 190.0, fixture.autoscale()); // Higher query rate @@ -93,7 +93,7 @@ public class AutoscalingUsingBcpGroupInfoTest { fixture.store(new BcpGroupInfo(200, 1.1, 0.3)); fixture.loader().addCpuMeasurements(0.7f, 10); fixture.tester().assertResources("Scaling up cpu using bcp group cpu info", - 3, 3, 20.9, 42.2, 185.8, + 3, 3, 20.9, 43.2, 190.0, fixture.autoscale()); // Higher headroom @@ -101,7 +101,7 @@ public class AutoscalingUsingBcpGroupInfoTest { fixture.store(new BcpGroupInfo(100, 1.3, 0.3)); fixture.loader().addCpuMeasurements(0.7f, 10); fixture.tester().assertResources("Scaling up cpu using bcp group cpu info", - 3, 3, 12.4, 42.2, 185.8, + 3, 3, 12.4, 43.2, 190.0, fixture.autoscale()); // Higher per query cost @@ -109,7 +109,7 @@ public class AutoscalingUsingBcpGroupInfoTest { fixture.store(new BcpGroupInfo(100, 1.1, 0.45)); fixture.loader().addCpuMeasurements(0.7f, 10); fixture.tester().assertResources("Scaling up cpu using bcp group cpu info", - 3, 3, 15.7, 42.2, 185.8, + 3, 3, 15.7, 43.2, 190.0, fixture.autoscale()); } diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java index 26925372b93..94014712930 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java @@ -58,7 +58,7 @@ public class AclProvisioningTest { // Get trusted nodes for the first active node Node node = activeNodes.get(0); List<Node> hostOfNode = node.parentHostname().flatMap(tester.nodeRepository().nodes()::node).map(List::of).orElseGet(List::of); - Supplier<NodeAcl> nodeAcls = () -> node.acl(tester.nodeRepository().nodes().list(), 
tester.nodeRepository().loadBalancers(), tester.nodeRepository().zone(), true); + Supplier<NodeAcl> nodeAcls = () -> node.acl(tester.nodeRepository().nodes().list(), tester.nodeRepository().loadBalancers(), tester.nodeRepository().zone()); // Trusted nodes are active nodes in same application, proxy nodes and config servers assertAcls(trustedNodesOf(List.of(activeNodes, proxyNodes, configServers.asList(), hostOfNode), node.cloudAccount()), @@ -83,7 +83,7 @@ public class AclProvisioningTest { // Get trusted nodes for a parked tenant node Node node = tester.nodeRepository().nodes().list(Node.State.parked).nodeType(NodeType.tenant).first().get(); - NodeAcl nodeAcl = node.acl(tester.nodeRepository().nodes().list(), tester.nodeRepository().loadBalancers(), tester.nodeRepository().zone(), true); + NodeAcl nodeAcl = node.acl(tester.nodeRepository().nodes().list(), tester.nodeRepository().loadBalancers(), tester.nodeRepository().zone()); // Trusted nodes are all config-nodes assertAcls(trustedNodesOf(List.of(proxyNodes, configServers.asList()), node.cloudAccount()), List.of(nodeAcl)); @@ -108,7 +108,7 @@ public class AclProvisioningTest { // Get trusted nodes for the first config server Node node = tester.nodeRepository().nodes().node("cfg1") .orElseThrow(() -> new RuntimeException("Failed to find cfg1")); - NodeAcl nodeAcl = node.acl(nodes, tester.nodeRepository().loadBalancers(), tester.nodeRepository().zone(), true); + NodeAcl nodeAcl = node.acl(nodes, tester.nodeRepository().loadBalancers(), tester.nodeRepository().zone()); // Trusted nodes is all tenant nodes, all proxy nodes, all config servers and load balancer subnets // All tenant hosts because nodes are IPv6 and cfg are IPv4, so traffic is NATed. 
@@ -128,7 +128,7 @@ public class AclProvisioningTest { publicTester.makeConfigServers(3, "default", Version.fromString("6.123.456")); Node publicCfgNode = publicTester.nodeRepository().nodes().node("cfg1") .orElseThrow(() -> new RuntimeException("Failed to find cfg1")); - NodeAcl publicNodeAcl = publicCfgNode.acl(nodes, publicTester.nodeRepository().loadBalancers(), publicTester.nodeRepository().zone(), true); + NodeAcl publicNodeAcl = publicCfgNode.acl(nodes, publicTester.nodeRepository().loadBalancers(), publicTester.nodeRepository().zone()); assertEquals(Set.of(51820), publicNodeAcl.trustedUdpPorts()); } @@ -146,7 +146,7 @@ public class AclProvisioningTest { // Get trusted nodes for first proxy node NodeList proxyNodes = tester.nodeRepository().nodes().list().nodeType(NodeType.proxy); Node node = proxyNodes.first().get(); - NodeAcl nodeAcl = node.acl(tester.nodeRepository().nodes().list(), tester.nodeRepository().loadBalancers(), tester.nodeRepository().zone(), true); + NodeAcl nodeAcl = node.acl(tester.nodeRepository().nodes().list(), tester.nodeRepository().loadBalancers(), tester.nodeRepository().zone()); // Trusted nodes is all config servers and all proxy nodes assertAcls(trustedNodesOf(List.of(proxyNodes.asList(), configServers.asList()), node.cloudAccount()), List.of(nodeAcl)); @@ -164,7 +164,7 @@ public class AclProvisioningTest { List<Node> nodes = tester.makeReadyChildren(5, new NodeResources(1, 4, 10, 1), host.hostname()); - List<NodeAcl> acls = tester.nodeRepository().getChildAcls(host, true); + List<NodeAcl> acls = tester.nodeRepository().getChildAcls(host); // ACLs for each container on the host assertFalse(nodes.isEmpty()); @@ -188,7 +188,7 @@ public class AclProvisioningTest { List<Node> controllers = tester.nodeRepository().nodes().list().nodeType(NodeType.controller).asList(); // Controllers and hosts all trust each other - NodeAcl controllerAcl = controllers.get(0).acl(tester.nodeRepository().nodes().list(), 
tester.nodeRepository().loadBalancers(), tester.nodeRepository().zone(), true); + NodeAcl controllerAcl = controllers.get(0).acl(tester.nodeRepository().nodes().list(), tester.nodeRepository().loadBalancers(), tester.nodeRepository().zone()); assertAcls(trustedNodesOf(List.of(controllers), controllers.get(0).cloudAccount()), Set.of("10.2.3.0/24", "10.4.5.0/24"), List.of(controllerAcl)); assertEquals(Set.of(22, 4443, 443), controllerAcl.trustedPorts()); assertEquals(Set.of(), controllerAcl.trustedUdpPorts()); @@ -217,7 +217,7 @@ public class AclProvisioningTest { // ACL for nodes with allocation trust their respective load balancer networks, if any for (var host : hosts) { - List<NodeAcl> acls = tester.nodeRepository().getChildAcls(host, true); + List<NodeAcl> acls = tester.nodeRepository().getChildAcls(host); assertEquals(2, acls.size()); for (var acl : acls) { if (acl.node().allocation().isPresent()) { @@ -235,7 +235,7 @@ public class AclProvisioningTest { tester.makeConfigServers(3, "default", Version.fromString("6.123.456")); List<Node> readyNodes = tester.makeReadyNodes(1, "default", NodeType.proxy); - NodeAcl nodeAcl = readyNodes.get(0).acl(tester.nodeRepository().nodes().list(), tester.nodeRepository().loadBalancers(), tester.nodeRepository().zone(), true); + NodeAcl nodeAcl = readyNodes.get(0).acl(tester.nodeRepository().nodes().list(), tester.nodeRepository().loadBalancers(), tester.nodeRepository().zone()); assertEquals(3, nodeAcl.trustedNodes().size()); assertEquals(List.of(Set.of("127.0.1.1"), Set.of("127.0.1.2"), Set.of("127.0.1.3")), diff --git a/parent/pom.xml b/parent/pom.xml index d624bb2cd8c..df20b94ec79 100644 --- a/parent/pom.xml +++ b/parent/pom.xml @@ -887,6 +887,16 @@ <version>${opennlp.vespa.version}</version> </dependency> <dependency> + <groupId>org.apache.lucene</groupId> + <artifactId>lucene-core</artifactId> + <version>${lucene.vespa.version}</version> + </dependency> + <dependency> + <groupId>org.apache.lucene</groupId> + 
<artifactId>lucene-analysis-common</artifactId> + <version>${lucene.vespa.version}</version> + </dependency> + <dependency> <groupId>org.apache.velocity</groupId> <artifactId>velocity-engine-core</artifactId> <version>2.3</version> @@ -95,6 +95,7 @@ <module>linguistics-components</module> <module>logd</module> <module>logserver</module> + <module>lucene-linguistics</module> <module>messagebus</module> <module>metrics</module> <module>metrics-proxy</module> diff --git a/vespa-dependencies-enforcer/allowed-maven-dependencies.txt b/vespa-dependencies-enforcer/allowed-maven-dependencies.txt index 55b94307df6..7684e3ea2ae 100644 --- a/vespa-dependencies-enforcer/allowed-maven-dependencies.txt +++ b/vespa-dependencies-enforcer/allowed-maven-dependencies.txt @@ -97,6 +97,8 @@ org.apache.httpcomponents:httpmime:4.5.14 org.apache.httpcomponents.client5:httpclient5:5.2.1 org.apache.httpcomponents.core5:httpcore5:5.2.2 org.apache.httpcomponents.core5:httpcore5-h2:5.2.2 +org.apache.lucene:lucene-analysis-common:9.7.0 +org.apache.lucene:lucene-core:9.7.0 org.apache.maven:maven-archiver:3.6.0 org.apache.maven:maven-artifact:3.8.7 org.apache.maven:maven-artifact-manager:2.2.1 |