-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/FlagsTarget.java | 2
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchive.java | 273
-rw-r--r--  controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchiveTest.java | 174
-rw-r--r--  flags/src/main/java/com/yahoo/vespa/flags/json/RelationalCondition.java | 4
-rw-r--r--  messagebus/src/vespa/messagebus/network/rpctarget.cpp | 9
-rw-r--r--  messagebus/src/vespa/messagebus/network/rpctarget.h | 8
-rw-r--r--  messagebus/src/vespa/messagebus/network/rpctargetpool.cpp | 2
-rw-r--r--  storage/src/tests/distributor/distributor_bucket_space_test.cpp | 6
-rw-r--r--  storage/src/tests/distributor/pendingmessagetrackertest.cpp | 6
-rw-r--r--  storage/src/tests/distributor/statecheckerstest.cpp | 47
-rw-r--r--  storage/src/vespa/storage/distributor/distributor_bucket_space.cpp | 6
-rw-r--r--  storage/src/vespa/storage/distributor/distributor_stripe.cpp | 5
-rw-r--r--  storage/src/vespa/storage/distributor/distributor_stripe_component.cpp | 2
-rw-r--r--  storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.cpp | 13
-rw-r--r--  storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.h | 17
-rw-r--r--  storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.cpp | 63
-rw-r--r--  storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h | 56
-rw-r--r--  storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp | 17
-rw-r--r--  storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.h | 12
-rw-r--r--  storage/src/vespa/storage/distributor/messagetracker.h | 4
-rw-r--r--  storage/src/vespa/storage/distributor/nodeinfo.cpp | 26
-rw-r--r--  storage/src/vespa/storage/distributor/nodeinfo.h | 14
-rw-r--r--  storage/src/vespa/storage/distributor/operations/external/putoperation.cpp | 3
-rw-r--r--  storage/src/vespa/storage/distributor/operationtargetresolverimpl.cpp | 6
-rw-r--r--  storage/src/vespa/storage/distributor/pendingmessagetracker.cpp | 35
-rw-r--r--  storage/src/vespa/storage/distributor/pendingmessagetracker.h | 11
-rw-r--r--  storage/src/vespa/storage/distributor/persistencemessagetracker.cpp | 57
-rw-r--r--  storage/src/vespa/storage/distributor/persistencemessagetracker.h | 1
-rw-r--r--  storage/src/vespa/storage/distributor/statechecker.cpp | 12
-rw-r--r--  storage/src/vespa/storage/distributor/statechecker.h | 54
-rw-r--r--  storage/src/vespa/storage/distributor/statecheckers.cpp | 220
-rw-r--r--  storage/src/vespa/storageframework/generic/status/htmlstatusreporter.cpp | 15
-rw-r--r--  storage/src/vespa/storageframework/generic/status/htmlstatusreporter.h | 3
-rw-r--r--  vdslib/src/vespa/vdslib/distribution/distribution.cpp | 9
-rw-r--r--  vespalib/src/vespa/vespalib/stllike/hash_set_insert.hpp | 3
35 files changed, 575 insertions, 620 deletions
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/FlagsTarget.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/FlagsTarget.java
index 7403f0a1b01..fbf3a5d9a03 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/FlagsTarget.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/FlagsTarget.java
@@ -142,7 +142,7 @@ public interface FlagsTarget {
var fetchVector = new FetchVector();
if (!flagDimensions.contains(CLOUD)) fetchVector = fetchVector.with(CLOUD, cloud.value());
if (!flagDimensions.contains(ENVIRONMENT)) fetchVector = fetchVector.with(ENVIRONMENT, virtualZoneId.environment().value());
- if (!flagDimensions.contains(SYSTEM)) fetchVector = fetchVector.with(SYSTEM, system.value());
+ fetchVector = fetchVector.with(SYSTEM, system.value());
if (!flagDimensions.contains(ZONE_ID)) fetchVector = fetchVector.with(ZONE_ID, virtualZoneId.value());
return fetchVector.isEmpty() ? data : data.partialResolve(fetchVector);
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchive.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchive.java
index 577769baf1e..c6f1d96ed43 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchive.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchive.java
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.api.systemflags.v1;
+import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
@@ -18,8 +19,10 @@ import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.text.JSON;
import com.yahoo.vespa.flags.FetchVector;
import com.yahoo.vespa.flags.FlagId;
+import com.yahoo.vespa.flags.json.Condition;
import com.yahoo.vespa.flags.json.DimensionHelper;
import com.yahoo.vespa.flags.json.FlagData;
+import com.yahoo.vespa.flags.json.RelationalCondition;
import com.yahoo.vespa.hosted.controller.api.integration.zone.ZoneRegistry;
import java.io.BufferedInputStream;
@@ -49,6 +52,7 @@ import java.util.zip.ZipOutputStream;
import static com.yahoo.config.provision.CloudName.AWS;
import static com.yahoo.config.provision.CloudName.GCP;
import static com.yahoo.config.provision.CloudName.YAHOO;
+import static com.yahoo.vespa.flags.FetchVector.Dimension.SYSTEM;
import static com.yahoo.yolean.Exceptions.uncheck;
/**
@@ -82,8 +86,8 @@ public class SystemFlagsDataArchive {
String name = entry.getName();
if (!entry.isDirectory() && name.startsWith("flags/")) {
Path filePath = Paths.get(name);
- String rawData = new String(zipIn.readAllBytes(), StandardCharsets.UTF_8);
- addFile(builder, rawData, filePath, zoneRegistry, true);
+ String fileContent = new String(zipIn.readAllBytes(), StandardCharsets.UTF_8);
+ builder.maybeAddFile(filePath, fileContent, zoneRegistry, true);
}
}
return builder.build();
@@ -92,7 +96,7 @@ public class SystemFlagsDataArchive {
}
}
- public static SystemFlagsDataArchive fromDirectory(Path directory, ZoneRegistry zoneRegistry, boolean forceAddFiles) {
+ public static SystemFlagsDataArchive fromDirectory(Path directory, ZoneRegistry zoneRegistry, boolean simulateInController) {
Path root = directory.toAbsolutePath();
Path flagsDirectory = directory.resolve("flags");
if (!Files.isDirectory(flagsDirectory)) {
@@ -103,8 +107,8 @@ public class SystemFlagsDataArchive {
directoryStream.forEach(path -> {
Path relativePath = root.relativize(path.toAbsolutePath());
if (Files.isRegularFile(path)) {
- String rawData = uncheck(() -> Files.readString(path, StandardCharsets.UTF_8));
- addFile(builder, rawData, relativePath, zoneRegistry, forceAddFiles);
+ String fileContent = uncheck(() -> Files.readString(path, StandardCharsets.UTF_8));
+ builder.maybeAddFile(relativePath, fileContent, zoneRegistry, simulateInController);
}
});
return builder.build();
@@ -168,114 +172,119 @@ public class SystemFlagsDataArchive {
return files.getOrDefault(flagId, Map.of()).containsKey(filename);
}
- private static void addFile(Builder builder, String rawData, Path filePath, ZoneRegistry zoneRegistry, boolean force) {
- String filename = filePath.getFileName().toString();
-
- if (filename.startsWith("."))
- return; // Ignore files starting with '.'
-
- if (!force && !FlagsTarget.filenameForSystem(filename, zoneRegistry.system()))
- return; // Ignore files for other systems
-
- FlagId directoryDeducedFlagId = new FlagId(filePath.getName(filePath.getNameCount()-2).toString());
- FlagData flagData;
- if (rawData.isBlank()) {
- flagData = new FlagData(directoryDeducedFlagId);
- } else {
- Set<ZoneId> zones = force ? Stream.concat(Stream.of(ZoneId.ofVirtualControllerZone()),
- zoneRegistry.zones().all().zones().stream().map(ZoneApi::getVirtualId))
- .collect(Collectors.toSet())
- : Set.of();
- String normalizedRawData = normalizeJson(rawData, zones);
- flagData = FlagData.deserialize(normalizedRawData);
- if (!directoryDeducedFlagId.equals(flagData.id())) {
- throw new FlagValidationException("Flag data file with flag id '%s' in directory for '%s'"
- .formatted(flagData.id(), directoryDeducedFlagId.toString()));
- }
-
- String serializedData = flagData.serializeToJson();
- if (!JSON.equals(serializedData, normalizedRawData)) {
- throw new FlagValidationException("""
- %s contains unknown non-comment fields or rules with null values: after removing any comment fields the JSON is:
- %s
- but deserializing this ended up with:
- %s
- These fields may be spelled wrong, or remove them?
- See https://git.ouroath.com/vespa/hosted-feature-flags for more info on the JSON syntax
- """.formatted(filePath, normalizedRawData, serializedData));
- }
- }
-
- if (builder.hasFile(filename, flagData)) {
- throw new FlagValidationException("Flag data file in '%s' contains redundant flag data for id '%s' already set in another directory!"
- .formatted(filePath, flagData.id()));
- }
-
- builder.addFile(filename, flagData);
- }
-
- static String normalizeJson(String json, Set<ZoneId> zones) {
- JsonNode root = uncheck(() -> mapper.readTree(json));
- removeCommentsRecursively(root);
- removeNullRuleValues(root);
- verifyValues(root, zones);
- return root.toString();
- }
-
- private static void verifyValues(JsonNode root, Set<ZoneId> zones) {
- var cursor = new JsonAccessor(root);
- cursor.get("rules").forEachArrayElement(rule -> rule.get("conditions").forEachArrayElement(condition -> {
- FetchVector.Dimension dimension = DimensionHelper
- .fromWire(condition.get("dimension")
- .asString()
- .orElseThrow(() -> new FlagValidationException("Invalid dimension in condition: " + condition)));
- switch (dimension) {
- case APPLICATION_ID -> validateStringValues(condition, ApplicationId::fromSerializedForm);
- case CONSOLE_USER_EMAIL -> validateStringValues(condition, email -> {});
- case CLOUD -> validateStringValues(condition, cloud -> {
- if (!Set.of(YAHOO, AWS, GCP).contains(CloudName.from(cloud)))
- throw new FlagValidationException("Unknown cloud: " + cloud);
- });
- case CLUSTER_ID -> validateStringValues(condition, ClusterSpec.Id::from);
- case CLUSTER_TYPE -> validateStringValues(condition, ClusterSpec.Type::from);
- case ENVIRONMENT -> validateStringValues(condition, Environment::from);
- case HOSTNAME -> validateStringValues(condition, HostName::of);
- case NODE_TYPE -> validateStringValues(condition, NodeType::valueOf);
- case SYSTEM -> validateStringValues(condition, system -> {
+ private static void validateSystems(FlagData flagData) throws FlagValidationException {
+ flagData.rules().forEach(rule -> rule.conditions().forEach(condition -> {
+ if (condition.dimension() == SYSTEM) {
+ validateConditionValues(condition, system -> {
if (!SystemName.hostedVespa().contains(SystemName.from(system)))
throw new FlagValidationException("Unknown system: " + system);
});
- case TENANT_ID -> validateStringValues(condition, TenantName::from);
- case VESPA_VERSION -> validateStringValues(condition, versionString -> {
- if (Version.fromString(versionString).getMajor() < 8)
- throw new FlagValidationException("Major Vespa version must be at least 8: " + versionString);
- });
- case ZONE_ID -> validateStringValues(condition, zoneIdString -> {
- ZoneId zoneId = ZoneId.from(zoneIdString);
- if (!zones.isEmpty() && !zones.contains(zoneId))
- throw new FlagValidationException("Unknown zone: " + zoneIdString);
- });
}
}));
}
- private static void validateStringValues(JsonAccessor condition, Consumer<String> valueValidator) {
- condition.get("values").forEachArrayElement(conditionValue -> {
- String value = conditionValue.asString()
- .orElseThrow(() -> {
- String dimension = condition.get("dimension").asString().orElseThrow();
- String type = condition.get("type").asString().orElseThrow();
- return new FlagValidationException("Non-string %s in %s condition: %s".formatted(
- dimension, type, conditionValue));
- });
+ private static void validateForSystem(FlagData flagData, ZoneRegistry zoneRegistry, boolean inController) throws FlagValidationException {
+ Set<ZoneId> zones = inController ?
+ zoneRegistry.zonesIncludingSystem().all().zones().stream().map(ZoneApi::getVirtualId).collect(Collectors.toSet()) :
+ null;
+
+ flagData.rules().forEach(rule -> rule.conditions().forEach(condition -> {
+ int force_switch_expression_dummy = switch (condition.type()) {
+ case RELATIONAL -> switch (condition.dimension()) {
+ case APPLICATION_ID, CLOUD, CLUSTER_ID, CLUSTER_TYPE, CONSOLE_USER_EMAIL, ENVIRONMENT,
+ HOSTNAME, NODE_TYPE, SYSTEM, TENANT_ID, ZONE_ID ->
+ throw new FlagValidationException(condition.type().toWire() + " " +
+ DimensionHelper.toWire(condition.dimension()) +
+ " condition is not supported");
+ case VESPA_VERSION -> {
+ RelationalCondition rCond = RelationalCondition.create(condition.toCreateParams());
+ Version version = Version.fromString(rCond.relationalPredicate().rightOperand());
+ if (version.getMajor() < 8)
+ throw new FlagValidationException("Major Vespa version must be at least 8: " + version);
+ yield 0;
+ }
+ };
+
+ case WHITELIST, BLACKLIST -> switch (condition.dimension()) {
+ case APPLICATION_ID -> validateConditionValues(condition, ApplicationId::fromSerializedForm);
+ case CONSOLE_USER_EMAIL -> validateConditionValues(condition, email -> {
+ if (!email.contains("@"))
+ throw new FlagValidationException("Invalid email address: " + email);
+ });
+ case CLOUD -> validateConditionValues(condition, cloud -> {
+ if (!Set.of(YAHOO, AWS, GCP).contains(CloudName.from(cloud)))
+ throw new FlagValidationException("Unknown cloud: " + cloud);
+ });
+ case CLUSTER_ID -> validateConditionValues(condition, ClusterSpec.Id::from);
+ case CLUSTER_TYPE -> validateConditionValues(condition, ClusterSpec.Type::from);
+ case ENVIRONMENT -> validateConditionValues(condition, Environment::from);
+ case HOSTNAME -> validateConditionValues(condition, HostName::of);
+ case NODE_TYPE -> validateConditionValues(condition, NodeType::valueOf);
+ case SYSTEM -> throw new IllegalStateException("Flag data contains system dimension");
+ case TENANT_ID -> validateConditionValues(condition, TenantName::from);
+ case VESPA_VERSION -> throw new FlagValidationException(condition.type().toWire() + " " +
+ DimensionHelper.toWire(condition.dimension()) +
+ " condition is not supported");
+ case ZONE_ID -> validateConditionValues(condition, zoneIdString -> {
+ ZoneId zoneId = ZoneId.from(zoneIdString);
+ if (inController && !zones.contains(zoneId))
+ throw new FlagValidationException("Unknown zone: " + zoneIdString);
+ });
+ };
+ };
+ }));
+ }
+
+ private static int validateConditionValues(Condition condition, Consumer<String> valueValidator) {
+ condition.toCreateParams().values().forEach(value -> {
try {
valueValidator.accept(value);
} catch (IllegalArgumentException e) {
- String dimension = condition.get("dimension").asString().orElseThrow();
- String type = condition.get("type").asString().orElseThrow();
+ String dimension = DimensionHelper.toWire(condition.dimension());
+ String type = condition.type().toWire();
throw new FlagValidationException("Invalid %s '%s' in %s condition: %s".formatted(dimension, value, type, e.getMessage()));
}
});
+
+ return 0; // dummy to force switch expression
+ }
+
+ private static FlagData parseFlagData(FlagId flagId, String fileContent, ZoneRegistry zoneRegistry, boolean inController) {
+ if (fileContent.isBlank()) return new FlagData(flagId);
+
+ final JsonNode root;
+ try {
+ root = mapper.readTree(fileContent);
+ } catch (JsonProcessingException e) {
+ throw new FlagValidationException("Invalid JSON: " + e.getMessage());
+ }
+
+ removeCommentsRecursively(root);
+ removeNullRuleValues(root);
+ String normalizedRawData = root.toString();
+ FlagData flagData = FlagData.deserialize(normalizedRawData);
+
+ if (!flagId.equals(flagData.id()))
+ throw new FlagValidationException("Flag ID specified in file (%s) doesn't match the directory name (%s)"
+ .formatted(flagData.id(), flagId.toString()));
+
+ String serializedData = flagData.serializeToJson();
+ if (!JSON.equals(serializedData, normalizedRawData))
+ throw new FlagValidationException("""
+ Unknown non-comment fields or rules with null values: after removing any comment fields the JSON is:
+ %s
+ but deserializing this ended up with:
+ %s
+ These fields may be spelled wrong, or remove them?
+ See https://git.ouroath.com/vespa/hosted-feature-flags for more info on the JSON syntax
+ """.formatted(normalizedRawData, serializedData));
+
+ validateSystems(flagData);
+ flagData = flagData.partialResolve(new FetchVector().with(SYSTEM, zoneRegistry.system().value()));
+
+ validateForSystem(flagData, zoneRegistry, inController);
+
+ return flagData;
}
private static void removeCommentsRecursively(JsonNode node) {
@@ -312,56 +321,46 @@ public class SystemFlagsDataArchive {
public Builder() {}
- public Builder addFile(String filename, FlagData data) {
- files.computeIfAbsent(data.id(), k -> new TreeMap<>()).put(filename, data);
- return this;
- }
+ boolean maybeAddFile(Path filePath, String fileContent, ZoneRegistry zoneRegistry, boolean inController) {
+ String filename = filePath.getFileName().toString();
- public boolean hasFile(String filename, FlagData data) {
- return files.containsKey(data.id()) && files.get(data.id()).containsKey(filename);
- }
-
- public SystemFlagsDataArchive build() {
- Map<FlagId, Map<String, FlagData>> copy = new TreeMap<>();
- files.forEach((flagId, map) -> copy.put(flagId, new TreeMap<>(map)));
- return new SystemFlagsDataArchive(copy);
- }
+ if (filename.startsWith("."))
+ return false; // Ignore files starting with '.'
- }
+ if (!inController && !FlagsTarget.filenameForSystem(filename, zoneRegistry.system()))
+ return false; // Ignore files for other systems
- private static class JsonAccessor {
- private final JsonNode jsonNode;
+ FlagId directoryDeducedFlagId = new FlagId(filePath.getName(filePath.getNameCount()-2).toString());
- public JsonAccessor(JsonNode jsonNode) {
- this.jsonNode = jsonNode;
- }
+ if (hasFile(filename, directoryDeducedFlagId))
+ throw new FlagValidationException("Flag data file in '%s' contains redundant flag data for id '%s' already set in another directory!"
+ .formatted(filePath, directoryDeducedFlagId));
- public JsonAccessor get(String fieldName) {
- if (jsonNode == null) {
- return this;
- } else {
- return new JsonAccessor(jsonNode.get(fieldName));
+ final FlagData flagData;
+ try {
+ flagData = parseFlagData(directoryDeducedFlagId, fileContent, zoneRegistry, inController);
+ } catch (FlagValidationException e) {
+ throw new FlagValidationException("In file " + filePath + ": " + e.getMessage());
}
- }
- public Optional<String> asString() {
- return jsonNode != null && jsonNode.isTextual() ? Optional.of(jsonNode.textValue()) : Optional.empty();
+ addFile(filename, flagData);
+ return true;
}
- public void forEachArrayElement(Consumer<JsonAccessor> consumer) {
- if (jsonNode != null && jsonNode.isArray()) {
- jsonNode.forEach(jsonNodeElement -> consumer.accept(new JsonAccessor(jsonNodeElement)));
- }
+ public Builder addFile(String filename, FlagData data) {
+ files.computeIfAbsent(data.id(), k -> new TreeMap<>()).put(filename, data);
+ return this;
}
- /** Returns true if this (JsonNode) is a string and equal to value. */
- public boolean isEqualTo(String value) {
- return jsonNode != null && jsonNode.isTextual() && Objects.equals(jsonNode.textValue(), value);
+ public boolean hasFile(String filename, FlagId id) {
+ return files.containsKey(id) && files.get(id).containsKey(filename);
}
- @Override
- public String toString() {
- return jsonNode == null ? "undefined" : jsonNode.toString();
+ public SystemFlagsDataArchive build() {
+ Map<FlagId, Map<String, FlagData>> copy = new TreeMap<>();
+ files.forEach((flagId, map) -> copy.put(flagId, new TreeMap<>(map)));
+ return new SystemFlagsDataArchive(copy);
}
+
}
}
diff --git a/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchiveTest.java b/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchiveTest.java
index 2d0374dc888..759f21579d4 100644
--- a/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchiveTest.java
+++ b/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchiveTest.java
@@ -14,6 +14,7 @@ import com.yahoo.vespa.athenz.api.AthenzService;
import com.yahoo.vespa.flags.FetchVector;
import com.yahoo.vespa.flags.FlagId;
import com.yahoo.vespa.flags.RawFlag;
+import com.yahoo.vespa.flags.json.Condition;
import com.yahoo.vespa.flags.json.FlagData;
import com.yahoo.vespa.hosted.controller.api.integration.zone.ZoneRegistry;
import org.junit.jupiter.api.Test;
@@ -28,10 +29,12 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
+import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import java.util.Optional;
import java.util.Set;
+import java.util.stream.Stream;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -78,11 +81,11 @@ public class SystemFlagsDataArchiveTest {
can_serialize_and_deserialize_archive(true);
}
- private void can_serialize_and_deserialize_archive(boolean forceAddFiles) throws IOException {
+ private void can_serialize_and_deserialize_archive(boolean simulateInController) throws IOException {
File tempFile = File.createTempFile("serialized-flags-archive", null, temporaryFolder);
try (OutputStream out = new BufferedOutputStream(new FileOutputStream(tempFile))) {
- var archive = fromDirectory("system-flags", forceAddFiles);
- if (forceAddFiles)
+ var archive = fromDirectory("system-flags", simulateInController);
+ if (simulateInController)
archive.validateAllFilesAreForTargets(Set.of(mainControllerTarget, prodUsWestCfgTarget));
archive.toZip(out);
}
@@ -98,9 +101,9 @@ public class SystemFlagsDataArchiveTest {
retrieves_correct_flag_data_for_target(true);
}
- private void retrieves_correct_flag_data_for_target(boolean forceAddFiles) {
- var archive = fromDirectory("system-flags", forceAddFiles);
- if (forceAddFiles)
+ private void retrieves_correct_flag_data_for_target(boolean simulateInController) {
+ var archive = fromDirectory("system-flags", simulateInController);
+ if (simulateInController)
archive.validateAllFilesAreForTargets(Set.of(mainControllerTarget, prodUsWestCfgTarget));
assertArchiveReturnsCorrectTestFlagDataForTarget(archive);
}
@@ -111,9 +114,9 @@ public class SystemFlagsDataArchiveTest {
supports_multi_level_flags_directory(true);
}
- private void supports_multi_level_flags_directory(boolean forceAddFiles) {
- var archive = fromDirectory("system-flags-multi-level", forceAddFiles);
- if (forceAddFiles)
+ private void supports_multi_level_flags_directory(boolean simulateInController) {
+ var archive = fromDirectory("system-flags-multi-level", simulateInController);
+ if (simulateInController)
archive.validateAllFilesAreForTargets(Set.of(mainControllerTarget, prodUsWestCfgTarget));
assertFlagDataHasValue(archive, MY_TEST_FLAG, mainControllerTarget, "default");
}
@@ -124,9 +127,9 @@ public class SystemFlagsDataArchiveTest {
duplicated_flagdata_is_detected(true);
}
- private void duplicated_flagdata_is_detected(boolean forceAddFiles) {
+ private void duplicated_flagdata_is_detected(boolean simulateInController) {
Throwable exception = assertThrows(FlagValidationException.class, () -> {
- fromDirectory("system-flags-multi-level-with-duplicated-flagdata", forceAddFiles);
+ fromDirectory("system-flags-multi-level-with-duplicated-flagdata", simulateInController);
});
assertTrue(exception.getMessage().contains("contains redundant flag data for id 'my-test-flag' already set in another directory!"));
}
@@ -137,9 +140,9 @@ public class SystemFlagsDataArchiveTest {
empty_files_are_handled_as_no_flag_data_for_target(true);
}
- private void empty_files_are_handled_as_no_flag_data_for_target(boolean forceAddFiles) {
- var archive = fromDirectory("system-flags", forceAddFiles);
- if (forceAddFiles)
+ private void empty_files_are_handled_as_no_flag_data_for_target(boolean simulateInController) {
+ var archive = fromDirectory("system-flags", simulateInController);
+ if (simulateInController)
archive.validateAllFilesAreForTargets(Set.of(mainControllerTarget, prodUsWestCfgTarget));
assertNoFlagData(archive, FLAG_WITH_EMPTY_DATA, mainControllerTarget);
assertFlagDataHasValue(archive, FLAG_WITH_EMPTY_DATA, prodUsWestCfgTarget, "main.prod.us-west-1");
@@ -187,7 +190,7 @@ public class SystemFlagsDataArchiveTest {
fromDirectory("system-flags-with-unknown-field-name", true);
});
assertEquals("""
- flags/my-test-flag/main.prod.us-west-1.json contains unknown non-comment fields or rules with null values: after removing any comment fields the JSON is:
+ In file flags/my-test-flag/main.prod.us-west-1.json: Unknown non-comment fields or rules with null values: after removing any comment fields the JSON is:
{"id":"my-test-flag","rules":[{"condition":[{"type":"whitelist","dimension":"hostname","values":["foo.com"]}],"value":"default"}]}
but deserializing this ended up with:
{"id":"my-test-flag","rules":[{"value":"default"}]}
@@ -218,6 +221,7 @@ public class SystemFlagsDataArchiveTest {
void remove_comments_and_null_value_in_rules() {
assertTrue(JSON.equals("""
{
+ "id": "foo",
"rules": [
{
"conditions": [
@@ -249,8 +253,9 @@ public class SystemFlagsDataArchiveTest {
}
]
}""",
- SystemFlagsDataArchive.normalizeJson("""
+ normalizeJson("""
{
+ "id": "foo",
"comment": "bar",
"rules": [
{
@@ -289,83 +294,91 @@ public class SystemFlagsDataArchiveTest {
"value": true
}
]
- }""", Set.of(ZoneId.from("prod.us-west-1")))));
+ }""")));
+ }
+
+ private static String normalizeJson(String json) {
+ SystemFlagsDataArchive.Builder builder = new SystemFlagsDataArchive.Builder();
+ assertTrue(builder.maybeAddFile(Path.of("flags/temporary/foo/default.json"), json, createZoneRegistryMock(), true));
+ List<FlagData> flagData = builder.build().flagData(prodUsWestCfgTarget);
+ assertEquals(1, flagData.size());
+ return JSON.canonical(flagData.get(0).serializeToJson());
}
@Test
void normalize_json_succeed_on_valid_values() {
- normalizeJson("application", "\"a:b:c\"");
- normalizeJson("cloud", "\"yahoo\"");
- normalizeJson("cloud", "\"aws\"");
- normalizeJson("cloud", "\"gcp\"");
- normalizeJson("cluster-id", "\"some-id\"");
- normalizeJson("cluster-type", "\"admin\"");
- normalizeJson("cluster-type", "\"container\"");
- normalizeJson("cluster-type", "\"content\"");
- normalizeJson("console-user-email", "\"name@domain.com\"");
- normalizeJson("environment", "\"prod\"");
- normalizeJson("environment", "\"staging\"");
- normalizeJson("environment", "\"test\"");
- normalizeJson("hostname", "\"2080046-v6-11.ostk.bm2.prod.gq1.yahoo.com\"");
- normalizeJson("node-type", "\"tenant\"");
- normalizeJson("node-type", "\"host\"");
- normalizeJson("node-type", "\"config\"");
- normalizeJson("node-type", "\"host\"");
- normalizeJson("system", "\"main\"");
- normalizeJson("system", "\"public\"");
- normalizeJson("tenant", "\"vespa\"");
- normalizeJson("vespa-version", "\"8.201.13\"");
- normalizeJson("zone", "\"prod.us-west-1\"", Set.of(ZoneId.from("prod.us-west-1")));
- }
-
- private void normalizeJson(String dimension, String jsonValue) {
- normalizeJson(dimension, jsonValue, Set.of());
- }
-
- private void normalizeJson(String dimension, String jsonValue, Set<ZoneId> zones) {
- SystemFlagsDataArchive.normalizeJson("""
+ addFile(Condition.Type.WHITELIST, "application", "a:b:c");
+ addFile(Condition.Type.WHITELIST, "cloud", "yahoo");
+ addFile(Condition.Type.WHITELIST, "cloud", "aws");
+ addFile(Condition.Type.WHITELIST, "cloud", "gcp");
+ addFile(Condition.Type.WHITELIST, "cluster-id", "some-id");
+ addFile(Condition.Type.WHITELIST, "cluster-type", "admin");
+ addFile(Condition.Type.WHITELIST, "cluster-type", "container");
+ addFile(Condition.Type.WHITELIST, "cluster-type", "content");
+ addFile(Condition.Type.WHITELIST, "console-user-email", "name@domain.com");
+ addFile(Condition.Type.WHITELIST, "environment", "prod");
+ addFile(Condition.Type.WHITELIST, "environment", "staging");
+ addFile(Condition.Type.WHITELIST, "environment", "test");
+ addFile(Condition.Type.WHITELIST, "hostname", "2080046-v6-11.ostk.bm2.prod.gq1.yahoo.com");
+ addFile(Condition.Type.WHITELIST, "node-type", "tenant");
+ addFile(Condition.Type.WHITELIST, "node-type", "host");
+ addFile(Condition.Type.WHITELIST, "node-type", "config");
+ addFile(Condition.Type.WHITELIST, "node-type", "host");
+ addFile(Condition.Type.WHITELIST, "system", "main");
+ addFile(Condition.Type.WHITELIST, "system", "public");
+ addFile(Condition.Type.WHITELIST, "tenant", "vespa");
+ addFile(Condition.Type.RELATIONAL, "vespa-version", ">=8.201.13");
+ addFile(Condition.Type.WHITELIST, "zone", "prod.us-west-1");
+ }
+
+ private void addFile(Condition.Type type, String dimension, String jsonValue) {
+ SystemFlagsDataArchive.Builder builder = new SystemFlagsDataArchive.Builder();
+
+ String valuesField = type == Condition.Type.RELATIONAL ?
+ "\"predicate\": \"%s\"".formatted(jsonValue) :
+ "\"values\": [ \"%s\" ]".formatted(jsonValue);
+
+ assertTrue(builder.maybeAddFile(Path.of("flags/temporary/foo/default.json"), """
{
"id": "foo",
"rules": [
{
"conditions": [
{
- "type": "whitelist",
+ "type": "%s",
"dimension": "%s",
- "values": [ %s ]
+ %s
}
],
"value": true
}
]
}
- """.formatted(dimension, jsonValue), zones);
+ """.formatted(type.toWire(), dimension, valuesField),
+ createZoneRegistryMock(),
+ true));
}
@Test
void normalize_json_fail_on_invalid_values() {
- failNormalizeJson("application", "\"a.b.c\"", "Invalid application 'a.b.c' in whitelist condition: Application ids must be on the form tenant:application:instance, but was a.b.c");
- failNormalizeJson("cloud", "\"foo\"", "Unknown cloud: foo");
- // failNormalizeJson("cluster-id", ... any String is valid
- failNormalizeJson("cluster-type", "\"foo\"", "Invalid cluster-type 'foo' in whitelist condition: Illegal cluster type 'foo'");
- failNormalizeJson("console-user-email", "123", "Non-string console-user-email in whitelist condition: 123");
- failNormalizeJson("environment", "\"foo\"", "Invalid environment 'foo' in whitelist condition: 'foo' is not a valid environment identifier");
- failNormalizeJson("hostname", "\"not:a:hostname\"", "Invalid hostname 'not:a:hostname' in whitelist condition: hostname must match '(([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9-]{0,61}[A-Za-z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9-]{0,61}[A-Za-z0-9])\\.?', but got: 'not:a:hostname'");
- failNormalizeJson("node-type", "\"footype\"", "Invalid node-type 'footype' in whitelist condition: No enum constant com.yahoo.config.provision.NodeType.footype");
- failNormalizeJson("system", "\"bar\"", "Invalid system 'bar' in whitelist condition: 'bar' is not a valid system");
- failNormalizeJson("tenant", "123", "Non-string tenant in whitelist condition: 123");
- failNormalizeJson("vespa-version", "\"not-a-version\"", "Invalid vespa-version 'not-a-version' in whitelist condition: Invalid version component in 'not-a-version'");
- failNormalizeJson("zone", "\"dev.%illegal\"", Set.of(ZoneId.from("prod.example-region")), "Invalid zone 'dev.%illegal' in whitelist condition: region name must match '[a-z]([a-z0-9-]*[a-z0-9])*', but got: '%illegal'");
- failNormalizeJson("zone", "\"dev.non-existing-zone\"", Set.of(ZoneId.from("prod.example-region")), "Unknown zone: dev.non-existing-zone");
- }
-
- private void failNormalizeJson(String dimension, String jsonValue, String expectedExceptionMessage) {
- failNormalizeJson(dimension, jsonValue, Set.of(), expectedExceptionMessage);
- }
-
- private void failNormalizeJson(String dimension, String jsonValue, Set<ZoneId> zones, String expectedExceptionMessage) {
+ failAddFile(Condition.Type.WHITELIST, "application", "a.b.c", "In file flags/temporary/foo/default.json: Invalid application 'a.b.c' in whitelist condition: Application ids must be on the form tenant:application:instance, but was a.b.c");
+ failAddFile(Condition.Type.WHITELIST, "cloud", "foo", "In file flags/temporary/foo/default.json: Unknown cloud: foo");
+ // cluster-id: any String is valid
+ failAddFile(Condition.Type.WHITELIST, "cluster-type", "foo", "In file flags/temporary/foo/default.json: Invalid cluster-type 'foo' in whitelist condition: Illegal cluster type 'foo'");
+ failAddFile(Condition.Type.WHITELIST, "console-user-email", "not-valid-email-address", "In file flags/temporary/foo/default.json: Invalid email address: not-valid-email-address");
+ failAddFile(Condition.Type.WHITELIST, "environment", "foo", "In file flags/temporary/foo/default.json: Invalid environment 'foo' in whitelist condition: 'foo' is not a valid environment identifier");
+ failAddFile(Condition.Type.WHITELIST, "hostname", "not:a:hostname", "In file flags/temporary/foo/default.json: Invalid hostname 'not:a:hostname' in whitelist condition: hostname must match '(([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9-]{0,61}[A-Za-z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9-]{0,61}[A-Za-z0-9])\\.?', but got: 'not:a:hostname'");
+ failAddFile(Condition.Type.WHITELIST, "node-type", "footype", "In file flags/temporary/foo/default.json: Invalid node-type 'footype' in whitelist condition: No enum constant com.yahoo.config.provision.NodeType.footype");
+ failAddFile(Condition.Type.WHITELIST, "system", "bar", "In file flags/temporary/foo/default.json: Invalid system 'bar' in whitelist condition: 'bar' is not a valid system");
+ failAddFile(Condition.Type.WHITELIST, "tenant", "a tenant", "In file flags/temporary/foo/default.json: Invalid tenant 'a tenant' in whitelist condition: tenant name must match '[a-zA-Z0-9_-]{1,256}', but got: 'a tenant'");
+ failAddFile(Condition.Type.WHITELIST, "vespa-version", "not-a-version", "In file flags/temporary/foo/default.json: whitelist vespa-version condition is not supported");
+ failAddFile(Condition.Type.RELATIONAL, "vespa-version", ">7.1.2", "In file flags/temporary/foo/default.json: Major Vespa version must be at least 8: 7.1.2");
+ failAddFile(Condition.Type.WHITELIST, "zone", "dev.%illegal", "In file flags/temporary/foo/default.json: Invalid zone 'dev.%illegal' in whitelist condition: region name must match '[a-z]([a-z0-9-]*[a-z0-9])*', but got: '%illegal'");
+ }
+
+ private void failAddFile(Condition.Type type, String dimension, String jsonValue, String expectedExceptionMessage) {
try {
- normalizeJson(dimension, jsonValue, zones);
+ addFile(type, dimension, jsonValue);
fail();
} catch (RuntimeException e) {
assertEquals(expectedExceptionMessage, e.getMessage());
@@ -380,8 +393,8 @@ public class SystemFlagsDataArchiveTest {
assertFlagDataHasValue(archive, MY_TEST_FLAG, prodUsWestCfgTarget, "main.prod.us-west-1");
}
- private SystemFlagsDataArchive fromDirectory(String testDirectory, boolean forceAddFiles) {
- return SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/" + testDirectory), createZoneRegistryMock(), forceAddFiles);
+ private SystemFlagsDataArchive fromDirectory(String testDirectory, boolean simulateInController) {
+ return SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/" + testDirectory), createZoneRegistryMock(), simulateInController);
}
@SuppressWarnings("unchecked") // workaround for mocking a method for generic return type
@@ -396,12 +409,21 @@ public class SystemFlagsDataArchiveTest {
when(registryMock.systemZone()).thenReturn(zoneApi);
when(registryMock.getConfigServerVipUri(any())).thenReturn(URI.create("http://localhost:8080/"));
when(registryMock.getConfigServerHttpsIdentity(any())).thenReturn(new AthenzService("domain", "servicename"));
+ ZoneList zones = mockZoneList("prod.us-west-1", "prod.us-east-3");
+ when(registryMock.zones()).thenReturn(zones);
+ ZoneList zonesIncludingSystem = mockZoneList("prod.us-west-1", "prod.us-east-3", "prod.controller");
+ when(registryMock.zonesIncludingSystem()).thenReturn(zonesIncludingSystem);
+ return registryMock;
+ }
+
+ @SuppressWarnings("unchecked") // workaround for mocking a method for generic return type
+ private static ZoneList mockZoneList(String... zones) {
ZoneList zoneListMock = mock(ZoneList.class);
when(zoneListMock.reachable()).thenReturn(zoneListMock);
when(zoneListMock.all()).thenReturn(zoneListMock);
- when(zoneListMock.zones()).thenReturn((List)List.of(new SimpleZone("prod.us-west-1"), new SimpleZone("prod.us-east-3")));
- when(registryMock.zones()).thenReturn(zoneListMock);
- return registryMock;
+ List<? extends ZoneApi> zoneList = Stream.of(zones).map(SimpleZone::new).toList();
+ when(zoneListMock.zones()).thenReturn((List) zoneList);
+ return zoneListMock;
}
private static void assertArchiveReturnsCorrectTestFlagDataForTarget(SystemFlagsDataArchive archive) {
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/json/RelationalCondition.java b/flags/src/main/java/com/yahoo/vespa/flags/json/RelationalCondition.java
index 749f6830870..031b61c8e7e 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/json/RelationalCondition.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/json/RelationalCondition.java
@@ -68,6 +68,10 @@ public class RelationalCondition implements Condition {
return fetchVector.getValue(dimension).map(predicate::test).orElse(false);
}
+ public RelationalPredicate relationalPredicate() {
+ return relationalPredicate;
+ }
+
@Override
public WireCondition toWire() {
var condition = new WireCondition();
diff --git a/messagebus/src/vespa/messagebus/network/rpctarget.cpp b/messagebus/src/vespa/messagebus/network/rpctarget.cpp
index 9c6ca9dff69..d7f3e77c6fd 100644
--- a/messagebus/src/vespa/messagebus/network/rpctarget.cpp
+++ b/messagebus/src/vespa/messagebus/network/rpctarget.cpp
@@ -5,8 +5,8 @@
namespace mbus {
-RPCTarget::RPCTarget(const string &spec, FRT_Supervisor &orb) :
- _lock(),
+RPCTarget::RPCTarget(const string &spec, FRT_Supervisor &orb, ctor_tag)
+ : _lock(),
_orb(orb),
_name(spec),
_target(*_orb.GetTarget(spec.c_str())),
@@ -48,6 +48,7 @@ RPCTarget::resolveVersion(duration timeout, RPCTarget::IVersionHandler &handler)
handler.handleVersion(_version.get());
} else if (shouldInvoke) {
FRT_RPCRequest *req = _orb.AllocRPCRequest();
+ req->getStash().create<SP>(shared_from_this());
req->SetMethodName("mbus.getVersion");
_target.InvokeAsync(req, vespalib::to_s(timeout), this);
}
@@ -67,8 +68,9 @@ RPCTarget::isValid() const
}
void
-RPCTarget::RequestDone(FRT_RPCRequest *req)
+RPCTarget::RequestDone(FRT_RPCRequest *raw_req)
{
+ auto req = vespalib::ref_counted<FRT_RPCRequest>::internal_attach(raw_req);
HandlerList handlers;
{
std::lock_guard guard(_lock);
@@ -94,7 +96,6 @@ RPCTarget::RequestDone(FRT_RPCRequest *req)
_state = (_version.get() ? VERSION_RESOLVED : VERSION_NOT_RESOLVED);
}
_cond.notify_all();
- req->internal_subref();
}
} // namespace mbus
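Note on the rpctarget.cpp change: the outgoing request now carries a shared_ptr to the RPCTarget in its stash, so the target cannot be destroyed while mbus.getVersion is in flight, and the reply is wrapped in a ref_counted handle instead of being released with a manual internal_subref() call. A minimal standard-library sketch of the keepalive half (hypothetical names, not the FRT API):

    #include <functional>
    #include <memory>
    #include <vector>

    // Pending "requests" are modelled as stored callbacks; each captures a
    // shared_ptr to its originator so the originator outlives the reply.
    std::vector<std::function<void()>> pending;

    struct Resolver : std::enable_shared_from_this<Resolver> {
        bool resolved = false;
        void resolve_async() {
            auto self = shared_from_this();        // keepalive until the reply runs
            pending.push_back([self] { self->resolved = true; });
        }
    };

    int main() {
        {
            auto r = std::make_shared<Resolver>();
            r->resolve_async();
        }                                          // last external owner is gone here
        for (auto &reply : pending) reply();       // still safe: the callback owns it
        return 0;
    }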
diff --git a/messagebus/src/vespa/messagebus/network/rpctarget.h b/messagebus/src/vespa/messagebus/network/rpctarget.h
index fffffae64f7..77fcef5f48f 100644
--- a/messagebus/src/vespa/messagebus/network/rpctarget.h
+++ b/messagebus/src/vespa/messagebus/network/rpctarget.h
@@ -13,7 +13,7 @@ namespace mbus {
* target. Instances of this class are returned by {@link RPCService}, and
* cached by {@link RPCTargetPool}.
*/
-class RPCTarget : public FRT_IRequestWait {
+class RPCTarget : public FRT_IRequestWait, public std::enable_shared_from_this<RPCTarget> {
public:
/**
* Declares a version handler used when resolving the version of a target.
@@ -58,6 +58,7 @@ private:
Version_UP _version;
HandlerList _versionHandlers;
+ struct ctor_tag {};
public:
/**
* Convenience typedefs.
@@ -72,7 +73,10 @@ public:
* @param spec The connection spec of this target.
* @param orb The FRT supervisor to use when connecting to target.
*/
- RPCTarget(const string &name, FRT_Supervisor &orb);
+ RPCTarget(const string &name, FRT_Supervisor &orb, ctor_tag);
+ static SP create(const string &name, FRT_Supervisor &orb) {
+ return std::make_shared<RPCTarget>(name, orb, ctor_tag{});
+ }
/**
* Destructor. Subrefs the contained FRT target.
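Note on the rpctarget.h change: the constructor-tag plus static create() combination ensures every RPCTarget is owned by a shared_ptr before shared_from_this() is ever called. A standalone sketch of the idiom under illustrative names (not Vespa's):

    #include <memory>

    // The constructor is public so std::make_shared can call it, but it needs a
    // tag type only the class itself can construct, making create() the sole way
    // to obtain an instance.
    class Widget : public std::enable_shared_from_this<Widget> {
        struct ctor_tag {};                        // only members can make one of these
    public:
        explicit Widget(ctor_tag) {}               // public, but uncallable from outside
        static std::shared_ptr<Widget> create() {
            return std::make_shared<Widget>(ctor_tag{});
        }
        std::shared_ptr<Widget> self() { return shared_from_this(); }
    };

    int main() {
        auto w = Widget::create();
        return (w->self() == w) ? 0 : 1;
    }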
diff --git a/messagebus/src/vespa/messagebus/network/rpctargetpool.cpp b/messagebus/src/vespa/messagebus/network/rpctargetpool.cpp
index b403c65f863..db09b127114 100644
--- a/messagebus/src/vespa/messagebus/network/rpctargetpool.cpp
+++ b/messagebus/src/vespa/messagebus/network/rpctargetpool.cpp
@@ -97,7 +97,7 @@ RPCTargetPool::getTarget(FRT_Supervisor &orb, const RPCServiceAddress &address)
std::vector<RPCTarget::SP> targets;
targets.reserve(_numTargetsPerSpec);
for (size_t i(0); i < _numTargetsPerSpec; i++) {
- targets.push_back(std::make_shared<RPCTarget>(spec, orb));
+ targets.push_back(RPCTarget::create(spec, orb));
}
_targets.insert(TargetMap::value_type(spec, Entry(std::move(targets), currentTime)));
return _targets.find(spec)->second.getTarget(guard, currentTime);
diff --git a/storage/src/tests/distributor/distributor_bucket_space_test.cpp b/storage/src/tests/distributor/distributor_bucket_space_test.cpp
index 6e5863298ce..3ea4c1ca3c2 100644
--- a/storage/src/tests/distributor/distributor_bucket_space_test.cpp
+++ b/storage/src/tests/distributor/distributor_bucket_space_test.cpp
@@ -106,13 +106,13 @@ DistributorBucketSpaceTest::count_service_layer_buckets(const std::vector<Bucket
for (uint32_t i = 0; i < 3; ++i) {
switch (i) {
case 0:
- ideal_nodes = ideal_nodes_bundle.get_available_nodes();
+ ideal_nodes = ideal_nodes_bundle.available_nodes();
break;
case 1:
- ideal_nodes = ideal_nodes_bundle.get_available_nonretired_nodes();
+ ideal_nodes = ideal_nodes_bundle.available_nonretired_nodes();
break;
case 2:
- ideal_nodes = ideal_nodes_bundle.get_available_nonretired_or_maintenance_nodes();
+ ideal_nodes = ideal_nodes_bundle.available_nonretired_or_maintenance_nodes();
break;
default:
;
diff --git a/storage/src/tests/distributor/pendingmessagetrackertest.cpp b/storage/src/tests/distributor/pendingmessagetrackertest.cpp
index 3bfa1027a82..8277281206d 100644
--- a/storage/src/tests/distributor/pendingmessagetrackertest.cpp
+++ b/storage/src/tests/distributor/pendingmessagetrackertest.cpp
@@ -162,10 +162,14 @@ TEST_F(PendingMessageTrackerTest, simple) {
clock.setAbsoluteTimeInSeconds(1);
PendingMessageTracker tracker(compReg, 0);
+ std::ostringstream dummy; // Enable time tracking
+ tracker.reportStatus(dummy, framework::HttpUrlPath("/pendingmessages?order=bucket"));
+
auto remove = std::make_shared<api::RemoveCommand>(
makeDocumentBucket(document::BucketId(16, 1234)),
document::DocumentId("id:footype:testdoc:n=1234:foo"), 1001);
remove->setAddress(makeStorageAddress(0));
+
tracker.insert(remove);
{
@@ -238,6 +242,8 @@ TEST_F(PendingMessageTrackerTest, multiple_messages) {
compReg.setClock(clock);
clock.setAbsoluteTimeInSeconds(1);
PendingMessageTracker tracker(compReg, 0);
+ std::ostringstream dummy; // Enable time tracking
+ tracker.reportStatus(dummy, framework::HttpUrlPath("/pendingmessages?order=bucket"));
insertMessages(tracker);
diff --git a/storage/src/tests/distributor/statecheckerstest.cpp b/storage/src/tests/distributor/statecheckerstest.cpp
index 16854cd63c6..4ca4d70a816 100644
--- a/storage/src/tests/distributor/statecheckerstest.cpp
+++ b/storage/src/tests/distributor/statecheckerstest.cpp
@@ -1383,9 +1383,8 @@ std::string StateCheckersTest::testGarbageCollection(
getBucketDatabase().update(e);
NodeMaintenanceStatsTracker statsTracker;
- StateChecker::Context c(node_context(), operation_context(),
- getDistributorBucketSpace(), statsTracker,
- makeDocumentBucket(e.getBucketId()));
+ StateChecker::Context c(node_context(), operation_context(), getDistributorBucketSpace(),
+ statsTracker, makeDocumentBucket(e.getBucketId()));
getClock().setAbsoluteTimeInSeconds(nowTimestamp);
return testStateChecker(checker, c, false, PendingMessage(), includePriority, includeSchedulingPri);
}
@@ -1394,38 +1393,29 @@ TEST_F(StateCheckersTest, garbage_collection) {
// BucketId(17, 0) has id (and thus 'hash') 0x4400000000000000. With a
// check interval modulo of 3600, this implies a start point of 848.
- EXPECT_EQ("NO OPERATIONS GENERATED",
- testGarbageCollection(900, 3600 + 847, 3600));
+ EXPECT_EQ("NO OPERATIONS GENERATED", testGarbageCollection(900, 3600 + 847, 3600));
- EXPECT_EQ("[Needs garbage collection: Last check at 900, current time 4448, "
- "configured interval 3600]",
+ EXPECT_EQ("[Needs garbage collection: Last check at 900, current time 4448, configured interval 3600]",
testGarbageCollection(900, 3600 + 848, 3600));
- EXPECT_EQ("[Needs garbage collection: Last check at 3, current time 4000, "
- "configured interval 3600]",
+ EXPECT_EQ("[Needs garbage collection: Last check at 3, current time 4000, configured interval 3600]",
testGarbageCollection(3, 4000, 3600));
// GC start point 3648.
- EXPECT_EQ("NO OPERATIONS GENERATED",
- testGarbageCollection(3, 3647, 8000));
+ EXPECT_EQ("NO OPERATIONS GENERATED", testGarbageCollection(3, 3647, 8000));
- EXPECT_EQ("[Needs garbage collection: Last check at 3, current time 4000, "
- "configured interval 3600]",
+ EXPECT_EQ("[Needs garbage collection: Last check at 3, current time 4000, configured interval 3600]",
testGarbageCollection(3, 4000, 3600));
// GC explicitly disabled.
- EXPECT_EQ("NO OPERATIONS GENERATED",
- testGarbageCollection(3, 4000, 0));
+ EXPECT_EQ("NO OPERATIONS GENERATED", testGarbageCollection(3, 4000, 0));
- EXPECT_EQ("NO OPERATIONS GENERATED",
- testGarbageCollection(3, 3, 1));
+ EXPECT_EQ("NO OPERATIONS GENERATED", testGarbageCollection(3, 3, 1));
- EXPECT_EQ("[Needs garbage collection: Last check at 3, current time 4000, "
- "configured interval 300] (pri 200)",
+ EXPECT_EQ("[Needs garbage collection: Last check at 3, current time 4000, configured interval 300] (pri 200)",
testGarbageCollection(3, 4000, 300, 1, true));
- EXPECT_EQ("NO OPERATIONS GENERATED",
- testGarbageCollection(3850, 4000, 300, 1));
+ EXPECT_EQ("NO OPERATIONS GENERATED", testGarbageCollection(3850, 4000, 300, 1));
}
TEST_F(StateCheckersTest, gc_ops_are_prioritized_with_low_priority_category) {
@@ -1597,11 +1587,10 @@ TEST_F(StateCheckersTest, context_populates_ideal_state_containers) {
StateChecker::Context c(node_context(), operation_context(),
getDistributorBucketSpace(), statsTracker, makeDocumentBucket({17, 0}));
- ASSERT_THAT(c.idealState, ElementsAre(1, 3));
- // TODO replace with UnorderedElementsAre once we can build gmock without issues
- std::vector<uint16_t> ideal_state(c.unorderedIdealState.begin(), c.unorderedIdealState.end());
- std::sort(ideal_state.begin(), ideal_state.end());
- ASSERT_THAT(ideal_state, ElementsAre(1, 3));
+ ASSERT_THAT(c.idealState(), ElementsAre(1, 3));
+ for (uint16_t node : c.idealState()) {
+ ASSERT_TRUE(c.idealStateBundle.is_nonretired_or_maintenance(node));
+ }
}
namespace {
@@ -1616,8 +1605,7 @@ public:
explicit StateCheckerRunner(StateCheckersTest& fixture);
~StateCheckerRunner();
- StateCheckerRunner& addToDb(const document::BucketId& bid,
- const std::string& bucketInfo)
+ StateCheckerRunner& addToDb(const document::BucketId& bid, const std::string& bucketInfo)
{
_fixture.addNodesToBucketDB(bid, bucketInfo);
return *this;
@@ -1652,8 +1640,7 @@ public:
Checker checker;
StateChecker::Context c(_fixture.node_context(), _fixture.operation_context(),
_fixture.getDistributorBucketSpace(), _statsTracker, makeDocumentBucket(bid));
- _result = _fixture.testStateChecker(
- checker, c, false, StateCheckersTest::PendingMessage(), false);
+ _result = _fixture.testStateChecker(checker, c, false, StateCheckersTest::PendingMessage(), false);
}
const std::string& result() const { return _result; }
diff --git a/storage/src/vespa/storage/distributor/distributor_bucket_space.cpp b/storage/src/vespa/storage/distributor/distributor_bucket_space.cpp
index 5ccdb214854..299aaffb569 100644
--- a/storage/src/vespa/storage/distributor/distributor_bucket_space.cpp
+++ b/storage/src/vespa/storage/distributor/distributor_bucket_space.cpp
@@ -108,9 +108,7 @@ DistributorBucketSpace::owns_bucket_in_state(
}
bool
-DistributorBucketSpace::owns_bucket_in_state(
- const lib::ClusterState& clusterState,
- document::BucketId bucket) const
+DistributorBucketSpace::owns_bucket_in_state(const lib::ClusterState& clusterState, document::BucketId bucket) const
{
return owns_bucket_in_state(*_distribution, clusterState, bucket);
}
@@ -152,7 +150,7 @@ DistributorBucketSpace::get_ideal_service_layer_nodes_bundle(document::BucketId
setup_ideal_nodes_bundle(ideal_nodes_bundle, *_distribution, *_clusterState, bucket);
return ideal_nodes_bundle;
}
- document::BucketId lookup_bucket(is_split_group_bucket(bucket) ? bucket.getUsedBits() : _distribution_bits, bucket.getId());
+ document::BucketId lookup_bucket(_distribution_bits, bucket.getId());
auto itr = _ideal_nodes.find(lookup_bucket);
if (itr != _ideal_nodes.end()) {
return *itr->second;
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe.cpp b/storage/src/vespa/storage/distributor/distributor_stripe.cpp
index ede85c036b3..37d81f45ac1 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe.cpp
+++ b/storage/src/vespa/storage/distributor/distributor_stripe.cpp
@@ -314,7 +314,7 @@ DistributorStripe::enterRecoveryMode()
{
LOG(debug, "Entering recovery mode");
_schedulingMode = MaintenanceScheduler::RECOVERY_SCHEDULING_MODE;
- _scanner->reset();
+ _scanner->reset(); // Just drop accumulated stat on the floor.
// We enter recovery mode due to cluster state or distribution config changes.
// Until we have completed a new DB scan round, we don't know the state of our
// newly owned buckets and must not report stats for these out to the cluster
@@ -643,7 +643,7 @@ DistributorStripe::updateInternalMetricsForCompletedScan()
_bucketDBMetricUpdater.completeRound();
_bucketDbStats = _bucketDBMetricUpdater.getLastCompleteStats();
- _maintenanceStats = _scanner->getPendingMaintenanceStats();
+ _maintenanceStats = _scanner->reset();
auto new_space_stats = toBucketSpacesStats(_maintenanceStats.perNodeStats);
if (merge_no_longer_pending_edge(_bucketSpacesStats, new_space_stats)) {
_must_send_updated_host_info = true;
@@ -684,7 +684,6 @@ DistributorStripe::scanNextBucket()
updateInternalMetricsForCompletedScan();
leaveRecoveryMode();
send_updated_host_info_if_required();
- _scanner->reset();
} else {
const auto &distribution(_bucketSpaceRepo->get(scanResult.getBucketSpace()).getDistribution());
_bucketDBMetricUpdater.visit(scanResult.getEntry(), distribution.getRedundancy());
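Note on the distributor_stripe.cpp change: the scanner's reset() now returns the statistics accumulated during the completed scan round while clearing them, which replaces the separate getPendingMaintenanceStats() read plus the extra reset() in scanNextBucket(). A minimal sketch of such a take-and-clear accessor, assuming a trivial stats type:

    #include <cstdint>
    #include <utility>

    struct Stats { uint64_t buckets_scanned = 0; };

    class Scanner {
        Stats _pending;                            // accumulated during the current round
    public:
        void visit() { ++_pending.buckets_scanned; }
        Stats reset() {                            // hand back the round's stats, start fresh
            return std::exchange(_pending, Stats{});
        }
    };

    int main() {
        Scanner s;
        s.visit();
        Stats round = s.reset();                   // take + clear in a single call
        return (round.buckets_scanned == 1 && s.reset().buckets_scanned == 0) ? 0 : 1;
    }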
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe_component.cpp b/storage/src/vespa/storage/distributor/distributor_stripe_component.cpp
index 9a5fd595b1d..1121c5071c0 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe_component.cpp
+++ b/storage/src/vespa/storage/distributor/distributor_stripe_component.cpp
@@ -168,7 +168,7 @@ DistributorStripeComponent::update_bucket_database(
UpdateBucketDatabaseProcessor processor(getClock(),
found_down_node ? up_nodes : changed_nodes,
- bucketSpace.get_ideal_service_layer_nodes_bundle(bucket.getBucketId()).get_available_nodes(),
+ bucketSpace.get_ideal_service_layer_nodes_bundle(bucket.getBucketId()).available_nodes(),
(update_flags & DatabaseUpdate::RESET_TRUSTED) != 0);
bucketSpace.getBucketDatabase().process_update(bucket.getBucketId(), processor, (update_flags & DatabaseUpdate::CREATE_IF_NONEXISTING) != 0);
diff --git a/storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.cpp b/storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.cpp
index 0d37219356e..cc4eedd2a35 100644
--- a/storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.cpp
+++ b/storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.cpp
@@ -2,16 +2,27 @@
#include "ideal_service_layer_nodes_bundle.h"
#include <vespa/vdslib/distribution/idealnodecalculator.h>
+#include <vespa/vespalib/stllike/hash_set_insert.hpp>
+
namespace storage::distributor {
IdealServiceLayerNodesBundle::IdealServiceLayerNodesBundle() noexcept
: _available_nodes(),
_available_nonretired_nodes(),
- _available_nonretired_or_maintenance_nodes()
+ _available_nonretired_or_maintenance_nodes(),
+ _unordered_nonretired_or_maintenance_nodes()
{
}
+void
+IdealServiceLayerNodesBundle::set_available_nonretired_or_maintenance_nodes(std::vector<uint16_t> available_nonretired_or_maintenance_nodes) {
+ _available_nonretired_or_maintenance_nodes = std::move(available_nonretired_or_maintenance_nodes);
+ _unordered_nonretired_or_maintenance_nodes.clear();
+ _unordered_nonretired_or_maintenance_nodes.insert(_available_nonretired_or_maintenance_nodes.begin(),
+ _available_nonretired_or_maintenance_nodes.end());
+}
+
IdealServiceLayerNodesBundle::IdealServiceLayerNodesBundle(IdealServiceLayerNodesBundle &&) noexcept = default;
IdealServiceLayerNodesBundle::~IdealServiceLayerNodesBundle() = default;
diff --git a/storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.h b/storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.h
index 8f9cb8e14ec..9577ec09208 100644
--- a/storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.h
+++ b/storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.h
@@ -1,8 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
-#include <vector>
-#include <cstdint>
+#include <vespa/vespalib/stllike/hash_set.h>
namespace storage::distributor {
@@ -13,6 +12,7 @@ class IdealServiceLayerNodesBundle {
std::vector<uint16_t> _available_nodes;
std::vector<uint16_t> _available_nonretired_nodes;
std::vector<uint16_t> _available_nonretired_or_maintenance_nodes;
+ vespalib::hash_set<uint16_t> _unordered_nonretired_or_maintenance_nodes;
public:
IdealServiceLayerNodesBundle() noexcept;
IdealServiceLayerNodesBundle(IdealServiceLayerNodesBundle &&) noexcept;
@@ -24,14 +24,15 @@ public:
void set_available_nonretired_nodes(std::vector<uint16_t> available_nonretired_nodes) {
_available_nonretired_nodes = std::move(available_nonretired_nodes);
}
- void set_available_nonretired_or_maintenance_nodes(std::vector<uint16_t> available_nonretired_or_maintenance_nodes) {
- _available_nonretired_or_maintenance_nodes = std::move(available_nonretired_or_maintenance_nodes);
- }
- const std::vector<uint16_t> & get_available_nodes() const { return _available_nodes; }
- const std::vector<uint16_t> & get_available_nonretired_nodes() const { return _available_nonretired_nodes; }
- const std::vector<uint16_t> & get_available_nonretired_or_maintenance_nodes() const {
+ void set_available_nonretired_or_maintenance_nodes(std::vector<uint16_t> available_nonretired_or_maintenance_nodes);
+ const std::vector<uint16_t> & available_nodes() const noexcept { return _available_nodes; }
+ const std::vector<uint16_t> & available_nonretired_nodes() const noexcept { return _available_nonretired_nodes; }
+ const std::vector<uint16_t> & available_nonretired_or_maintenance_nodes() const noexcept {
return _available_nonretired_or_maintenance_nodes;
}
+ bool is_nonretired_or_maintenance(uint16_t node) const noexcept {
+ return _unordered_nonretired_or_maintenance_nodes.contains(node);
+ }
};
}
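Note on the ideal_service_layer_nodes_bundle changes: the bundle now keeps the nonretired-or-maintenance ideal nodes both as the original ordered vector and as a hash set, so is_nonretired_or_maintenance() becomes an O(1) membership probe instead of a linear scan. A sketch of the same dual representation using standard containers (vespalib::hash_set is Vespa's own type; the names below are illustrative):

    #include <cstdint>
    #include <unordered_set>
    #include <utility>
    #include <vector>

    class NodeBundle {
        std::vector<uint16_t>        _nodes;       // keeps the ideal-state order
        std::unordered_set<uint16_t> _node_set;    // O(1) membership lookups
    public:
        void set_nodes(std::vector<uint16_t> nodes) {
            _nodes = std::move(nodes);
            _node_set.clear();
            _node_set.insert(_nodes.begin(), _nodes.end());
        }
        const std::vector<uint16_t>& nodes() const noexcept { return _nodes; }
        bool contains(uint16_t node) const noexcept { return _node_set.count(node) != 0; }
    };

    int main() {
        NodeBundle bundle;
        bundle.set_nodes({1, 3});
        return (bundle.contains(3) && !bundle.contains(2)) ? 0 : 1;
    }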
diff --git a/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.cpp b/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.cpp
index 7ac99f5712f..592d92940d6 100644
--- a/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.cpp
+++ b/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.cpp
@@ -1,6 +1,8 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "node_maintenance_stats_tracker.h"
+#include <vespa/vespalib/stllike/hash_map.hpp>
+#include <vespa/vespalib/stllike/hash_map_equal.hpp>
#include <ostream>
namespace storage::distributor {
@@ -22,6 +24,54 @@ merge_bucket_spaces_stats(NodeMaintenanceStatsTracker::BucketSpacesStats& dest,
}
void
+NodeMaintenanceStatsTracker::incMovingOut(uint16_t node, document::BucketSpace bucketSpace) {
+ ++_node_stats[node][bucketSpace].movingOut;
+ ++_total_stats.movingOut;
+}
+
+void
+NodeMaintenanceStatsTracker::incSyncing(uint16_t node, document::BucketSpace bucketSpace) {
+ ++_node_stats[node][bucketSpace].syncing;
+ ++_total_stats.syncing;
+}
+
+void
+NodeMaintenanceStatsTracker::incCopyingIn(uint16_t node, document::BucketSpace bucketSpace) {
+ ++_node_stats[node][bucketSpace].copyingIn;
+ ++_total_stats.copyingIn;
+}
+
+void
+NodeMaintenanceStatsTracker::incCopyingOut(uint16_t node, document::BucketSpace bucketSpace) {
+ ++_node_stats[node][bucketSpace].copyingOut;
+ ++_total_stats.copyingOut;
+}
+
+void
+NodeMaintenanceStatsTracker::incTotal(uint16_t node, document::BucketSpace bucketSpace) {
+ ++_node_stats[node][bucketSpace].total;
+ ++_total_stats.total;
+}
+
+const NodeMaintenanceStats&
+NodeMaintenanceStatsTracker::forNode(uint16_t node, document::BucketSpace bucketSpace) const {
+ auto nodeItr = _node_stats.find(node);
+ if (nodeItr != _node_stats.end()) {
+ auto bucketSpaceItr = nodeItr->second.find(bucketSpace);
+ if (bucketSpaceItr != nodeItr->second.end()) {
+ return bucketSpaceItr->second;
+ }
+ }
+ return _emptyNodeMaintenanceStats;
+}
+
+bool
+NodeMaintenanceStatsTracker::operator==(const NodeMaintenanceStatsTracker& rhs) const noexcept {
+ return ((_node_stats == rhs._node_stats) &&
+ (_max_observed_time_since_last_gc == rhs._max_observed_time_since_last_gc));
+}
+
+void
NodeMaintenanceStatsTracker::merge(const NodeMaintenanceStatsTracker& rhs)
{
for (const auto& entry : rhs._node_stats) {
@@ -45,13 +95,24 @@ operator<<(std::ostream& os, const NodeMaintenanceStats& stats)
return os;
}
-NodeMaintenanceStatsTracker::NodeMaintenanceStatsTracker()
+NodeMaintenanceStatsTracker::NodeMaintenanceStatsTracker() noexcept
: _node_stats(),
_total_stats(),
_max_observed_time_since_last_gc(0)
{}
+NodeMaintenanceStatsTracker::NodeMaintenanceStatsTracker(NodeMaintenanceStatsTracker &&) noexcept = default;
+NodeMaintenanceStatsTracker & NodeMaintenanceStatsTracker::operator =(NodeMaintenanceStatsTracker &&) noexcept = default;
+NodeMaintenanceStatsTracker::NodeMaintenanceStatsTracker(const NodeMaintenanceStatsTracker &) = default;
NodeMaintenanceStatsTracker::~NodeMaintenanceStatsTracker() = default;
+void
+NodeMaintenanceStatsTracker::reset(size_t nodes) {
+ _node_stats.clear();
+ _node_stats.resize(nodes);
+ _total_stats = NodeMaintenanceStats();
+ _max_observed_time_since_last_gc = vespalib::duration::zero();
+}
+
}
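
Editor's note: with the increment and lookup methods moved out of line, this translation unit instantiates vespalib::hash_map, which is why hash_map.hpp / hash_map_equal.hpp are now included here while the header only needs hash_map.h. The new reset(size_t) clears the per-node table but keeps it sized for the expected node count. A rough equivalent of that reset pattern with standard containers (std::unordered_map::reserve standing in for hash_map::resize; all types here are stand-ins):

    #include <chrono>
    #include <cstdint>
    #include <unordered_map>

    struct StatsSketch { uint64_t movingOut = 0, syncing = 0, total = 0; };

    class TrackerSketch {
        std::unordered_map<uint16_t, StatsSketch> _node_stats;
        StatsSketch _total_stats;
        std::chrono::nanoseconds _max_gc_age{0};
    public:
        void incMovingOut(uint16_t node) { ++_node_stats[node].movingOut; ++_total_stats.movingOut; }
        // Clear the stats but keep the table sized for `nodes` entries so the next
        // maintenance scan round does not pay for rehashing while it repopulates.
        void reset(size_t nodes) {
            _node_stats.clear();
            _node_stats.reserve(nodes);
            _total_stats = StatsSketch();
            _max_gc_age = std::chrono::nanoseconds::zero();
        }
        size_t numNodes() const { return _node_stats.size(); }
    };
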
diff --git a/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h b/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h
index 3818dd4bacb..84705fbca9d 100644
--- a/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h
+++ b/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h
@@ -1,9 +1,9 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
-#include <unordered_map>
#include <vespa/document/bucket/bucketspace.h>
#include <vespa/vespalib/util/time.h>
+#include <vespa/vespalib/stllike/hash_map.h>
namespace storage::distributor {
@@ -51,8 +51,8 @@ std::ostream& operator<<(std::ostream&, const NodeMaintenanceStats&);
class NodeMaintenanceStatsTracker
{
public:
- using BucketSpacesStats = std::unordered_map<document::BucketSpace, NodeMaintenanceStats, document::BucketSpace::hash>;
- using PerNodeStats = std::unordered_map<uint16_t, BucketSpacesStats>;
+ using BucketSpacesStats = vespalib::hash_map<document::BucketSpace, NodeMaintenanceStats, document::BucketSpace::hash>;
+ using PerNodeStats = vespalib::hash_map<uint16_t, BucketSpacesStats>;
private:
PerNodeStats _node_stats;
@@ -62,33 +62,23 @@ private:
static const NodeMaintenanceStats _emptyNodeMaintenanceStats;
public:
- NodeMaintenanceStatsTracker();
+ NodeMaintenanceStatsTracker() noexcept;
+ NodeMaintenanceStatsTracker(NodeMaintenanceStatsTracker &&) noexcept;
+ NodeMaintenanceStatsTracker & operator =(NodeMaintenanceStatsTracker &&) noexcept;
+ NodeMaintenanceStatsTracker(const NodeMaintenanceStatsTracker &);
~NodeMaintenanceStatsTracker();
+ void reset(size_t nodes);
+ size_t numNodes() const { return _node_stats.size(); }
- void incMovingOut(uint16_t node, document::BucketSpace bucketSpace) {
- ++_node_stats[node][bucketSpace].movingOut;
- ++_total_stats.movingOut;
- }
+ void incMovingOut(uint16_t node, document::BucketSpace bucketSpace);
- void incSyncing(uint16_t node, document::BucketSpace bucketSpace) {
- ++_node_stats[node][bucketSpace].syncing;
- ++_total_stats.syncing;
- }
+ void incSyncing(uint16_t node, document::BucketSpace bucketSpace);
- void incCopyingIn(uint16_t node, document::BucketSpace bucketSpace) {
- ++_node_stats[node][bucketSpace].copyingIn;
- ++_total_stats.copyingIn;
- }
+ void incCopyingIn(uint16_t node, document::BucketSpace bucketSpace);
- void incCopyingOut(uint16_t node, document::BucketSpace bucketSpace) {
- ++_node_stats[node][bucketSpace].copyingOut;
- ++_total_stats.copyingOut;
- }
+ void incCopyingOut(uint16_t node, document::BucketSpace bucketSpace);
- void incTotal(uint16_t node, document::BucketSpace bucketSpace) {
- ++_node_stats[node][bucketSpace].total;
- ++_total_stats.total;
- }
+ void incTotal(uint16_t node, document::BucketSpace bucketSpace);
void update_observed_time_since_last_gc(vespalib::duration time_since_gc) noexcept {
_max_observed_time_since_last_gc = std::max(time_since_gc, _max_observed_time_since_last_gc);
@@ -98,18 +88,9 @@ public:
* Returned statistics for a given node index and bucket space, or all zero statistics
* if none have been recorded yet
*/
- const NodeMaintenanceStats& forNode(uint16_t node, document::BucketSpace bucketSpace) const {
- auto nodeItr = _node_stats.find(node);
- if (nodeItr != _node_stats.end()) {
- auto bucketSpaceItr = nodeItr->second.find(bucketSpace);
- if (bucketSpaceItr != nodeItr->second.end()) {
- return bucketSpaceItr->second;
- }
- }
- return _emptyNodeMaintenanceStats;
- }
+ const NodeMaintenanceStats& forNode(uint16_t node, document::BucketSpace bucketSpace) const;
- const PerNodeStats& perNodeStats() const {
+ const PerNodeStats& perNodeStats() const noexcept {
return _node_stats;
}
@@ -124,10 +105,7 @@ public:
return _max_observed_time_since_last_gc;
}
- bool operator==(const NodeMaintenanceStatsTracker& rhs) const {
- return ((_node_stats == rhs._node_stats) &&
- (_max_observed_time_since_last_gc == rhs._max_observed_time_since_last_gc));
- }
+ bool operator==(const NodeMaintenanceStatsTracker& rhs) const noexcept;
void merge(const NodeMaintenanceStatsTracker& rhs);
};
diff --git a/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp b/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp
index afcbef32584..e0c1abaaffa 100644
--- a/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp
+++ b/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp
@@ -41,11 +41,20 @@ SimpleMaintenanceScanner::PendingMaintenanceStats::merge(const PendingMaintenanc
perNodeStats.merge(rhs.perNodeStats);
}
-SimpleMaintenanceScanner::PendingMaintenanceStats::PendingMaintenanceStats() = default;
+SimpleMaintenanceScanner::PendingMaintenanceStats::PendingMaintenanceStats() noexcept = default;
SimpleMaintenanceScanner::PendingMaintenanceStats::~PendingMaintenanceStats() = default;
SimpleMaintenanceScanner::PendingMaintenanceStats::PendingMaintenanceStats(const PendingMaintenanceStats &) = default;
+SimpleMaintenanceScanner::PendingMaintenanceStats::PendingMaintenanceStats(PendingMaintenanceStats &&) noexcept = default;
SimpleMaintenanceScanner::PendingMaintenanceStats &
-SimpleMaintenanceScanner::PendingMaintenanceStats::operator = (const PendingMaintenanceStats &) = default;
+SimpleMaintenanceScanner::PendingMaintenanceStats::operator = (PendingMaintenanceStats &&) noexcept = default;
+
+SimpleMaintenanceScanner::PendingMaintenanceStats
+SimpleMaintenanceScanner::PendingMaintenanceStats::reset() {
+ PendingMaintenanceStats prev = std::move(*this);
+ global = GlobalMaintenanceStats();
+ perNodeStats.reset(prev.perNodeStats.numNodes());
+ return prev;
+}
MaintenanceScanner::ScanResult
SimpleMaintenanceScanner::scanNext()
@@ -68,12 +77,12 @@ SimpleMaintenanceScanner::scanNext()
}
}
-void
+SimpleMaintenanceScanner::PendingMaintenanceStats
SimpleMaintenanceScanner::reset()
{
_bucketCursor = document::BucketId();
_bucketSpaceItr = _bucketSpaceRepo.begin();
- _pendingMaintenance = PendingMaintenanceStats();
+ return _pendingMaintenance.reset();
}
void
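
Editor's note: reset() now hands the completed round's statistics back to the caller by moving them out before reinitializing, instead of discarding them. A small sketch of that move-out-and-reinitialize idiom (PendingStats and publish() below are stand-ins, not the Vespa types):

    #include <utility>
    #include <vector>

    struct PendingStats {
        std::vector<unsigned> perNode;

        // Return the finished round and start a fresh one sized like the last one.
        PendingStats reset() {
            PendingStats prev = std::move(*this);
            perNode.assign(prev.perNode.size(), 0);
            return prev;
        }
    };

    // Typical use: auto finished = stats.reset(); publish(finished);
    // while the scanner accumulates statistics for the next round.
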
diff --git a/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.h b/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.h
index 7af61815c31..35b022c7af7 100644
--- a/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.h
+++ b/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.h
@@ -23,11 +23,14 @@ public:
void merge(const GlobalMaintenanceStats& rhs) noexcept;
};
struct PendingMaintenanceStats {
- PendingMaintenanceStats();
+ PendingMaintenanceStats() noexcept;
PendingMaintenanceStats(const PendingMaintenanceStats &);
- PendingMaintenanceStats &operator = (const PendingMaintenanceStats &);
+ PendingMaintenanceStats &operator = (const PendingMaintenanceStats &) = delete;
+ PendingMaintenanceStats(PendingMaintenanceStats &&) noexcept;
+ PendingMaintenanceStats &operator = (PendingMaintenanceStats &&) noexcept;
~PendingMaintenanceStats();
- GlobalMaintenanceStats global;
+ PendingMaintenanceStats reset();
+ GlobalMaintenanceStats global;
NodeMaintenanceStatsTracker perNodeStats;
void merge(const PendingMaintenanceStats& rhs);
@@ -50,11 +53,12 @@ public:
~SimpleMaintenanceScanner() override;
ScanResult scanNext() override;
- void reset();
+ PendingMaintenanceStats reset();
// TODO: move out into own interface!
void prioritizeBucket(const document::Bucket &id);
+ // TODO Only for testing
const PendingMaintenanceStats& getPendingMaintenanceStats() const noexcept {
return _pendingMaintenance;
}
diff --git a/storage/src/vespa/storage/distributor/messagetracker.h b/storage/src/vespa/storage/distributor/messagetracker.h
index 51d29551de0..a0234f425a0 100644
--- a/storage/src/vespa/storage/distributor/messagetracker.h
+++ b/storage/src/vespa/storage/distributor/messagetracker.h
@@ -26,8 +26,8 @@ public:
};
MessageTracker(const ClusterContext& cluster_context);
- MessageTracker(MessageTracker&&) = default;
- MessageTracker& operator=(MessageTracker&&) = delete;
+ MessageTracker(MessageTracker&&) noexcept = default;
+ MessageTracker& operator=(MessageTracker&&) noexcept = delete;
MessageTracker(const MessageTracker &) = delete;
MessageTracker& operator=(const MessageTracker&) = delete;
~MessageTracker();
diff --git a/storage/src/vespa/storage/distributor/nodeinfo.cpp b/storage/src/vespa/storage/distributor/nodeinfo.cpp
index 6bb1949d606..3e645f57393 100644
--- a/storage/src/vespa/storage/distributor/nodeinfo.cpp
+++ b/storage/src/vespa/storage/distributor/nodeinfo.cpp
@@ -5,14 +5,16 @@
namespace storage::distributor {
-NodeInfo::NodeInfo(const framework::Clock& clock)
+NodeInfo::NodeInfo(const framework::Clock& clock) noexcept
: _clock(clock) {}
-uint32_t NodeInfo::getPendingCount(uint16_t idx) const {
+uint32_t
+NodeInfo::getPendingCount(uint16_t idx) const {
return getNode(idx)._pending;
}
-bool NodeInfo::isBusy(uint16_t idx) const {
+bool
+NodeInfo::isBusy(uint16_t idx) const {
const SingleNodeInfo& info = getNode(idx);
if (info._busyUntilTime.time_since_epoch().count() != 0) {
if (_clock.getMonotonicTime() > info._busyUntilTime) {
@@ -25,15 +27,18 @@ bool NodeInfo::isBusy(uint16_t idx) const {
return false;
}
-void NodeInfo::setBusy(uint16_t idx, vespalib::duration for_duration) {
+void
+NodeInfo::setBusy(uint16_t idx, vespalib::duration for_duration) {
getNode(idx)._busyUntilTime = _clock.getMonotonicTime() + for_duration;
}
-void NodeInfo::incPending(uint16_t idx) {
+void
+NodeInfo::incPending(uint16_t idx) {
getNode(idx)._pending++;
}
-void NodeInfo::decPending(uint16_t idx) {
+void
+NodeInfo::decPending(uint16_t idx) {
SingleNodeInfo& info = getNode(idx);
if (info._pending > 0) {
@@ -41,12 +46,14 @@ void NodeInfo::decPending(uint16_t idx) {
}
}
-void NodeInfo::clearPending(uint16_t idx) {
+void
+NodeInfo::clearPending(uint16_t idx) {
SingleNodeInfo& info = getNode(idx);
info._pending = 0;
}
-NodeInfo::SingleNodeInfo& NodeInfo::getNode(uint16_t idx) {
+NodeInfo::SingleNodeInfo&
+NodeInfo::getNode(uint16_t idx) {
const auto index_lbound = static_cast<size_t>(idx) + 1;
while (_nodes.size() < index_lbound) {
_nodes.emplace_back();
@@ -55,7 +62,8 @@ NodeInfo::SingleNodeInfo& NodeInfo::getNode(uint16_t idx) {
return _nodes[idx];
}
-const NodeInfo::SingleNodeInfo& NodeInfo::getNode(uint16_t idx) const {
+const NodeInfo::SingleNodeInfo&
+NodeInfo::getNode(uint16_t idx) const {
const auto index_lbound = static_cast<size_t>(idx) + 1;
while (_nodes.size() < index_lbound) {
_nodes.emplace_back();
diff --git a/storage/src/vespa/storage/distributor/nodeinfo.h b/storage/src/vespa/storage/distributor/nodeinfo.h
index 7f0716d7804..446739ca7e9 100644
--- a/storage/src/vespa/storage/distributor/nodeinfo.h
+++ b/storage/src/vespa/storage/distributor/nodeinfo.h
@@ -17,30 +17,24 @@ namespace storage::distributor {
class NodeInfo {
public:
- explicit NodeInfo(const framework::Clock& clock);
-
+ explicit NodeInfo(const framework::Clock& clock) noexcept;
uint32_t getPendingCount(uint16_t idx) const;
-
bool isBusy(uint16_t idx) const;
-
void setBusy(uint16_t idx, vespalib::duration for_duration);
-
void incPending(uint16_t idx);
-
void decPending(uint16_t idx);
-
void clearPending(uint16_t idx);
private:
struct SingleNodeInfo {
- SingleNodeInfo() : _pending(0), _busyUntilTime() {}
+ SingleNodeInfo() noexcept : _pending(0), _busyUntilTime() {}
- uint32_t _pending;
+ uint32_t _pending;
mutable vespalib::steady_time _busyUntilTime;
};
mutable std::vector<SingleNodeInfo> _nodes;
- const framework::Clock& _clock;
+ const framework::Clock& _clock;
const SingleNodeInfo& getNode(uint16_t idx) const;
SingleNodeInfo& getNode(uint16_t idx);
diff --git a/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp
index f76b6495443..86ea9a559f5 100644
--- a/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp
@@ -67,7 +67,8 @@ PutOperation::insertDatabaseEntryAndScheduleCreateBucket(const OperationTargetLi
assert(!multipleBuckets);
(void) multipleBuckets;
BucketDatabase::Entry entry(_bucket_space.getBucketDatabase().get(lastBucket));
- const std::vector<uint16_t> & idealState = _bucket_space.get_ideal_service_layer_nodes_bundle(lastBucket).get_available_nodes();
+ const std::vector<uint16_t> & idealState = _bucket_space.get_ideal_service_layer_nodes_bundle(
+ lastBucket).available_nodes();
active = ActiveCopy::calculate(idealState, _bucket_space.getDistribution(), entry,
_op_ctx.distributor_config().max_activation_inhibited_out_of_sync_groups());
LOG(debug, "Active copies for bucket %s: %s", entry.getBucketId().toString().c_str(), active.toString().c_str());
diff --git a/storage/src/vespa/storage/distributor/operationtargetresolverimpl.cpp b/storage/src/vespa/storage/distributor/operationtargetresolverimpl.cpp
index e3aa7e1ef6e..fcd27fdab74 100644
--- a/storage/src/vespa/storage/distributor/operationtargetresolverimpl.cpp
+++ b/storage/src/vespa/storage/distributor/operationtargetresolverimpl.cpp
@@ -71,7 +71,8 @@ BucketInstanceList::populate(const document::BucketId& specificId, const Distrib
std::vector<BucketDatabase::Entry> entries;
db.getParents(specificId, entries);
for (const auto & entry : entries) {
- lib::IdealNodeList idealNodes(make_node_list(distributor_bucket_space.get_ideal_service_layer_nodes_bundle(entry.getBucketId()).get_available_nonretired_or_maintenance_nodes()));
+ lib::IdealNodeList idealNodes(make_node_list(distributor_bucket_space.get_ideal_service_layer_nodes_bundle(
+ entry.getBucketId()).available_nonretired_or_maintenance_nodes()));
add(entry, idealNodes);
}
}
@@ -127,7 +128,8 @@ BucketInstanceList::extendToEnoughCopies(
: _instances[0]._bucket);
newTarget = leastSpecificLeafBucketInSubtree(newTarget, mostSpecificId, db);
- lib::IdealNodeList idealNodes(make_node_list(distributor_bucket_space.get_ideal_service_layer_nodes_bundle(newTarget).get_available_nonretired_nodes()));
+ lib::IdealNodeList idealNodes(make_node_list(
+ distributor_bucket_space.get_ideal_service_layer_nodes_bundle(newTarget).available_nonretired_nodes()));
for (uint32_t i=0; i<idealNodes.size(); ++i) {
if (!contains(idealNodes[i])) {
_instances.push_back(BucketInstance(
diff --git a/storage/src/vespa/storage/distributor/pendingmessagetracker.cpp b/storage/src/vespa/storage/distributor/pendingmessagetracker.cpp
index 5b8fa6b69e3..7b3cdacf702 100644
--- a/storage/src/vespa/storage/distributor/pendingmessagetracker.cpp
+++ b/storage/src/vespa/storage/distributor/pendingmessagetracker.cpp
@@ -17,6 +17,7 @@ PendingMessageTracker::PendingMessageTracker(framework::ComponentRegister& cr, u
_nodeInfo(_component.getClock()),
_nodeBusyDuration(60s),
_deferred_read_tasks(),
+ _trackTime(false),
_lock()
{
_component.registerStatusPage(*this);
@@ -69,6 +70,13 @@ pairAsRange(Pair pair)
return PairAsRange<Pair>(std::move(pair));
}
+document::Bucket
+getBucket(const api::StorageMessage & msg) {
+ return (msg.getType() != api::MessageType::REQUESTBUCKETINFO)
+ ? msg.getBucket()
+ : document::Bucket(msg.getBucket().getBucketSpace(), dynamic_cast<const api::RequestBucketInfoCommand&>(msg).super_bucket_id());
+}
+
}
std::vector<uint64_t>
@@ -91,17 +99,19 @@ PendingMessageTracker::clearMessagesForNode(uint16_t node)
void
PendingMessageTracker::insert(const std::shared_ptr<api::StorageMessage>& msg)
{
- std::lock_guard guard(_lock);
if (msg->getAddress()) {
// TODO STRIPE reevaluate if getBucket() on RequestBucketInfo msgs should transparently return superbucket..!
- document::Bucket bucket = (msg->getType() != api::MessageType::REQUESTBUCKETINFO)
- ? msg->getBucket()
- : document::Bucket(msg->getBucket().getBucketSpace(),
- dynamic_cast<api::RequestBucketInfoCommand&>(*msg).super_bucket_id());
- _messages.emplace(currentTime(), msg->getType().getId(), msg->getPriority(), msg->getMsgId(),
- bucket, msg->getAddress()->getIndex());
-
- _nodeInfo.incPending(msg->getAddress()->getIndex());
+ document::Bucket bucket = getBucket(*msg);
+ {
+            // We do not start tracking time until the HTML status page has been requested at least once.
+ // Time tracking is only used for presenting pending messages for debugging.
+ TimePoint now = (_trackTime.load(std::memory_order_relaxed)) ? currentTime() : TimePoint();
+ std::lock_guard guard(_lock);
+ _messages.emplace(now, msg->getType().getId(), msg->getPriority(), msg->getMsgId(),
+ bucket, msg->getAddress()->getIndex());
+
+ _nodeInfo.incPending(msg->getAddress()->getIndex());
+ }
LOG(debug, "Sending message %s with id %" PRIu64 " to %s",
msg->toString().c_str(), msg->getMsgId(), msg->getAddress()->toString().c_str());
@@ -111,15 +121,13 @@ PendingMessageTracker::insert(const std::shared_ptr<api::StorageMessage>& msg)
document::Bucket
PendingMessageTracker::reply(const api::StorageReply& r)
{
- std::unique_lock guard(_lock);
document::Bucket bucket;
-
LOG(debug, "Got reply: %s", r.toString().c_str());
uint64_t msgId = r.getMsgId();
+ std::unique_lock guard(_lock);
MessagesByMsgId& msgs = boost::multi_index::get<0>(_messages);
MessagesByMsgId::iterator iter = msgs.find(msgId);
-
if (iter != msgs.end()) {
bucket = iter->bucket;
_nodeInfo.decPending(r.getAddress()->getIndex());
@@ -127,7 +135,6 @@ PendingMessageTracker::reply(const api::StorageReply& r)
if (code == api::ReturnCode::BUSY || code == api::ReturnCode::TIMEOUT) {
_nodeInfo.setBusy(r.getAddress()->getIndex(), _nodeBusyDuration);
}
- LOG(debug, "Erased message with id %" PRIu64 " for bucket %s", msgId, bucket.toString().c_str());
msgs.erase(msgId);
auto deferred_tasks = get_deferred_ops_if_bucket_writes_drained(bucket);
// Deferred tasks may try to send messages, which in turn will invoke the PendingMessageTracker.
@@ -139,6 +146,7 @@ PendingMessageTracker::reply(const api::StorageReply& r)
for (auto& task : deferred_tasks) {
task->run(TaskRunState::OK);
}
+ LOG(debug, "Erased message with id %" PRIu64 " for bucket %s", msgId, bucket.toString().c_str());
}
return bucket;
@@ -328,6 +336,7 @@ PendingMessageTracker::getStatusPerNode(std::ostream& out) const
void
PendingMessageTracker::reportHtmlStatus(std::ostream& out, const framework::HttpUrlPath& path) const
{
+ _trackTime.store(true, std::memory_order_relaxed);
if (!path.hasAttribute("order")) {
getStatusStartPage(out);
} else if (path.getAttribute("order") == "bucket") {
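
Editor's note: timestamps on pending messages are now only sampled once someone has requested the HTML status page; the hot insert path reads a relaxed atomic flag outside the lock and otherwise stores a default (epoch) time point. A minimal standard-library sketch of the same lazy-timestamping pattern (types here are illustrative, not the Vespa ones):

    #include <atomic>
    #include <chrono>
    #include <mutex>

    class LazyTimestamps {
        mutable std::atomic<bool> _trackTime{false};
        std::mutex _lock;
        std::chrono::steady_clock::time_point _lastInsert{};
    public:
        void insert() {
            // Only pay for a clock read when debugging output has been requested.
            auto now = _trackTime.load(std::memory_order_relaxed)
                       ? std::chrono::steady_clock::now()
                       : std::chrono::steady_clock::time_point();
            std::lock_guard<std::mutex> guard(_lock);
            _lastInsert = now;
        }
        void reportStatus() const {
            // Flip the flag on the first status request; later inserts track time.
            _trackTime.store(true, std::memory_order_relaxed);
        }
    };
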
diff --git a/storage/src/vespa/storage/distributor/pendingmessagetracker.h b/storage/src/vespa/storage/distributor/pendingmessagetracker.h
index fb672d5ee31..4b5655d3f3c 100644
--- a/storage/src/vespa/storage/distributor/pendingmessagetracker.h
+++ b/storage/src/vespa/storage/distributor/pendingmessagetracker.h
@@ -178,11 +178,12 @@ private:
document::Bucket::hash
>;
- Messages _messages;
- framework::Component _component;
- NodeInfo _nodeInfo;
- vespalib::duration _nodeBusyDuration;
- DeferredBucketTaskMap _deferred_read_tasks;
+ Messages _messages;
+ framework::Component _component;
+ NodeInfo _nodeInfo;
+ vespalib::duration _nodeBusyDuration;
+ DeferredBucketTaskMap _deferred_read_tasks;
+ mutable std::atomic<bool> _trackTime;
// Since distributor is currently single-threaded, this will only
// contend when status page is being accessed. It is, however, required
diff --git a/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp b/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp
index fe384a68d72..a4295613fd2 100644
--- a/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp
+++ b/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp
@@ -65,9 +65,7 @@ PersistenceMessageTrackerImpl::fail(MessageSender& sender, const api::ReturnCode
}
uint16_t
-PersistenceMessageTrackerImpl::receiveReply(
- MessageSender& sender,
- api::BucketInfoReply& reply)
+PersistenceMessageTrackerImpl::receiveReply(MessageSender& sender, api::BucketInfoReply& reply)
{
uint16_t node = handleReply(reply);
@@ -79,9 +77,7 @@ PersistenceMessageTrackerImpl::receiveReply(
}
void
-PersistenceMessageTrackerImpl::revert(
- MessageSender& sender,
- const std::vector<BucketNodePair>& revertNodes)
+PersistenceMessageTrackerImpl::revert(MessageSender& sender, const std::vector<BucketNodePair>& revertNodes)
{
if (_revertTimestamp != 0) {
// Since we're reverting, all received bucket info is voided.
@@ -156,24 +152,18 @@ PersistenceMessageTrackerImpl::canSendReplyEarly() const
}
void
-PersistenceMessageTrackerImpl::addBucketInfoFromReply(
- uint16_t node,
- const api::BucketInfoReply& reply)
+PersistenceMessageTrackerImpl::addBucketInfoFromReply(uint16_t node, const api::BucketInfoReply& reply)
{
document::Bucket bucket(reply.getBucket());
const api::BucketInfo& bucketInfo(reply.getBucketInfo());
if (reply.hasBeenRemapped()) {
LOG(debug, "Bucket %s: Received remapped bucket info %s from node %d",
- bucket.toString().c_str(),
- bucketInfo.toString().c_str(),
- node);
+ bucket.toString().c_str(), bucketInfo.toString().c_str(), node);
_remapBucketInfo[bucket].emplace_back(_op_ctx.generate_unique_timestamp(), node, bucketInfo);
} else {
LOG(debug, "Bucket %s: Received bucket info %s from node %d",
- bucket.toString().c_str(),
- bucketInfo.toString().c_str(),
- node);
+ bucket.toString().c_str(), bucketInfo.toString().c_str(), node);
_bucketInfo[bucket].emplace_back(_op_ctx.generate_unique_timestamp(), node, bucketInfo);
}
}
@@ -182,17 +172,12 @@ void
PersistenceMessageTrackerImpl::logSuccessfulReply(uint16_t node, const api::BucketInfoReply& reply) const
{
LOG(spam, "Bucket %s: Received successful reply %s",
- reply.getBucketId().toString().c_str(),
- reply.toString().c_str());
+ reply.getBucketId().toString().c_str(), reply.toString().c_str());
if (!reply.getBucketInfo().valid()) {
- LOG(error,
- "Reply %s from node %d contained invalid bucket "
- "information %s. This is a bug! Please report "
- "this to the Vespa team",
- reply.toString().c_str(),
- node,
- reply.getBucketInfo().toString().c_str());
+ LOG(error, "Reply %s from node %d contained invalid bucket information %s. This is a bug! "
+ "Please report this to the Vespa team",
+ reply.toString().c_str(), node, reply.getBucketInfo().toString().c_str());
}
}
@@ -236,12 +221,8 @@ void
PersistenceMessageTrackerImpl::updateFailureResult(const api::BucketInfoReply& reply)
{
LOG(debug, "Bucket %s: Received failed reply %s with result %s",
- reply.getBucketId().toString().c_str(),
- reply.toString().c_str(),
- reply.getResult().toString().c_str());
- if (reply.getResult().getResult() >
- _reply->getResult().getResult())
- {
+ reply.getBucketId().toString().c_str(), reply.toString().c_str(), reply.getResult().toString().c_str());
+ if (reply.getResult().getResult() > _reply->getResult().getResult()) {
_reply->setResult(reply.getResult());
}
@@ -249,12 +230,9 @@ PersistenceMessageTrackerImpl::updateFailureResult(const api::BucketInfoReply& r
}
void
-PersistenceMessageTrackerImpl::handleCreateBucketReply(
- api::BucketInfoReply& reply,
- uint16_t node)
+PersistenceMessageTrackerImpl::handleCreateBucketReply(api::BucketInfoReply& reply, uint16_t node)
{
- LOG(spam, "Received CreateBucket reply for %s from node %u",
- reply.getBucketId().toString().c_str(), node);
+ LOG(spam, "Received CreateBucket reply for %s from node %u", reply.getBucketId().toString().c_str(), node);
if (!reply.getResult().success()
&& reply.getResult().getResult() != api::ReturnCode::EXISTS)
{
@@ -271,9 +249,7 @@ PersistenceMessageTrackerImpl::handleCreateBucketReply(
}
void
-PersistenceMessageTrackerImpl::handlePersistenceReply(
- api::BucketInfoReply& reply,
- uint16_t node)
+PersistenceMessageTrackerImpl::handlePersistenceReply(api::BucketInfoReply& reply, uint16_t node)
{
++_n_persistence_replies_total;
if (reply.getBucketInfo().valid()) {
@@ -298,10 +274,7 @@ PersistenceMessageTrackerImpl::transfer_trace_state_to_reply()
}
void
-PersistenceMessageTrackerImpl::updateFromReply(
- MessageSender& sender,
- api::BucketInfoReply& reply,
- uint16_t node)
+PersistenceMessageTrackerImpl::updateFromReply(MessageSender& sender, api::BucketInfoReply& reply, uint16_t node)
{
_trace.addChild(reply.steal_trace());
diff --git a/storage/src/vespa/storage/distributor/persistencemessagetracker.h b/storage/src/vespa/storage/distributor/persistencemessagetracker.h
index ecc4732696b..9b06547dd98 100644
--- a/storage/src/vespa/storage/distributor/persistencemessagetracker.h
+++ b/storage/src/vespa/storage/distributor/persistencemessagetracker.h
@@ -8,7 +8,6 @@
#include <vespa/storageapi/messageapi/bucketinfocommand.h>
#include <vespa/storageapi/messageapi/bucketinforeply.h>
-
namespace storage::distributor {
struct PersistenceMessageTracker {
diff --git a/storage/src/vespa/storage/distributor/statechecker.cpp b/storage/src/vespa/storage/distributor/statechecker.cpp
index 27a60b73716..cd8b6e934d4 100644
--- a/storage/src/vespa/storage/distributor/statechecker.cpp
+++ b/storage/src/vespa/storage/distributor/statechecker.cpp
@@ -51,13 +51,11 @@ public:
StateChecker::Result
StateChecker::Result::noMaintenanceNeeded()
{
- return Result(std::unique_ptr<ResultImpl>());
+ return Result({});
}
StateChecker::Result
-StateChecker::Result::createStoredResult(
- IdealStateOperation::UP operation,
- MaintenancePriority::Priority priority)
+StateChecker::Result::createStoredResult(IdealStateOperation::UP operation, MaintenancePriority::Priority priority)
{
return Result(std::make_unique<StoredResultImpl>(std::move(operation), MaintenancePriority(priority)));
}
@@ -74,15 +72,13 @@ StateChecker::Context::Context(const DistributorNodeContext& node_ctx_in,
distributorConfig(op_ctx_in.distributor_config()),
distribution(distributorBucketSpace.getDistribution()),
gcTimeCalculator(op_ctx_in.bucket_id_hasher(), distributorConfig.getGarbageCollectionInterval()),
+ idealStateBundle(distributorBucketSpace.get_ideal_service_layer_nodes_bundle(bucket.getBucketId())),
node_ctx(node_ctx_in),
op_ctx(op_ctx_in),
db(distributorBucketSpace.getBucketDatabase()),
stats(statsTracker),
merges_inhibited_in_bucket_space(distributorBucketSpace.merges_inhibited())
-{
- idealState = distributorBucketSpace.get_ideal_service_layer_nodes_bundle(bucket.getBucketId()).get_available_nonretired_or_maintenance_nodes();
- unorderedIdealState.insert(idealState.begin(), idealState.end());
-}
+{ }
StateChecker::Context::~Context() = default;
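
Editor's note: the per-bucket Context no longer copies the ideal-state vector nor builds its own hash set in the constructor; it binds a const reference to the bundle cached by the bucket space (see the statechecker.h hunk below). The structural idea, sketched with stand-in types:

    #include <cstdint>
    #include <vector>

    struct BundleSketch { std::vector<uint16_t> nodes; };

    struct ContextSketch {
        // Non-owning reference; the bundle outlives the short-lived Context.
        const BundleSketch& bundle;
        explicit ContextSketch(const BundleSketch& b) noexcept : bundle(b) {}
        const std::vector<uint16_t>& idealState() const noexcept { return bundle.nodes; }
    };
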
diff --git a/storage/src/vespa/storage/distributor/statechecker.h b/storage/src/vespa/storage/distributor/statechecker.h
index 830e05676be..25918e7a047 100644
--- a/storage/src/vespa/storage/distributor/statechecker.h
+++ b/storage/src/vespa/storage/distributor/statechecker.h
@@ -2,6 +2,7 @@
#pragma once
#include "bucketgctimecalculator.h"
+#include "ideal_service_layer_nodes_bundle.h"
#include <vespa/storage/distributor/maintenance/maintenancepriority.h>
#include <vespa/storage/distributor/operations/idealstate/idealstateoperation.h>
#include <vespa/storage/common/storagecomponent.h>
@@ -63,28 +64,20 @@ public:
std::vector<BucketDatabase::Entry> entries;
// Common
- const lib::ClusterState& systemState;
- const lib::ClusterState* pending_cluster_state; // nullptr if no state is pending.
- const DistributorConfiguration& distributorConfig;
- const lib::Distribution& distribution;
- BucketGcTimeCalculator gcTimeCalculator;
-
- // Separate ideal state into ordered sequence and unordered set, as we
- // need to both know the actual order (activation prioritization etc) as
- // well as have the ability to quickly check if a node is in an ideal
- // location.
- std::vector<uint16_t> idealState;
- vespalib::hash_set<uint16_t> unorderedIdealState;
-
- const DistributorNodeContext& node_ctx;
- const DistributorStripeOperationContext& op_ctx;
- const BucketDatabase& db;
- NodeMaintenanceStatsTracker& stats;
- const bool merges_inhibited_in_bucket_space;
-
- const BucketDatabase::Entry& getSiblingEntry() const noexcept {
- return siblingEntry;
- }
+ const lib::ClusterState & systemState;
+ const lib::ClusterState * pending_cluster_state; // nullptr if no state is pending.
+ const DistributorConfiguration & distributorConfig;
+ const lib::Distribution & distribution;
+ BucketGcTimeCalculator gcTimeCalculator;
+ const IdealServiceLayerNodesBundle & idealStateBundle;
+ const DistributorNodeContext & node_ctx;
+ const DistributorStripeOperationContext & op_ctx;
+ const BucketDatabase & db;
+ NodeMaintenanceStatsTracker & stats;
+ const bool merges_inhibited_in_bucket_space;
+
+ const BucketDatabase::Entry& getSiblingEntry() const noexcept { return siblingEntry; }
+ const std::vector<uint16_t> & idealState() const noexcept { return idealStateBundle.available_nonretired_or_maintenance_nodes(); }
document::Bucket getBucket() const noexcept { return bucket; }
document::BucketId getBucketId() const noexcept { return bucket.getBucketId(); }
@@ -107,28 +100,19 @@ public:
std::unique_ptr<ResultImpl> _impl;
public:
IdealStateOperation::UP createOperation() {
- return (_impl
- ? _impl->createOperation()
- : IdealStateOperation::UP());
+ return (_impl ? _impl->createOperation() : IdealStateOperation::UP());
}
MaintenancePriority getPriority() const {
- return (_impl
- ? _impl->getPriority()
- : MaintenancePriority());
+ return (_impl ? _impl->getPriority() : MaintenancePriority());
}
MaintenanceOperation::Type getType() const {
- return (_impl
- ? _impl->getType()
- : MaintenanceOperation::OPERATION_COUNT);
-
+ return (_impl ? _impl->getType() : MaintenanceOperation::OPERATION_COUNT);
}
static Result noMaintenanceNeeded();
- static Result createStoredResult(
- IdealStateOperation::UP operation,
- MaintenancePriority::Priority priority);
+ static Result createStoredResult(IdealStateOperation::UP operation, MaintenancePriority::Priority priority);
private:
explicit Result(std::unique_ptr<ResultImpl> impl)
: _impl(std::move(impl))
diff --git a/storage/src/vespa/storage/distributor/statecheckers.cpp b/storage/src/vespa/storage/distributor/statecheckers.cpp
index fe1f4422c45..43766225155 100644
--- a/storage/src/vespa/storage/distributor/statecheckers.cpp
+++ b/storage/src/vespa/storage/distributor/statecheckers.cpp
@@ -27,9 +27,7 @@ SplitBucketStateChecker::validForSplit(Context& c)
{
// Can't split if we have no nodes.
if (c.entry->getNodeCount() == 0) {
- LOG(spam,
- "Can't split bucket %s, since it has no copies",
- c.bucket.toString().c_str());
+ LOG(spam, "Can't split bucket %s, since it has no copies", c.bucket.toString().c_str());
return false;
}
@@ -83,33 +81,21 @@ SplitBucketStateChecker::getBucketSizeRelativeToMax(Context& c)
}
StateChecker::Result
-SplitBucketStateChecker::generateMinimumBucketSplitOperation(
- Context& c)
+SplitBucketStateChecker::generateMinimumBucketSplitOperation(Context& c)
{
- auto so = std::make_unique<SplitOperation>(
- c.node_ctx,
- BucketAndNodes(c.getBucket(), c.entry->getNodes()),
- c.distributorConfig.getMinimalBucketSplit(),
- 0,
- 0);
+ auto so = std::make_unique<SplitOperation>(c.node_ctx, BucketAndNodes(c.getBucket(), c.entry->getNodes()),
+ c.distributorConfig.getMinimalBucketSplit(), 0, 0);
so->setPriority(c.distributorConfig.getMaintenancePriorities().splitDistributionBits);
- so->setDetailedReason(
- "[Splitting bucket because the current system size requires "
- "a higher minimum split bit]");
+ so->setDetailedReason("[Splitting bucket because the current system size requires a higher minimum split bit]");
return Result::createStoredResult(std::move(so), MaintenancePriority::MEDIUM);
}
StateChecker::Result
-SplitBucketStateChecker::generateMaxSizeExceededSplitOperation(
- Context& c)
+SplitBucketStateChecker::generateMaxSizeExceededSplitOperation(Context& c)
{
- auto so = std::make_unique<SplitOperation>(
- c.node_ctx,
- BucketAndNodes(c.getBucket(), c.entry->getNodes()),
- 58,
- c.distributorConfig.getSplitCount(),
- c.distributorConfig.getSplitSize());
+ auto so = std::make_unique<SplitOperation>(c.node_ctx, BucketAndNodes(c.getBucket(), c.entry->getNodes()), 58,
+ c.distributorConfig.getSplitCount(), c.distributorConfig.getSplitSize());
so->setPriority(c.distributorConfig.getMaintenancePriorities().splitLargeBucket);
@@ -160,8 +146,7 @@ JoinBucketsStateChecker::isFirstSibling(const document::BucketId& bucketId)
namespace {
bool
-equalNodeSet(const std::vector<uint16_t>& idealState,
- const BucketDatabase::Entry& dbEntry)
+equalNodeSet(const std::vector<uint16_t>& idealState, const BucketDatabase::Entry& dbEntry)
{
if (idealState.size() != dbEntry->getNodeCount()) {
return false;
@@ -179,12 +164,10 @@ equalNodeSet(const std::vector<uint16_t>& idealState,
bool
bucketAndSiblingReplicaLocationsEqualIdealState(const StateChecker::Context& context)
{
- if (!equalNodeSet(context.idealState, context.entry)) {
+ if (!equalNodeSet(context.idealState(), context.entry)) {
return false;
}
- std::vector<uint16_t> siblingIdealState(
- context.distribution.getIdealStorageNodes(
- context.systemState, context.siblingBucket));
+ std::vector<uint16_t> siblingIdealState = context.distribution.getIdealStorageNodes(context.systemState, context.siblingBucket);
if (!equalNodeSet(siblingIdealState, context.siblingEntry)) {
return false;
}
@@ -213,41 +196,29 @@ JoinBucketsStateChecker::siblingsAreInSync(const Context& context)
const auto& siblingEntry(context.siblingEntry);
if (entry->getNodeCount() != siblingEntry->getNodeCount()) {
- LOG(spam,
- "Not joining bucket %s because sibling bucket %s had different "
- "node count",
- context.bucket.toString().c_str(),
- context.siblingBucket.toString().c_str());
+ LOG(spam, "Not joining bucket %s because sibling bucket %s had different node count",
+ context.bucket.toString().c_str(), context.siblingBucket.toString().c_str());
return false;
}
bool siblingsCoLocated = true;
for (uint32_t i = 0; i < entry->getNodeCount(); ++i) {
- if (entry->getNodeRef(i).getNode()
- != siblingEntry->getNodeRef(i).getNode())
- {
+ if (entry->getNodeRef(i).getNode() != siblingEntry->getNodeRef(i).getNode()) {
siblingsCoLocated = false;
break;
}
}
if (!siblingsCoLocated && !inconsistentJoinIsAllowed(context)) {
- LOG(spam,
- "Not joining bucket %s because sibling bucket %s "
- "does not have the same node set, or inconsistent joins cannot be "
- "performed either due to config or because replicas were not in "
- "their ideal location",
- context.bucket.toString().c_str(),
- context.siblingBucket.toString().c_str());
+ LOG(spam, "Not joining bucket %s because sibling bucket %s does not have the same node set, or inconsistent "
+ "joins cannot be performed either due to config or because replicas were not in their ideal location",
+ context.bucket.toString().c_str(), context.siblingBucket.toString().c_str());
return false;
}
if (!entry->validAndConsistent() || !siblingEntry->validAndConsistent()) {
- LOG(spam,
- "Not joining bucket %s because it or %s is out of sync "
- "and syncing it may cause it to become too large",
- context.bucket.toString().c_str(),
- context.siblingBucket.toString().c_str());
+ LOG(spam, "Not joining bucket %s because it or %s is out of sync and syncing it may cause it to become too large",
+ context.bucket.toString().c_str(), context.siblingBucket.toString().c_str());
return false;
}
@@ -291,9 +262,8 @@ contextBucketHasTooManyReplicas(const StateChecker::Context& c)
bool
bucketAtDistributionBitLimit(const document::BucketId& bucket, const StateChecker::Context& c)
{
- return (bucket.getUsedBits() <= std::max(
- uint32_t(c.systemState.getDistributionBitCount()),
- c.distributorConfig.getMinimalBucketSplit()));
+ return (bucket.getUsedBits() <= std::max(uint32_t(c.systemState.getDistributionBitCount()),
+ c.distributorConfig.getMinimalBucketSplit()));
}
}
@@ -302,31 +272,23 @@ bool
JoinBucketsStateChecker::shouldJoin(const Context& c)
{
if (c.entry->getNodeCount() == 0) {
- LOG(spam, "Not joining bucket %s because it has no nodes",
- c.bucket.toString().c_str());
+ LOG(spam, "Not joining bucket %s because it has no nodes", c.bucket.toString().c_str());
return false;
}
if (contextBucketHasTooManyReplicas(c)) {
- LOG(spam, "Not joining %s because it has too high replication level",
- c.bucket.toString().c_str());
+ LOG(spam, "Not joining %s because it has too high replication level", c.bucket.toString().c_str());
return false;
}
if (c.distributorConfig.getJoinSize() == 0 && c.distributorConfig.getJoinCount() == 0) {
- LOG(spam, "Not joining bucket %s because join is disabled",
- c.bucket.toString().c_str());
+ LOG(spam, "Not joining bucket %s because join is disabled", c.bucket.toString().c_str());
return false;
}
if (bucketAtDistributionBitLimit(c.getBucketId(), c)) {
- LOG(spam,
- "Not joining bucket %s because it is below the min split "
- "count (config: %u, cluster state: %u, bucket has: %u)",
- c.bucket.toString().c_str(),
- c.distributorConfig.getMinimalBucketSplit(),
- c.systemState.getDistributionBitCount(),
- c.getBucketId().getUsedBits());
+ LOG(spam, "Not joining bucket %s because it is below the min split count (config: %u, cluster state: %u, bucket has: %u)",
+ c.bucket.toString().c_str(), c.distributorConfig.getMinimalBucketSplit(), c.systemState.getDistributionBitCount(), c.getBucketId().getUsedBits());
return false;
}
@@ -336,11 +298,8 @@ JoinBucketsStateChecker::shouldJoin(const Context& c)
if (c.getSiblingEntry().valid()) {
if (!isFirstSibling(c.getBucketId())) {
- LOG(spam,
- "Not joining bucket %s because it is the second sibling of "
- "%s and not the first",
- c.bucket.toString().c_str(),
- c.siblingBucket.toString().c_str());
+ LOG(spam, "Not joining bucket %s because it is the second sibling of %s and not the first",
+ c.bucket.toString().c_str(), c.siblingBucket.toString().c_str());
return false;
}
if (!siblingsAreInSync(c)) {
@@ -463,24 +422,13 @@ JoinBucketsStateChecker::check(Context& c) const
sourceBuckets.push_back(c.getBucketId());
}
sourceBuckets.push_back(c.getBucketId());
- auto op = std::make_unique<JoinOperation>(
- c.node_ctx,
- BucketAndNodes(joinedBucket, c.entry->getNodes()),
- sourceBuckets);
+ auto op = std::make_unique<JoinOperation>(c.node_ctx, BucketAndNodes(joinedBucket, c.entry->getNodes()), sourceBuckets);
op->setPriority(c.distributorConfig.getMaintenancePriorities().joinBuckets);
vespalib::asciistream ost;
- ost << "[Joining buckets "
- << sourceBuckets[1].toString()
- << " and " << sourceBuckets[0].toString()
- << " because their size ("
- << getTotalUsedFileSize(c)
- << " bytes, "
- << getTotalMetaCount(c)
- << " docs) is less than the configured limit of ("
- << c.distributorConfig.getJoinSize()
- << ", "
- << c.distributorConfig.getJoinCount()
- << ")";
+ ost << "[Joining buckets " << sourceBuckets[1].toString() << " and " << sourceBuckets[0].toString()
+ << " because their size (" << getTotalUsedFileSize(c) << " bytes, "
+ << getTotalMetaCount(c) << " docs) is less than the configured limit of ("
+ << c.distributorConfig.getJoinSize() << ", " << c.distributorConfig.getJoinCount() << ")";
op->setDetailedReason(ost.str());
@@ -516,8 +464,7 @@ vespalib::string
SplitInconsistentStateChecker::getReason(const document::BucketId& bucketId, const std::vector<BucketDatabase::Entry>& entries)
{
vespalib::asciistream reason;
- reason << "[Bucket is inconsistently split (list includes "
- << vespalib::hex << "0x" << bucketId.getId();
+ reason << "[Bucket is inconsistently split (list includes " << vespalib::hex << "0x" << bucketId.getId();
for (uint32_t i = 0, found = 0; i < entries.size() && found < 3; i++) {
if (!(entries[i].getBucketId() == bucketId)) {
@@ -530,10 +477,7 @@ SplitInconsistentStateChecker::getReason(const document::BucketId& bucketId, con
reason << " and " << vespalib::dec << entries.size() - 4 << " others";
}
- reason << ") Splitting it to improve the problem (max used bits "
- << vespalib::dec
- << getHighestUsedBits(entries)
- << ")]";
+ reason << ") Splitting it to improve the problem (max used bits " << vespalib::dec << getHighestUsedBits(entries) << ")]";
return reason.str();
}
@@ -559,12 +503,8 @@ SplitInconsistentStateChecker::check(Context& c) const
return Result::noMaintenanceNeeded();
}
- auto op = std::make_unique<SplitOperation>(
- c.node_ctx,
- BucketAndNodes(c.getBucket(), c.entry->getNodes()),
- getHighestUsedBits(c.entries),
- 0,
- 0);
+ auto op = std::make_unique<SplitOperation>(c.node_ctx, BucketAndNodes(c.getBucket(), c.entry->getNodes()),
+ getHighestUsedBits(c.entries), 0, 0);
op->setPriority(c.distributorConfig.getMaintenancePriorities().splitInconsistentBucket);
op->setDetailedReason(getReason(c.getBucketId(), c.entries));
@@ -576,8 +516,7 @@ namespace {
bool containsMaintenanceNode(const std::vector<uint16_t>& ideal, const StateChecker::Context& c)
{
for (uint16_t n : ideal) {
- lib::Node node(lib::NodeType::STORAGE, n);
- if (c.systemState.getNodeState(node).getState() == lib::State::MAINTENANCE) {
+ if (c.systemState.getNodeState(lib::Node(lib::NodeType::STORAGE, n)).getState() == lib::State::MAINTENANCE) {
return true;
}
}
@@ -588,9 +527,8 @@ bool ideal_node_is_unavailable_in_pending_state(const StateChecker::Context& c)
if (!c.pending_cluster_state) {
return false;
}
- for (uint16_t n : c.idealState) {
- lib::Node node(lib::NodeType::STORAGE, n);
- if (!c.pending_cluster_state->getNodeState(node).getState().oneOf("uir")){
+ for (uint16_t n : c.idealState()) {
+ if (!c.pending_cluster_state->getNodeState(lib::Node(lib::NodeType::STORAGE, n)).getState().oneOf("uir")){
return true;
}
}
@@ -598,9 +536,7 @@ bool ideal_node_is_unavailable_in_pending_state(const StateChecker::Context& c)
}
bool
-consistentApartFromEmptyBucketsInNonIdealLocationAndInvalidEntries(
- const std::vector<uint16_t>& idealNodes,
- const BucketInfo& entry)
+consistentApartFromEmptyBucketsInNonIdealLocationAndInvalidEntries(const std::vector<uint16_t>& idealNodes, const BucketInfo& entry)
{
api::BucketInfo info;
for (uint32_t i=0, n=entry.getNodeCount(); i<n; ++i) {
@@ -720,9 +656,9 @@ private:
MergeNodes::~MergeNodes() = default;
bool
-presentInIdealState(const StateChecker::Context& c, uint16_t node)
+presentInIdealState(const StateChecker::Context& c, uint16_t node) noexcept
{
- return c.unorderedIdealState.find(node) != c.unorderedIdealState.end();
+ return c.idealStateBundle.is_nonretired_or_maintenance(node);
}
void
@@ -730,7 +666,7 @@ addStatisticsForNonIdealNodes(const StateChecker::Context& c, bool missingReplic
{
// Common case is that ideal state == actual state with no missing replicas.
// If so, do nothing.
- if (!missingReplica && (c.idealState.size() == c.entry->getNodeCount())) {
+ if (!missingReplica && (c.idealState().size() == c.entry->getNodeCount())) {
return;
}
for (uint32_t j = 0; j < c.entry->getNodeCount(); ++j) {
@@ -753,26 +689,23 @@ checkForNodesMissingFromIdealState(StateChecker::Context& c)
// Check if we need to add copies to get to ideal state.
if (!c.entry->emptyAndConsistent()) {
bool hasMissingReplica = false;
- for (uint32_t i = 0; i < c.idealState.size(); i++) {
+ for (uint16_t node : c.idealState()) {
bool found = false;
for (uint32_t j = 0; j < c.entry->getNodeCount(); j++) {
- if (c.entry->getNodeRef(j).getNode() == c.idealState[i]) {
+ if (c.entry->getNodeRef(j).getNode() == node) {
found = true;
break;
}
}
if (!found) {
- const DistributorConfiguration::MaintenancePriorities& mp(
- c.distributorConfig.getMaintenancePriorities());
- if (c.idealState.size() > c.entry->getNodeCount()) {
- ret.markMissingReplica(c.idealState[i],
- mp.mergeTooFewCopies);
+ const auto & mp = c.distributorConfig.getMaintenancePriorities();
+ if (c.idealState().size() > c.entry->getNodeCount()) {
+ ret.markMissingReplica(node, mp.mergeTooFewCopies);
} else {
- ret.markMoveToIdealLocation(c.idealState[i],
- mp.mergeMoveToIdealNode);
+                    ret.markMoveToIdealLocation(node, mp.mergeMoveToIdealNode);
}
- c.stats.incCopyingIn(c.idealState[i], c.getBucketSpace());
+ c.stats.incCopyingIn(node, c.getBucketSpace());
hasMissingReplica = true;
}
}
@@ -795,12 +728,8 @@ MergeNodes
checkIfBucketsAreOutOfSyncAndNeedMerging(StateChecker::Context& c)
{
MergeNodes ret;
- if (!consistentApartFromEmptyBucketsInNonIdealLocationAndInvalidEntries(
- c.idealState,
- c.entry.getBucketInfo()))
- {
- auto pri(c.distributorConfig.getMaintenancePriorities()
- .mergeOutOfSyncCopies);
+    if (!consistentApartFromEmptyBucketsInNonIdealLocationAndInvalidEntries(c.idealState(), c.entry.getBucketInfo())) {
+ auto pri(c.distributorConfig.getMaintenancePriorities().mergeOutOfSyncCopies);
ret.markOutOfSync(c, pri);
addStatisticsForOutOfSyncCopies(c);
}
@@ -839,7 +768,7 @@ SynchronizeAndMoveStateChecker::check(Context& c) const
if (isInconsistentlySplit(c)) {
return Result::noMaintenanceNeeded();
}
- if (containsMaintenanceNode(c.idealState, c)) {
+ if (containsMaintenanceNode(c.idealState(), c)) {
return Result::noMaintenanceNeeded();
}
if (ideal_node_is_unavailable_in_pending_state(c)) {
@@ -863,8 +792,7 @@ SynchronizeAndMoveStateChecker::check(Context& c) const
if ((c.getBucketSpace() == document::FixedBucketSpaces::default_space())
|| !c.distributorConfig.prioritize_global_bucket_merges())
{
- schedPri = (result.needsMoveOnly() ? MaintenancePriority::LOW
- : MaintenancePriority::MEDIUM);
+ schedPri = (result.needsMoveOnly() ? MaintenancePriority::LOW : MaintenancePriority::MEDIUM);
op->setPriority(result.priority());
} else {
// Since the default bucket space has a dependency on the global bucket space,
@@ -876,10 +804,8 @@ SynchronizeAndMoveStateChecker::check(Context& c) const
return Result::createStoredResult(std::move(op), schedPri);
} else {
- LOG(spam, "Bucket %s: No need for merge, as bucket is in consistent state "
- "(or inconsistent buckets are empty) %s",
- c.bucket.toString().c_str(),
- c.entry->toString().c_str());
+ LOG(spam, "Bucket %s: No need for merge, as bucket is in consistent state (or inconsistent buckets are empty) %s",
+ c.bucket.toString().c_str(), c.entry->toString().c_str());
return Result::noMaintenanceNeeded();
}
}
@@ -894,7 +820,7 @@ DeleteExtraCopiesStateChecker::bucketHasNoData(const Context& c)
bool
DeleteExtraCopiesStateChecker::copyIsInIdealState(const BucketCopy& cp, const Context& c)
{
- return hasItem(c.idealState, cp.getNode());
+ return hasItem(c.idealState(), cp.getNode());
}
bool
@@ -910,9 +836,7 @@ DeleteExtraCopiesStateChecker::addToRemoveSet(
std::vector<uint16_t>& removedCopies,
vespalib::asciistream& reasons)
{
- reasons << "[Removing " << reasonForRemoval
- << " from node " << copyToRemove.getNode()
- << ']';
+ reasons << "[Removing " << reasonForRemoval << " from node " << copyToRemove.getNode() << ']';
removedCopies.push_back(copyToRemove.getNode());
}
@@ -980,7 +904,7 @@ DeleteExtraCopiesStateChecker::check(Context& c) const
}
// Maintain symmetry with merge; don't try to mess with nodes that have an
// ideal copy on a node set in maintenance mode.
- if (containsMaintenanceNode(c.idealState, c)) {
+ if (containsMaintenanceNode(c.idealState(), c)) {
return Result::noMaintenanceNeeded();
}
@@ -988,8 +912,7 @@ DeleteExtraCopiesStateChecker::check(Context& c) const
std::vector<uint16_t> removedCopies;
if (bucketHasNoData(c)) {
- reasons << "[Removing all copies since bucket is empty:"
- << c.entry->toString() << "]";
+ reasons << "[Removing all copies since bucket is empty:" << c.entry->toString() << "]";
for (uint32_t j = 0, cnt = c.entry->getNodeCount(); j < cnt; ++j) {
removedCopies.push_back(c.entry->getNodeRef(j).getNode());
@@ -1003,9 +926,7 @@ DeleteExtraCopiesStateChecker::check(Context& c) const
}
if (!removedCopies.empty()) {
- auto ro = std::make_unique<RemoveBucketOperation>(
- c.node_ctx,
- BucketAndNodes(c.getBucket(), removedCopies));
+ auto ro = std::make_unique<RemoveBucketOperation>(c.node_ctx, BucketAndNodes(c.getBucket(), removedCopies));
ro->setPriority(c.distributorConfig.getMaintenancePriorities().deleteBucketCopy);
ro->setDetailedReason(reasons.str());
@@ -1029,7 +950,7 @@ BucketStateStateChecker::shouldSkipActivationDueToMaintenance(const ActiveList&
// If copy is not ready, we don't want to activate it if a node
// is set in maintenance. Doing so would imply that we want proton
// to start background indexing.
- return containsMaintenanceNode(c.idealState, c);
+ return containsMaintenanceNode(c.idealState(), c);
} // else: activation does not imply indexing, so we can safely do it at any time.
}
}
@@ -1057,9 +978,8 @@ BucketStateStateChecker::check(Context& c) const
return Result::noMaintenanceNeeded();
}
- ActiveList activeNodes(
- ActiveCopy::calculate(c.idealState, c.distribution, c.entry,
- c.distributorConfig.max_activation_inhibited_out_of_sync_groups()));
+ ActiveList activeNodes = ActiveCopy::calculate(c.idealState(), c.distribution, c.entry,
+ c.distributorConfig.max_activation_inhibited_out_of_sync_groups());
if (activeNodes.empty()) {
return Result::noMaintenanceNeeded();
}
@@ -1075,8 +995,7 @@ BucketStateStateChecker::check(Context& c) const
continue;
}
operationNodes.push_back(activeNodes[i]._nodeIndex);
- reason << "[Setting node " << activeNodes[i]._nodeIndex << " as active: "
- << activeNodes[i].getReason() << "]";
+ reason << "[Setting node " << activeNodes[i]._nodeIndex << " as active: " << activeNodes[i].getReason() << "]";
}
// Deactivate all copies that are currently marked as active.
@@ -1105,10 +1024,7 @@ BucketStateStateChecker::check(Context& c) const
for (uint32_t i=0; i<activeNodes.size(); ++i) {
activeNodeIndexes.push_back(activeNodes[i]._nodeIndex);
}
- auto op = std::make_unique<SetBucketStateOperation>(
- c.node_ctx,
- BucketAndNodes(c.getBucket(), operationNodes),
- activeNodeIndexes);
+ auto op = std::make_unique<SetBucketStateOperation>(c.node_ctx, BucketAndNodes(c.getBucket(), operationNodes), activeNodeIndexes);
// If activeNodes > 1, we're dealing with a active-per-leaf group case and
// we currently always send high pri activations.
@@ -1134,7 +1050,7 @@ GarbageCollectionStateChecker::needs_garbage_collection(const Context& c, vespal
if (c.entry->getNodeCount() == 0) {
return false;
}
- if (containsMaintenanceNode(c.idealState, c)) {
+ if (containsMaintenanceNode(c.idealState(), c)) {
return false;
}
std::chrono::seconds lastRunAt(c.entry->getLastGarbageCollectionTime());
diff --git a/storage/src/vespa/storageframework/generic/status/htmlstatusreporter.cpp b/storage/src/vespa/storageframework/generic/status/htmlstatusreporter.cpp
index 9b7c4919403..4cc32a2fc3d 100644
--- a/storage/src/vespa/storageframework/generic/status/htmlstatusreporter.cpp
+++ b/storage/src/vespa/storageframework/generic/status/htmlstatusreporter.cpp
@@ -4,17 +4,14 @@
namespace storage::framework {
-HtmlStatusReporter::HtmlStatusReporter(vespalib::stringref id,
- vespalib::stringref name)
+HtmlStatusReporter::HtmlStatusReporter(vespalib::stringref id, vespalib::stringref name)
: StatusReporter(id, name)
-{
-}
+{ }
HtmlStatusReporter::~HtmlStatusReporter() = default;
void
-HtmlStatusReporter::reportHtmlHeader(std::ostream& out,
- const HttpUrlPath& path) const
+HtmlStatusReporter::reportHtmlHeader(std::ostream& out, const HttpUrlPath& path) const
{
out << "<html>\n"
<< "<head>\n"
@@ -26,8 +23,7 @@ HtmlStatusReporter::reportHtmlHeader(std::ostream& out,
}
void
-HtmlStatusReporter::reportHtmlFooter(std::ostream& out,
- const HttpUrlPath&) const
+HtmlStatusReporter::reportHtmlFooter(std::ostream& out, const HttpUrlPath&) const
{
out << "</body>\n</html>\n";
}
@@ -39,8 +35,7 @@ HtmlStatusReporter::getReportContentType(const HttpUrlPath&) const
}
bool
-HtmlStatusReporter::reportStatus(std::ostream& out,
- const HttpUrlPath& path) const
+HtmlStatusReporter::reportStatus(std::ostream& out, const HttpUrlPath& path) const
{
if (!isValidStatusRequest()) return false;
reportHtmlHeader(out, path);
diff --git a/storage/src/vespa/storageframework/generic/status/htmlstatusreporter.h b/storage/src/vespa/storageframework/generic/status/htmlstatusreporter.h
index 4ffba20a3fa..ee3d65b0de3 100644
--- a/storage/src/vespa/storageframework/generic/status/htmlstatusreporter.h
+++ b/storage/src/vespa/storageframework/generic/status/htmlstatusreporter.h
@@ -29,8 +29,7 @@ struct HtmlStatusReporter : public StatusReporter {
* some code in the <head></head> part of the HTML, such as javascript
* functions.
*/
- virtual void reportHtmlHeaderAdditions(std::ostream&,
- const HttpUrlPath&) const {}
+ virtual void reportHtmlHeaderAdditions(std::ostream&, const HttpUrlPath&) const {}
/**
* Write a default HTML header. It writes the start of an HTML
diff --git a/vdslib/src/vespa/vdslib/distribution/distribution.cpp b/vdslib/src/vespa/vdslib/distribution/distribution.cpp
index 637a5089822..9dda360eea5 100644
--- a/vdslib/src/vespa/vdslib/distribution/distribution.cpp
+++ b/vdslib/src/vespa/vdslib/distribution/distribution.cpp
@@ -459,9 +459,7 @@ Distribution::getDefaultDistributionConfig(uint16_t redundancy, uint16_t nodeCou
}
std::vector<uint16_t>
-Distribution::getIdealStorageNodes(
- const ClusterState& state, const document::BucketId& bucket,
- const char* upStates) const
+Distribution::getIdealStorageNodes(const ClusterState& state, const document::BucketId& bucket, const char* upStates) const
{
std::vector<uint16_t> nodes;
getIdealNodes(NodeType::STORAGE, state, bucket, nodes, upStates);
@@ -469,10 +467,7 @@ Distribution::getIdealStorageNodes(
}
uint16_t
-Distribution::getIdealDistributorNode(
- const ClusterState& state,
- const document::BucketId& bucket,
- const char* upStates) const
+Distribution::getIdealDistributorNode(const ClusterState& state, const document::BucketId& bucket, const char* upStates) const
{
std::vector<uint16_t> nodes;
getIdealNodes(NodeType::DISTRIBUTOR, state, bucket, nodes, upStates);
diff --git a/vespalib/src/vespa/vespalib/stllike/hash_set_insert.hpp b/vespalib/src/vespa/vespalib/stllike/hash_set_insert.hpp
index 6d5b7ed8b05..77e46bbf9e8 100644
--- a/vespalib/src/vespa/vespalib/stllike/hash_set_insert.hpp
+++ b/vespalib/src/vespa/vespalib/stllike/hash_set_insert.hpp
@@ -9,7 +9,7 @@ namespace vespalib {
template<typename K, typename H, typename EQ, typename M>
template<typename InputIterator>
hash_set<K, H, EQ, M>::hash_set(InputIterator first, InputIterator last)
- : _ht(0)
+ : _ht(last - first)
{
insert(first, last);
}
@@ -18,7 +18,6 @@ template<typename K, typename H, typename EQ, typename M>
template<typename InputIt>
void
hash_set<K, H, EQ, M>::insert(InputIt first, InputIt last) {
- _ht.resize(last - first + capacity());
for (; first < last; first++) {
insert(*first);
}
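
Editor's note: the range constructor now sizes the hash table from the iterator distance up front, and insert() no longer resizes per call. A generic sketch of the same pre-sizing idea with standard containers (std::distance is used here so the sketch also covers non-random-access iterators; the Vespa constructor computes last - first directly):

    #include <iterator>
    #include <unordered_set>
    #include <vector>

    // Pre-size the container from the input range, then insert without rehashing.
    template <typename InputIterator>
    std::unordered_set<int> make_presized_set(InputIterator first, InputIterator last) {
        std::unordered_set<int> result;
        result.reserve(static_cast<size_t>(std::distance(first, last)));
        result.insert(first, last);
        return result;
    }

    // Usage:
    //   std::vector<int> v{1, 2, 3, 2};
    //   auto s = make_presized_set(v.begin(), v.end());  // s.size() == 3
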