-rw-r--r-- application-model/src/main/java/com/yahoo/vespa/archive/ArchiveStreamReader.java | 23
-rw-r--r-- config-application-package/src/main/java/com/yahoo/config/application/OverrideProcessor.java | 1
-rw-r--r-- config-application-package/src/main/java/com/yahoo/config/application/Xml.java | 7
-rw-r--r-- config-application-package/src/main/java/com/yahoo/config/application/XmlPreProcessor.java | 9
-rw-r--r-- config-application-package/src/main/java/com/yahoo/config/model/application/provider/FilesApplicationPackage.java | 11
-rw-r--r-- config-application-package/src/test/java/com/yahoo/config/application/HostedOverrideProcessorComplexTest.java | 119
-rw-r--r-- config-application-package/src/test/resources/complex-app/deployment.xml | 140
-rw-r--r-- config-application-package/src/test/resources/complex-app/services.xml | 268
-rw-r--r-- config-model-api/abi-spec.json | 17
-rw-r--r-- config-model-api/src/main/java/com/yahoo/config/application/api/xml/package-info.java | 7
-rw-r--r-- config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java | 28
-rw-r--r-- configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java | 24
-rw-r--r-- container-core/src/main/java/com/yahoo/container/handler/threadpool/ThreadPoolMetric.java | 23
-rw-r--r-- container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/MetricDefinitions.java | 14
-rw-r--r-- container-core/src/main/java/com/yahoo/metrics/ContainerMetrics.java | 32
-rw-r--r-- container-core/src/test/java/com/yahoo/container/handler/threadpool/ContainerThreadPoolImplTest.java | 13
-rw-r--r-- controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackage.java | 23
-rw-r--r-- controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java | 17
-rw-r--r-- controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/DeploymentQuotaCalculatorTest.java | 1
-rw-r--r-- node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java | 48
-rw-r--r-- node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandler.java | 8
-rw-r--r-- node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/DefaultEnvWriter.java | 3
-rw-r--r-- node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixPath.java | 3
-rw-r--r-- node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainerTest.java | 3
-rw-r--r-- node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandlerTest.java | 2
-rw-r--r-- node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/DefaultEnvWriterTest.java | 9
-rw-r--r-- node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileFinderTest.java | 14
-rw-r--r-- node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java | 3
-rw-r--r-- node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeHealthTracker.java | 3
-rw-r--r-- node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/IP.java | 3
-rw-r--r-- node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java | 35
-rw-r--r-- node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CachingCurator.java | 2
-rw-r--r-- node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java | 68
-rw-r--r-- node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java | 14
-rw-r--r-- node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java | 14
-rw-r--r-- node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java | 2
-rw-r--r-- node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java | 3
-rw-r--r-- node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java | 7
-rw-r--r-- node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java | 13
-rw-r--r-- node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDbTest.java | 6
-rw-r--r-- node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializerTest.java | 104
-rw-r--r-- node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java | 13
-rw-r--r-- node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java | 8
-rw-r--r-- node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java | 2
-rw-r--r-- node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java | 4
-rw-r--r-- node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/active-nodes.json | 22
-rw-r--r-- node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application2-nodes.json | 4
-rw-r--r-- node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/content-nodes.json | 4
-rw-r--r-- node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/enclave-nodes-recursive.json | 4
-rw-r--r-- node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/enclave-nodes.json | 4
-rw-r--r-- node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/nodes-recursive-include-deprovisioned.json | 24
-rw-r--r-- node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/nodes-recursive.json | 22
-rw-r--r-- node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/nodes.json | 38
-rw-r--r-- node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/states-recursive.json | 22
-rw-r--r-- searchlib/src/vespa/searchlib/memoryindex/field_index_base.h | 2
-rw-r--r-- searchlib/src/vespa/searchlib/memoryindex/field_index_collection.h | 2
-rw-r--r-- searchlib/src/vespa/searchlib/memoryindex/field_inverter.h | 4
-rw-r--r-- searchlib/src/vespa/searchlib/memoryindex/invert_task.h | 2
-rw-r--r-- searchlib/src/vespa/searchlib/memoryindex/posting_list_entry.h | 1
-rw-r--r-- vespalib/CMakeLists.txt | 1
-rw-r--r-- vespalib/src/apps/vespa-stress-and-validate-memory/.gitignore | 1
-rw-r--r-- vespalib/src/apps/vespa-stress-and-validate-memory/CMakeLists.txt | 9
-rw-r--r-- vespalib/src/apps/vespa-stress-and-validate-memory/stress_and_validate_memory.cpp | 262
-rw-r--r-- vespalib/src/tests/data/smart_buffer/smart_buffer_test.cpp | 14
-rw-r--r-- vespalib/src/vespa/vespalib/data/smart_buffer.h | 3
-rw-r--r-- vespalib/src/vespa/vespalib/util/mmap_file_allocator.h | 6
66 files changed, 1209 insertions, 413 deletions
diff --git a/application-model/src/main/java/com/yahoo/vespa/archive/ArchiveStreamReader.java b/application-model/src/main/java/com/yahoo/vespa/archive/ArchiveStreamReader.java
index 87665efc1ef..acc18b4c13f 100644
--- a/application-model/src/main/java/com/yahoo/vespa/archive/ArchiveStreamReader.java
+++ b/application-model/src/main/java/com/yahoo/vespa/archive/ArchiveStreamReader.java
@@ -74,7 +74,7 @@ public class ArchiveStreamReader implements AutoCloseable {
outputStream.write(buffer, 0, read);
}
}
- return new ArchiveFile(path, crc32(entry), size);
+ return new ArchiveFile(path, size);
}
} catch (IOException e) {
throw new UncheckedIOException(e);
@@ -91,15 +91,10 @@ public class ArchiveStreamReader implements AutoCloseable {
public static class ArchiveFile {
private final Path path;
- private final OptionalLong crc32;
private final long size;
- public ArchiveFile(Path name, OptionalLong crc32, long size) {
+ public ArchiveFile(Path name, long size) {
this.path = Objects.requireNonNull(name);
- this.crc32 = Objects.requireNonNull(crc32);
- if (crc32.isPresent()) {
- requireNonNegative("crc32", crc32.getAsLong());
- }
this.size = requireNonNegative("size", size);
}
@@ -108,11 +103,6 @@ public class ArchiveStreamReader implements AutoCloseable {
return path;
}
- /** The CRC-32 checksum of this file, if any */
- public OptionalLong crc32() {
- return crc32;
- }
-
/** The decompressed size of this file */
public long size() {
return size;
@@ -120,15 +110,6 @@ public class ArchiveStreamReader implements AutoCloseable {
}
- /** Get the CRC-32 checksum of given archive entry, if any */
- private static OptionalLong crc32(ArchiveEntry entry) {
- long crc32 = -1;
- if (entry instanceof ZipArchiveEntry) {
- crc32 = ((ZipArchiveEntry) entry).getCrc();
- }
- return crc32 > -1 ? OptionalLong.of(crc32) : OptionalLong.empty();
- }
-
private static boolean isSymlink(ArchiveEntry entry) {
// Symlinks inside ZIP files are not part of the ZIP spec and are only supported by some implementations, such
// as Info-ZIP.
diff --git a/config-application-package/src/main/java/com/yahoo/config/application/OverrideProcessor.java b/config-application-package/src/main/java/com/yahoo/config/application/OverrideProcessor.java
index 21bb193ef93..169b1c8557b 100644
--- a/config-application-package/src/main/java/com/yahoo/config/application/OverrideProcessor.java
+++ b/config-application-package/src/main/java/com/yahoo/config/application/OverrideProcessor.java
@@ -13,7 +13,6 @@ import org.w3c.dom.NamedNodeMap;
import javax.xml.transform.TransformerException;
import java.util.ArrayList;
import java.util.Arrays;
-import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
diff --git a/config-application-package/src/main/java/com/yahoo/config/application/Xml.java b/config-application-package/src/main/java/com/yahoo/config/application/Xml.java
index 2cc5e1ac4e2..cf391f2dd0e 100644
--- a/config-application-package/src/main/java/com/yahoo/config/application/Xml.java
+++ b/config-application-package/src/main/java/com/yahoo/config/application/Xml.java
@@ -101,7 +101,12 @@ public class Xml {
}
StringWriter writer = new StringWriter();
transformer.transform(new DOMSource(document), new StreamResult(writer));
- return writer.toString();
+ String[] lines = writer.toString().split("\n");
+ var b = new StringBuilder();
+ for (String line : lines)
+ if ( ! line.isBlank())
+ b.append(line).append("\n");
+ return b.toString();
}
static String documentAsString(Document document) throws TransformerException {
diff --git a/config-application-package/src/main/java/com/yahoo/config/application/XmlPreProcessor.java b/config-application-package/src/main/java/com/yahoo/config/application/XmlPreProcessor.java
index 42333ea7662..7b99b19a9af 100644
--- a/config-application-package/src/main/java/com/yahoo/config/application/XmlPreProcessor.java
+++ b/config-application-package/src/main/java/com/yahoo/config/application/XmlPreProcessor.java
@@ -43,15 +43,6 @@ public class XmlPreProcessor {
private final Tags tags;
private final List<PreProcessor> chain;
- // TODO: Remove after November 2022
- public XmlPreProcessor(File applicationDir,
- File xmlInput,
- InstanceName instance,
- Environment environment,
- RegionName region) throws IOException {
- this(applicationDir, new FileReader(xmlInput), instance, environment, region, Tags.empty());
- }
-
public XmlPreProcessor(File applicationDir,
File xmlInput,
InstanceName instance,
diff --git a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/FilesApplicationPackage.java b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/FilesApplicationPackage.java
index e61ea01a99a..2d91f811e8b 100644
--- a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/FilesApplicationPackage.java
+++ b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/FilesApplicationPackage.java
@@ -11,7 +11,10 @@ import com.yahoo.config.application.api.ApplicationMetaData;
import com.yahoo.config.application.api.ApplicationPackage;
import com.yahoo.config.application.api.ComponentInfo;
import com.yahoo.config.application.api.DeployLogger;
+import com.yahoo.config.application.api.DeploymentInstanceSpec;
+import com.yahoo.config.application.api.DeploymentSpec;
import com.yahoo.config.application.api.UnparsedConfigDefinition;
+import com.yahoo.config.application.api.xml.DeploymentSpecXmlReader;
import com.yahoo.config.codegen.DefParser;
import com.yahoo.config.model.application.AbstractApplicationPackage;
import com.yahoo.config.provision.ApplicationId;
@@ -582,12 +585,16 @@ public class FilesApplicationPackage extends AbstractApplicationPackage {
private void preprocessXML(File destination, File inputXml, Zone zone) throws IOException {
if ( ! inputXml.exists()) return;
try {
+ InstanceName instance = metaData.getApplicationId().instance();
Document document = new XmlPreProcessor(appDir,
inputXml,
- metaData.getApplicationId().instance(),
+ instance,
zone.environment(),
zone.region(),
- metaData.getTags())
+ getDeployment().map(new DeploymentSpecXmlReader(false)::read)
+ .flatMap(spec -> spec.instance(instance))
+ .map(DeploymentInstanceSpec::tags)
+ .orElse(Tags.empty()))
.run();
try (FileOutputStream outputStream = new FileOutputStream(destination)) {
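A minimal sketch of the tag-resolution pattern introduced above (SessionPreparer below applies the same pattern): the instance's tags are now read from deployment.xml rather than from application meta-data. The helper class and the way the deployment.xml reader is supplied are illustrative, not part of the change.

import com.yahoo.config.application.api.DeploymentInstanceSpec;
import com.yahoo.config.application.api.xml.DeploymentSpecXmlReader;
import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.Tags;

import java.io.Reader;
import java.util.Optional;

class TagResolutionSketch {

    // Resolve the tags declared for 'instance' in deployment.xml, or Tags.empty() when the
    // file is missing or the instance is not declared. The real code obtains the reader
    // from the application package (getDeployment()).
    static Tags tagsOf(Optional<Reader> deploymentXml, InstanceName instance) {
        return deploymentXml.map(new DeploymentSpecXmlReader(false)::read) // same reader construction as above
                            .flatMap(spec -> spec.instance(instance))      // the matching <instance> element, if any
                            .map(DeploymentInstanceSpec::tags)             // its tags="..." attribute
                            .orElse(Tags.empty());
    }
}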
diff --git a/config-application-package/src/test/java/com/yahoo/config/application/HostedOverrideProcessorComplexTest.java b/config-application-package/src/test/java/com/yahoo/config/application/HostedOverrideProcessorComplexTest.java
new file mode 100644
index 00000000000..902b4126a29
--- /dev/null
+++ b/config-application-package/src/test/java/com/yahoo/config/application/HostedOverrideProcessorComplexTest.java
@@ -0,0 +1,119 @@
+package com.yahoo.config.application;
+
+import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.InstanceName;
+import com.yahoo.config.provision.RegionName;
+import com.yahoo.config.provision.Tags;
+import com.yahoo.io.IOUtils;
+import org.custommonkey.xmlunit.XMLUnit;
+import org.junit.Test;
+import org.w3c.dom.Document;
+
+import javax.xml.transform.TransformerException;
+import java.io.IOException;
+
+import static org.junit.Assert.assertEquals;
+
+public class HostedOverrideProcessorComplexTest {
+
+ private static final String servicesFile = "src/test/resources/complex-app/services.xml";
+
+ static {
+ XMLUnit.setIgnoreWhitespace(true);
+ }
+
+ @Test
+ public void testProdBetaUsWest2a() throws TransformerException, IOException {
+ String expected =
+ """
+ <?xml version="1.0" encoding="UTF-8" standalone="no"?>
+ <services xmlns:deploy="vespa" xmlns:preprocess="properties" version="1.0">
+ <container id="docsgateway" version="1.0">
+ <nodes count="3">
+ <resources disk="1800Gb" disk-speed="fast" memory="96Gb" storage-type="local" vcpu="48"/>
+ </nodes>
+ </container>
+ <container id="qrs" version="1.0">
+ <nodes count="3" required="true">
+ <resources disk="64Gb" memory="32Gb" vcpu="16"/>
+ </nodes>
+ <search/>
+ </container>
+ <container id="visitor" version="1.0">
+ <nodes count="2">
+ <resources disk="32Gb" memory="16Gb" vcpu="8"/>
+ </nodes>
+ <search/>
+ </container>
+ <content id="all" version="1.0">
+ <nodes count="3" groups="3" required="true">
+ <resources disk="1800Gb" disk-speed="fast" memory="96Gb" storage-type="local" vcpu="48"/>
+ </nodes>
+ <redundancy>1</redundancy>
+ </content>
+ <content id="filedocument" version="1.0">
+ <nodes count="2" groups="2">
+ <resources disk="32Gb" memory="8Gb" vcpu="4"/>
+ </nodes>
+ <redundancy>1</redundancy>
+ </content>
+ </services>
+ """;
+ assertOverride(InstanceName.from("beta1"),
+ Environment.prod,
+ RegionName.from("aws-us-west-2a"),
+ Tags.fromString("beta beta-prod beta-prod-cd"),
+ expected);
+ }
+
+ @Test
+ public void testProdBetaUsEast1b() throws TransformerException, IOException {
+ String expected =
+ """
+ <?xml version="1.0" encoding="UTF-8" standalone="no"?>
+ <services xmlns:deploy="vespa" xmlns:preprocess="properties" version="1.0">
+ <container id="docsgateway" version="1.0">
+ <nodes count="3">
+ <resources disk="1800Gb" disk-speed="fast" memory="96Gb" storage-type="local" vcpu="48"/>
+ </nodes>
+ </container>
+ <container id="qrs" version="1.0">
+ <nodes count="5" required="true">
+ <resources disk="64Gb" memory="32Gb" vcpu="16"/>
+ </nodes>
+ <search/>
+ </container>
+ <container id="visitor" version="1.0">
+ <nodes count="2">
+ <resources disk="32Gb" memory="16Gb" vcpu="8"/>
+ </nodes>
+ <search/>
+ </container>
+ <content id="all" version="1.0">
+ <nodes count="10" groups="10" required="true">
+ <resources disk="1800Gb" disk-speed="fast" memory="96Gb" storage-type="local" vcpu="48"/>
+ </nodes>
+ <redundancy>1</redundancy>
+ </content>
+ <content id="filedocument" version="1.0">
+ <nodes count="2" groups="2">
+ <resources disk="32Gb" memory="8Gb" vcpu="4"/>
+ </nodes>
+ <redundancy>1</redundancy>
+ </content>
+ </services>
+ """;
+ assertOverride(InstanceName.from("beta1"),
+ Environment.prod,
+ RegionName.from("aws-us-east-1b"),
+ Tags.fromString("beta beta-prod beta-prod-cd"),
+ expected);
+ }
+
+ private void assertOverride(InstanceName instance, Environment environment, RegionName region, Tags tags, String expected) throws TransformerException, IOException {
+ Document inputDoc = Xml.getDocument(IOUtils.createReader(servicesFile));
+ Document newDoc = new OverrideProcessor(instance, environment, region, tags).process(inputDoc);
+ assertEquals(expected, Xml.documentAsString(newDoc, true));
+ }
+
+}
diff --git a/config-application-package/src/test/resources/complex-app/deployment.xml b/config-application-package/src/test/resources/complex-app/deployment.xml
new file mode 100644
index 00000000000..1fa3a3a6f78
--- /dev/null
+++ b/config-application-package/src/test/resources/complex-app/deployment.xml
@@ -0,0 +1,140 @@
+<deployment version="1.0" major-version="8"
+ athenz-domain="gemini-native.aws-core-dev" athenz-service="csp"
+ cloud-account="337717828807">
+
+ <parallel>
+ <instance id="instance0" cloud-account="417744444827">
+ <test tester-flavor="d-8-16-10" />
+ <block-change version="true" revision="false" days="mon-fri,sun" hours="4-23" time-zone="UTC" />
+ </instance>
+
+ <instance id="instance1" cloud-account="417744444827">
+ <test tester-flavor="d-8-16-10" />
+ <block-change version="true" revision="false" days="mon-fri,sun" hours="4-23" time-zone="UTC" />
+ </instance>
+
+ <instance id="instance2" cloud-account="417744444827">
+ <test tester-flavor="d-8-16-10" />
+ <block-change version="true" revision="false" days="mon-fri,sun" hours="4-23" time-zone="UTC" />
+ </instance>
+
+ <instance id="instance3" cloud-account="417744444827">
+ <test tester-flavor="d-8-16-10" />
+ <block-change version="true" revision="false" days="mon-fri,sun" hours="4-23" time-zone="UTC" />
+ </instance>
+
+ <instance id="stress" cloud-account="417744444827">
+ <staging />
+ <block-change version="true" revision="false" days="mon-fri,sun" hours="4-23" time-zone="UTC" />
+ </instance>
+ </parallel>
+
+ <instance id="beta1" cloud-account="417744444827" tags="beta beta-prod beta-prod-cd">
+ <block-change version="true" revision="false" days="mon-fri,sun" hours="4-23" time-zone="UTC" />
+ <block-change version="true" revision="false" days="sat" hours="0-23" time-zone="UTC" />
+ <upgrade revision-change='when-clear' rollout='separate' revision-target='next' policy='conservative'/>
+ <prod>
+ <parallel>
+ <steps>
+ <region>aws-us-west-2a</region>
+ <test>aws-us-west-2a</test>
+ </steps>
+ <steps>
+ <region>aws-us-east-1b</region>
+ <test>aws-us-east-1b</test>
+ </steps>
+ <steps>
+ <region>aws-eu-west-1a</region>
+ <test>aws-eu-west-1a</test>
+ </steps>
+ <steps>
+ <region>aws-ap-southeast-1a</region>
+ <test>aws-ap-southeast-1a</test>
+ </steps>
+ </parallel>
+ </prod>
+ </instance>
+
+ <instance id="gamma5" cloud-account="417744444827" tags="gamma prod beta-prod beta-prod-cd">
+ <block-change version="true" revision="false" days="mon-fri,sun" hours="4-23" time-zone="UTC" />
+ <block-change version="true" revision="false" days="sat" hours="0-23" time-zone="UTC" />
+ <upgrade revision-change='when-clear' rollout='separate' revision-target='next' policy='conservative'/>
+ <prod>
+ <parallel>
+ <steps>
+ <region>aws-us-west-2a</region>
+ <test>aws-us-west-2a</test>
+ </steps>
+ <steps>
+ <region>aws-us-east-1b</region>
+ <test>aws-us-east-1b</test>
+ </steps>
+ <steps>
+ <region>aws-eu-west-1a</region>
+ <test>aws-eu-west-1a</test>
+ </steps>
+ <steps>
+ <region>aws-ap-southeast-1a</region>
+ <test>aws-ap-southeast-1a</test>
+ </steps>
+ </parallel>
+ </prod>
+ </instance>
+
+ <instance id="delta21" cloud-account="417744444827" tags="delta prod beta-prod beta-prod-cd">
+ <block-change version="true" revision="false" days="mon-fri,sun" hours="4-23" time-zone="UTC" />
+ <block-change version="true" revision="false" days="sat" hours="0-23" time-zone="UTC" />
+ <upgrade revision-change='when-clear' rollout='separate' revision-target='next' policy='conservative'/>
+ <prod>
+ <parallel>
+ <steps>
+ <region>aws-us-west-2a</region>
+ <test>aws-us-west-2a</test>
+ </steps>
+ <steps>
+ <region>aws-us-east-1b</region>
+ <test>aws-us-east-1b</test>
+ </steps>
+ <steps>
+ <region>aws-eu-west-1a</region>
+ <test>aws-eu-west-1a</test>
+ </steps>
+ <steps>
+ <region>aws-ap-southeast-1a</region>
+ <test>aws-ap-southeast-1a</test>
+ </steps>
+ </parallel>
+ </prod>
+ </instance>
+
+ <instance id="prod21a" cloud-account="417744444827" tags="prod-a prod beta-prod beta-prod-cd">
+ <block-change version="true" revision="false" days="mon-fri,sun" hours="4-23" time-zone="UTC" />
+ <block-change version="true" revision="false" days="sat" hours="0-23" time-zone="UTC" />
+ <upgrade revision-change='when-clear' rollout='separate' revision-target='next' policy='conservative'/>
+ </instance>
+
+ <instance id="prod21b" cloud-account="417744444827" tags="prod-b prod beta-prod beta-prod-cd">
+ <block-change version="true" revision="false" days="mon-fri,sun" hours="4-23" time-zone="UTC" />
+ <block-change version="true" revision="false" days="sat" hours="0-23" time-zone="UTC" />
+ <upgrade revision-change='when-clear' rollout='separate' revision-target='next' policy='conservative'/>
+ </instance>
+
+ <instance id="prod21c" cloud-account="417744444827" tags="prod-c prod beta-prod beta-prod-cd">
+ <block-change version="true" revision="false" days="mon-fri,sun" hours="4-23" time-zone="UTC" />
+ <block-change version="true" revision="false" days="sat" hours="0-23" time-zone="UTC" />
+ <upgrade revision-change='when-clear' rollout='separate' revision-target='next' policy='conservative'/>
+ </instance>
+
+ <instance id="cd10" cloud-account="417744444827" tags="cd beta-prod-cd">
+ <block-change version="true" revision="false" days="mon-fri,sun" hours="4-23" time-zone="UTC" />
+ <block-change version="true" revision="false" days="sat" hours="0-23" time-zone="UTC" />
+ <upgrade revision-change='when-clear' rollout='separate' revision-target='next' policy='conservative'/>
+ </instance>
+
+ <instance id="prod1" cloud-account="417744444827">
+ <block-change version="true" revision="false" days="mon-fri,sun" hours="4-23" time-zone="UTC" />
+ <block-change version="true" revision="false" days="sat" hours="0-23" time-zone="UTC" />
+ <upgrade revision-change='when-clear' rollout='separate' revision-target='next' policy='conservative'/>
+ </instance>
+
+</deployment>
diff --git a/config-application-package/src/test/resources/complex-app/services.xml b/config-application-package/src/test/resources/complex-app/services.xml
new file mode 100644
index 00000000000..23b5a90e5a2
--- /dev/null
+++ b/config-application-package/src/test/resources/complex-app/services.xml
@@ -0,0 +1,268 @@
+<services version="1.0" xmlns:deploy="vespa" xmlns:preprocess="properties">
+
+ <container id="docsgateway" version="1.0">
+
+ <nodes count="1" deploy:environment="dev" deploy:instance="staging1">
+ <resources vcpu="48" memory="96Gb" disk="1800Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes count="1" deploy:environment="dev" deploy:instance="staging1-root">
+ <resources vcpu="48" memory="96Gb" disk="1800Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes count="3">
+ <resources deploy:environment="prod perf" vcpu="48" memory="96Gb" disk="1800Gb" disk-speed="fast" storage-type="local" />
+ <resources vcpu="8" memory="16Gb" disk="32Gb" />
+ </nodes>
+
+ <nodes deploy:environment="staging" count="3">
+ <resources vcpu="8" memory="16Gb" disk="32Gb" />
+ </nodes>
+
+ </container>
+
+ <container id="qrs" version="1.0">
+ <nodes count="1" deploy:environment="dev" deploy:instance="staging1">
+ <resources vcpu="16" memory="32Gb" disk="64Gb" />
+ </nodes>
+
+ <nodes count="1" deploy:environment="dev" deploy:instance="staging1-root">
+ <resources vcpu="16" memory="32Gb" disk="64Gb" />
+ </nodes>
+
+ <nodes count="2">
+ <resources vcpu="8" memory="16Gb" disk="10Gb" />
+ <resources deploy:environment="prod perf" vcpu="16" memory="32Gb" disk="64Gb" />
+ </nodes>
+
+ <nodes count="25" deploy:tags="gamma" deploy:region="aws-us-east-1b">
+ <resources vcpu="16" memory="32Gb" disk="64Gb" />
+ </nodes>
+
+ <nodes count="2" deploy:tags="cd" deploy:region="aws-ap-southeast-1a">
+ <resources vcpu="16" memory="32Gb" disk="64Gb" />
+ </nodes>
+
+ <nodes count="2" deploy:tags="delta" deploy:region="aws-ap-southeast-1a">
+ <resources vcpu="16" memory="32Gb" disk="64Gb" />
+ </nodes>
+
+ <nodes count="2" deploy:tags="prod-a" deploy:region="aws-ap-southeast-1a">
+ <resources vcpu="16" memory="32Gb" disk="64Gb" />
+ </nodes>
+
+ <nodes count="2" deploy:tags="prod-b" deploy:region="aws-ap-southeast-1a">
+ <resources vcpu="16" memory="32Gb" disk="64Gb" />
+ </nodes>
+
+ <nodes count="2" deploy:tags="prod-c" deploy:region="aws-ap-southeast-1a">
+ <resources vcpu="16" memory="32Gb" disk="64Gb" />
+ </nodes>
+
+ <nodes count="2" deploy:tags="cd" deploy:region="aws-us-east-1b">
+ <resources vcpu="16" memory="32Gb" disk="64Gb" />
+ </nodes>
+
+ <nodes count="2" deploy:tags="delta" deploy:region="aws-us-east-1b">
+ <resources vcpu="16" memory="32Gb" disk="64Gb" />
+ </nodes>
+
+ <nodes count="2" deploy:tags="prod-a" deploy:region="aws-us-east-1b">
+ <resources vcpu="16" memory="32Gb" disk="64Gb" />
+ </nodes>
+
+ <nodes count="2" deploy:tags="prod-b" deploy:region="aws-us-east-1b">
+ <resources vcpu="16" memory="32Gb" disk="64Gb" />
+ </nodes>
+
+ <nodes count="2" deploy:tags="prod-c" deploy:region="aws-us-east-1b">
+ <resources vcpu="16" memory="32Gb" disk="64Gb" />
+ </nodes>
+
+ <nodes count="2" deploy:tags="cd" deploy:region="aws-us-west-2a aws-eu-west-1a">
+ <resources vcpu="16" memory="32Gb" disk="64Gb" />
+ </nodes>
+
+ <nodes count="2" deploy:tags="delta" deploy:region="aws-us-west-2a aws-eu-west-1a">
+ <resources vcpu="16" memory="32Gb" disk="64Gb" />
+ </nodes>
+
+ <nodes count="2" deploy:tags="prod-a" deploy:region="aws-us-west-2a aws-eu-west-1a">
+ <resources vcpu="16" memory="32Gb" disk="64Gb" />
+ </nodes>
+
+ <nodes count="2" deploy:tags="prod-b" deploy:region="aws-us-west-2a aws-eu-west-1a">
+ <resources vcpu="16" memory="32Gb" disk="64Gb" />
+ </nodes>
+
+ <nodes count="2" deploy:tags="prod-c" deploy:region="aws-us-west-2a aws-eu-west-1a">
+ <resources vcpu="16" memory="32Gb" disk="64Gb" />
+ </nodes>
+
+ <nodes count="3" deploy:tags="beta" deploy:region="aws-us-west-2a aws-eu-west-1a aws-ap-southeast-1a">
+ <resources vcpu="16" memory="32Gb" disk="64Gb" />
+ </nodes>
+
+ <nodes count="5" deploy:tags="beta" deploy:region="aws-us-east-1b">
+ <resources vcpu="16" memory="32Gb" disk="64Gb" />
+ </nodes>
+
+ <nodes count="5" deploy:tags="gamma" deploy:region="aws-us-west-2a aws-eu-west-1a">
+ <resources vcpu="16" memory="32Gb" disk="64Gb" />
+ </nodes>
+
+ <nodes count="7" deploy:tags="gamma" deploy:region="aws-ap-southeast-1a">
+ <resources vcpu="16" memory="32Gb" disk="64Gb" />
+ </nodes>
+
+ <search/>
+
+ </container>
+
+ <container id="visitor" version="1.0">
+ <nodes count="2">
+ <resources vcpu="8" memory="16Gb" disk="32Gb" />
+ </nodes>
+
+ <search/>
+ </container>
+
+ <content id="all" version="1.0">
+ <nodes deploy:tags="gamma" deploy:region="aws-ap-southeast-1a" count="10" groups="10">
+ <resources vcpu="72" memory="144Gb" disk="1800Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes deploy:tags="beta" deploy:region="aws-us-east-1b" count="10" groups="10">
+ <resources vcpu="48" memory="96Gb" disk="1800Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes deploy:environment="dev" deploy:instance="staging1" count="1" groups="1">
+ <resources vcpu="48" memory="96Gb" disk="1800Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes deploy:tags="gamma" deploy:region="aws-us-east-1b" count="25" groups="25">
+ <resources vcpu="96" memory="192Gb" disk="3600Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes count="2" groups="2" />
+
+ <nodes deploy:environment="dev" deploy:instance="staging1-root" count="2" groups="2">
+ <resources vcpu="48" memory="96Gb" disk="1800Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes deploy:tags="cd" deploy:region="aws-ap-southeast-1a" count="2" groups="2">
+ <resources vcpu="72" memory="144Gb" disk="1800Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes deploy:tags="delta" deploy:region="aws-ap-southeast-1a" count="2" groups="2">
+ <resources vcpu="72" memory="144Gb" disk="1800Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes deploy:tags="prod-a" deploy:region="aws-ap-southeast-1a" count="2" groups="2">
+ <resources vcpu="72" memory="144Gb" disk="1800Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes deploy:tags="prod-b" deploy:region="aws-ap-southeast-1a" count="2" groups="2">
+ <resources vcpu="72" memory="144Gb" disk="1800Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes deploy:tags="prod-c" deploy:region="aws-ap-southeast-1a" count="2" groups="2">
+ <resources vcpu="72" memory="144Gb" disk="1800Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes deploy:tags="cd" deploy:region="aws-eu-west-1a" count="2" groups="2">
+ <resources vcpu="72" memory="144Gb" disk="1800Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes deploy:tags="delta" deploy:region="aws-eu-west-1a" count="2" groups="2">
+ <resources vcpu="72" memory="144Gb" disk="1800Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes deploy:tags="prod-a" deploy:region="aws-eu-west-1a" count="2" groups="2">
+ <resources vcpu="72" memory="144Gb" disk="1800Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes deploy:tags="prod-b" deploy:region="aws-eu-west-1a" count="2" groups="2">
+ <resources vcpu="72" memory="144Gb" disk="1800Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes deploy:tags="prod-c" deploy:region="aws-eu-west-1a" count="2" groups="2">
+ <resources vcpu="72" memory="144Gb" disk="1800Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes deploy:tags="cd" deploy:region="aws-us-east-1b" count="2" groups="2">
+ <resources vcpu="96" memory="192Gb" disk="3600Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes deploy:tags="delta" deploy:region="aws-us-east-1b" count="2" groups="2">
+ <resources vcpu="96" memory="192Gb" disk="3600Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes deploy:tags="prod-a" deploy:region="aws-us-east-1b" count="2" groups="2">
+ <resources vcpu="96" memory="192Gb" disk="3600Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes deploy:tags="prod-b" deploy:region="aws-us-east-1b" count="2" groups="2">
+ <resources vcpu="96" memory="192Gb" disk="3600Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes deploy:tags="prod-c" deploy:region="aws-us-east-1b" count="2" groups="2">
+ <resources vcpu="96" memory="192Gb" disk="3600Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes deploy:tags="cd" deploy:region="aws-us-west-2a" count="2" groups="2">
+ <resources vcpu="96" memory="192Gb" disk="3600Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes deploy:tags="delta" deploy:region="aws-us-west-2a" count="2" groups="2">
+ <resources vcpu="96" memory="192Gb" disk="3600Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes deploy:tags="prod-a" deploy:region="aws-us-west-2a" count="2" groups="2">
+ <resources vcpu="96" memory="192Gb" disk="3600Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes deploy:tags="prod-b" deploy:region="aws-us-west-2a" count="2" groups="2">
+ <resources vcpu="96" memory="192Gb" disk="3600Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes deploy:tags="prod-c" deploy:region="aws-us-west-2a" count="2" groups="2">
+ <resources vcpu="96" memory="192Gb" disk="3600Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes deploy:tags="beta" deploy:region="aws-eu-west-1a" count="3" groups="3">
+ <resources vcpu="36" memory="72Gb" disk="900Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes deploy:tags="beta" deploy:region="aws-us-west-2a" count="3" groups="3">
+ <resources vcpu="48" memory="96Gb" disk="1800Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes deploy:environment="perf" count="4" groups="4">
+ <resources vcpu="48" memory="96Gb" disk="1800Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes deploy:tags="beta" deploy:region="aws-ap-southeast-1a" count="4" groups="4">
+ <resources vcpu="36" memory="72Gb" disk="900Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes deploy:tags="gamma" deploy:region="aws-us-west-2a" count="5" groups="5">
+ <resources vcpu="96" memory="192Gb" disk="3600Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <nodes deploy:tags="gamma" deploy:region="aws-eu-west-1a" count="8" groups="8">
+ <resources vcpu="72" memory="144Gb" disk="1800Gb" disk-speed="fast" storage-type="local" />
+ </nodes>
+
+ <redundancy>1</redundancy>
+
+ </content>
+
+ <content id="filedocument" version="1.0">
+ <nodes count="2" groups="2">
+ <resources vcpu="4" memory="8Gb" disk="32Gb" />
+ </nodes>
+ <redundancy>1</redundancy>
+ </content>
+
+</services>
\ No newline at end of file
diff --git a/config-model-api/abi-spec.json b/config-model-api/abi-spec.json
index e5c2be4eb43..9288debdb72 100644
--- a/config-model-api/abi-spec.json
+++ b/config-model-api/abi-spec.json
@@ -723,5 +723,20 @@
"fields" : [
"public static final com.yahoo.config.application.api.ValidationOverrides empty"
]
+ },
+ "com.yahoo.config.application.api.xml.DeploymentSpecXmlReader" : {
+ "superClass" : "java.lang.Object",
+ "interfaces" : [ ],
+ "attributes" : [
+ "public"
+ ],
+ "methods" : [
+ "public void <init>(boolean, java.time.Clock)",
+ "public void <init>()",
+ "public void <init>(boolean)",
+ "public com.yahoo.config.application.api.DeploymentSpec read(java.io.Reader)",
+ "public com.yahoo.config.application.api.DeploymentSpec read(java.lang.String)"
+ ],
+ "fields" : [ ]
}
-}
\ No newline at end of file
+}
diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/xml/package-info.java b/config-model-api/src/main/java/com/yahoo/config/application/api/xml/package-info.java
new file mode 100644
index 00000000000..a88368469c1
--- /dev/null
+++ b/config-model-api/src/main/java/com/yahoo/config/application/api/xml/package-info.java
@@ -0,0 +1,7 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+@ExportPackage
+@PublicApi
+package com.yahoo.config.application.api.xml;
+
+import com.yahoo.api.annotations.PublicApi;
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
index 2c4b81c6db8..cb42f877841 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
@@ -148,20 +148,20 @@ public class VespaMetricSet {
{
List<String> suffixes = List.of("sum", "count", "last", "min", "max");
- addMetric(metrics, "jdisc.thread_pool.unhandled_exceptions", suffixes);
- addMetric(metrics, "jdisc.thread_pool.work_queue.capacity", suffixes);
- addMetric(metrics, "jdisc.thread_pool.work_queue.size", suffixes);
- addMetric(metrics, "jdisc.thread_pool.rejected_tasks", suffixes);
- addMetric(metrics, "jdisc.thread_pool.size", suffixes);
- addMetric(metrics, "jdisc.thread_pool.max_allowed_size", suffixes);
- addMetric(metrics, "jdisc.thread_pool.active_threads", suffixes);
-
- addMetric(metrics, "jdisc.http.jetty.threadpool.thread.max", suffixes);
- addMetric(metrics, "jdisc.http.jetty.threadpool.thread.min", suffixes);
- addMetric(metrics, "jdisc.http.jetty.threadpool.thread.reserved", suffixes);
- addMetric(metrics, "jdisc.http.jetty.threadpool.thread.busy", suffixes);
- addMetric(metrics, "jdisc.http.jetty.threadpool.thread.total", suffixes);
- addMetric(metrics, "jdisc.http.jetty.threadpool.queue.size", suffixes);
+ addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_UNHANDLED_EXCEPTIONS.baseName(), suffixes);
+ addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_WORK_QUEUE_CAPACITY.baseName(), suffixes);
+ addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_WORK_QUEUE_SIZE.baseName(), suffixes);
+ addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_REJECTED_TASKS.baseName(), suffixes);
+ addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_SIZE.baseName(), suffixes);
+ addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_MAX_ALLOWED_SIZE.baseName(), suffixes);
+ addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_ACTIVE_THREADS.baseName(), suffixes);
+
+ addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_MAX_THREADS.baseName(), suffixes);
+ addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_MIN_THREADS.baseName(), suffixes);
+ addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_RESERVED_THREADS.baseName(), suffixes);
+ addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_BUSY_THREADS.baseName(), suffixes);
+ addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_TOTAL_THREADS.baseName(), suffixes);
+ addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_QUEUE_SIZE.baseName(), suffixes);
}
metrics.add(new Metric("httpapi_latency.max"));
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java
index 6589d9d721a..7629fdd8bfd 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java
@@ -11,7 +11,9 @@ import com.yahoo.config.application.XmlPreProcessor;
import com.yahoo.config.application.api.ApplicationMetaData;
import com.yahoo.config.application.api.ApplicationPackage;
import com.yahoo.config.application.api.DeployLogger;
+import com.yahoo.config.application.api.DeploymentInstanceSpec;
import com.yahoo.config.application.api.FileRegistry;
+import com.yahoo.config.application.api.xml.DeploymentSpecXmlReader;
import com.yahoo.config.model.api.ConfigDefinitionRepo;
import com.yahoo.config.model.api.ContainerEndpoint;
import com.yahoo.config.model.api.EndpointCertificateMetadata;
@@ -24,6 +26,7 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.AthenzDomain;
import com.yahoo.config.provision.CloudAccount;
import com.yahoo.config.provision.DockerImage;
+import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.Tags;
import com.yahoo.config.provision.Zone;
import com.yahoo.container.jdisc.secretstore.SecretStore;
@@ -267,11 +270,17 @@ public class SessionPreparer {
File hostsXml = applicationPackage.getFileReference(Path.fromString("hosts.xml"));
// Validate after doing our own preprocessing on these two files
- if(servicesXml.exists()) {
- vespaPreprocess(applicationPackageDir.getAbsoluteFile(), servicesXml, applicationPackage.getMetaData());
+ ApplicationMetaData meta = applicationPackage.getMetaData();
+ InstanceName instance = meta.getApplicationId().instance();
+ Tags tags = applicationPackage.getDeployment().map(new DeploymentSpecXmlReader(false)::read)
+ .flatMap(spec -> spec.instance(instance))
+ .map(DeploymentInstanceSpec::tags)
+ .orElse(Tags.empty());
+ if (servicesXml.exists()) {
+ vespaPreprocess(applicationPackageDir.getAbsoluteFile(), servicesXml, meta, tags);
}
- if(hostsXml.exists()) {
- vespaPreprocess(applicationPackageDir.getAbsoluteFile(), hostsXml, applicationPackage.getMetaData());
+ if (hostsXml.exists()) {
+ vespaPreprocess(applicationPackageDir.getAbsoluteFile(), hostsXml, meta, tags);
}
if (zone.system().isPublic()) {
@@ -315,14 +324,15 @@ public class SessionPreparer {
}
}
- void vespaPreprocess(File appDir, File inputXml, ApplicationMetaData metaData) {
+ void vespaPreprocess(File appDir, File inputXml, ApplicationMetaData metaData, Tags tags) {
try {
+ InstanceName instance = metaData.getApplicationId().instance();
new XmlPreProcessor(appDir,
inputXml,
- metaData.getApplicationId().instance(),
+ instance,
zone.environment(),
zone.region(),
- metaData.getTags())
+ tags)
.run();
} catch (ParserConfigurationException | IOException | SAXException | TransformerException e) {
throw new RuntimeException(e);
diff --git a/container-core/src/main/java/com/yahoo/container/handler/threadpool/ThreadPoolMetric.java b/container-core/src/main/java/com/yahoo/container/handler/threadpool/ThreadPoolMetric.java
index 1c7a1cc4ebe..3f53b05dd6a 100644
--- a/container-core/src/main/java/com/yahoo/container/handler/threadpool/ThreadPoolMetric.java
+++ b/container-core/src/main/java/com/yahoo/container/handler/threadpool/ThreadPoolMetric.java
@@ -2,6 +2,7 @@
package com.yahoo.container.handler.threadpool;
import com.yahoo.jdisc.Metric;
+import com.yahoo.metrics.ContainerMetrics;
import java.util.Map;
@@ -24,28 +25,36 @@ class ThreadPoolMetric {
void reportRejectRequest() {
metric.add("serverRejectedRequests", 1L, defaultContext);
- metric.add("jdisc.thread_pool.rejected_tasks", 1L, defaultContext);
+ metric.add(ContainerMetrics.JDISC_THREAD_POOL_REJECTED_TASKS.baseName(), 1L, defaultContext);
}
void reportThreadPoolSize(long size) {
metric.set("serverThreadPoolSize", size, defaultContext);
- metric.set("jdisc.thread_pool.size", size, defaultContext);
+ metric.set(ContainerMetrics.JDISC_THREAD_POOL_SIZE.baseName(), size, defaultContext);
}
- void reportMaxAllowedThreadPoolSize(long size) { metric.set("jdisc.thread_pool.max_allowed_size", size, defaultContext); }
+ void reportMaxAllowedThreadPoolSize(long size) {
+ metric.set(ContainerMetrics.JDISC_THREAD_POOL_MAX_ALLOWED_SIZE.baseName(), size, defaultContext);
+ }
void reportActiveThreads(long threads) {
metric.set("serverActiveThreads", threads, defaultContext);
- metric.set("jdisc.thread_pool.active_threads", threads, defaultContext);
+ metric.set(ContainerMetrics.JDISC_THREAD_POOL_ACTIVE_THREADS.baseName(), threads, defaultContext);
+ }
+
+ void reportWorkQueueCapacity(long capacity) {
+ metric.set(ContainerMetrics.JDISC_THREAD_POOL_WORK_QUEUE_CAPACITY.baseName(), capacity, defaultContext);
+ }
+
+ void reportWorkQueueSize(long size) {
+ metric.set(ContainerMetrics.JDISC_THREAD_POOL_WORK_QUEUE_SIZE.baseName(), size, defaultContext);
}
- void reportWorkQueueCapacity(long capacity) { metric.set("jdisc.thread_pool.work_queue.capacity", capacity, defaultContext); }
- void reportWorkQueueSize(long size) { metric.set("jdisc.thread_pool.work_queue.size", size, defaultContext); }
void reportUnhandledException(Throwable t) {
Metric.Context ctx = metric.createContext(Map.of(
THREAD_POOL_NAME_DIMENSION, threadPoolName,
"exception", t.getClass().getSimpleName()));
- metric.set("jdisc.thread_pool.unhandled_exceptions", 1L, ctx);
+ metric.set(ContainerMetrics.JDISC_THREAD_POOL_UNHANDLED_EXCEPTIONS.baseName(), 1L, ctx);
}
}
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/MetricDefinitions.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/MetricDefinitions.java
index 7b1c12dd499..e16e4b51959 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/MetricDefinitions.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/MetricDefinitions.java
@@ -66,13 +66,13 @@ class MetricDefinitions {
static final String SSL_HANDSHAKE_FAILURE_UNKNOWN = "jdisc.http.ssl.handshake.failure.unknown";
static final String SSL_HANDSHAKE_FAILURE_CONNECTION_CLOSED = "jdisc.http.ssl.handshake.failure.connection_closed";
- static final String JETTY_THREADPOOL_MAX_THREADS = "jdisc.http.jetty.threadpool.thread.max";
- static final String JETTY_THREADPOOL_MIN_THREADS = "jdisc.http.jetty.threadpool.thread.min";
- static final String JETTY_THREADPOOL_RESERVED_THREADS = "jdisc.http.jetty.threadpool.thread.reserved";
- static final String JETTY_THREADPOOL_BUSY_THREADS = "jdisc.http.jetty.threadpool.thread.busy";
- static final String JETTY_THREADPOOL_IDLE_THREADS = "jdisc.http.jetty.threadpool.thread.idle";
- static final String JETTY_THREADPOOL_TOTAL_THREADS = "jdisc.http.jetty.threadpool.thread.total";
- static final String JETTY_THREADPOOL_QUEUE_SIZE = "jdisc.http.jetty.threadpool.queue.size";
+ static final String JETTY_THREADPOOL_MAX_THREADS = ContainerMetrics.JETTY_THREADPOOL_MAX_THREADS.baseName();
+ static final String JETTY_THREADPOOL_MIN_THREADS = ContainerMetrics.JETTY_THREADPOOL_MIN_THREADS.baseName();
+ static final String JETTY_THREADPOOL_RESERVED_THREADS = ContainerMetrics.JETTY_THREADPOOL_RESERVED_THREADS.baseName();
+ static final String JETTY_THREADPOOL_BUSY_THREADS = ContainerMetrics.JETTY_THREADPOOL_BUSY_THREADS.baseName();
+ static final String JETTY_THREADPOOL_IDLE_THREADS = ContainerMetrics.JETTY_THREADPOOL_IDLE_THREADS.baseName();
+ static final String JETTY_THREADPOOL_TOTAL_THREADS = ContainerMetrics.JETTY_THREADPOOL_TOTAL_THREADS.baseName();
+ static final String JETTY_THREADPOOL_QUEUE_SIZE = ContainerMetrics.JETTY_THREADPOOL_QUEUE_SIZE.baseName();
static final String FILTERING_REQUEST_HANDLED = "jdisc.http.filtering.request.handled";
static final String FILTERING_REQUEST_UNHANDLED = "jdisc.http.filtering.request.unhandled";
diff --git a/container-core/src/main/java/com/yahoo/metrics/ContainerMetrics.java b/container-core/src/main/java/com/yahoo/metrics/ContainerMetrics.java
index 5f342333a8f..ba160873c4f 100644
--- a/container-core/src/main/java/com/yahoo/metrics/ContainerMetrics.java
+++ b/container-core/src/main/java/com/yahoo/metrics/ContainerMetrics.java
@@ -10,7 +10,37 @@ public enum ContainerMetrics {
HTTP_STATUS_3XX("http.status.3xx", Unit.RESPONSE, "Number of responses with a 3xx status"),
HTTP_STATUS_4XX("http.status.4xx", Unit.RESPONSE, "Number of responses with a 4xx status"),
HTTP_STATUS_5XX("http.status.5xx", Unit.RESPONSE, "Number of responses with a 5xx status"),
- JDISC_GC_MS("jdisc.gc.ms", Unit.MILLISECOND, "Time spent in garbage collection");
+ JDISC_GC_MS("jdisc.gc.ms", Unit.MILLISECOND, "Time spent in garbage collection"),
+
+ JDISC_THREAD_POOL_UNHANDLED_EXCEPTIONS("jdisc.thread_pool.unhandled_exceptions", Unit.THREAD,
+ "Number of exceptions thrown by tasks"),
+ JDISC_THREAD_POOL_WORK_QUEUE_CAPACITY("jdisc.thread_pool.work_queue.capacity", Unit.THREAD,
+ "Capacity of the task queue"),
+ JDISC_THREAD_POOL_WORK_QUEUE_SIZE("jdisc.thread_pool.work_queue.size", Unit.THREAD,
+ "Size of the task queue"),
+ JDISC_THREAD_POOL_REJECTED_TASKS("jdisc.thread_pool.rejected_tasks", Unit.THREAD,
+ "Number of tasks rejected by the thread pool"),
+ JDISC_THREAD_POOL_SIZE("jdisc.thread_pool.size", Unit.THREAD,
+ "Size of the thread pool"),
+ JDISC_THREAD_POOL_MAX_ALLOWED_SIZE("jdisc.thread_pool.max_allowed_size", Unit.THREAD,
+ "The maximum allowed number of threads in the pool"),
+ JDISC_THREAD_POOL_ACTIVE_THREADS("jdisc.thread_pool.active_threads", Unit.THREAD,
+ "Number of threads that are active"),
+
+ JETTY_THREADPOOL_MAX_THREADS("jdisc.http.jetty.threadpool.thread.max", Unit.THREAD,
+ "Configured maximum number of threads"),
+ JETTY_THREADPOOL_MIN_THREADS("jdisc.http.jetty.threadpool.thread.min", Unit.THREAD,
+ "Configured minimum number of threads"),
+ JETTY_THREADPOOL_RESERVED_THREADS("jdisc.http.jetty.threadpool.thread.reserved", Unit.THREAD,
+ "Configured number of reserved threads or -1 for heuristic"),
+ JETTY_THREADPOOL_BUSY_THREADS("jdisc.http.jetty.threadpool.thread.busy", Unit.THREAD,
+ "Number of threads executing internal and transient jobs"),
+ JETTY_THREADPOOL_IDLE_THREADS("jdisc.http.jetty.threadpool.thread.idle", Unit.THREAD,
+ "Number of idle threads"),
+ JETTY_THREADPOOL_TOTAL_THREADS("jdisc.http.jetty.threadpool.thread.total", Unit.THREAD,
+ "Current number of threads"),
+ JETTY_THREADPOOL_QUEUE_SIZE("jdisc.http.jetty.threadpool.queue.size", Unit.THREAD,
+ "Current size of the job queue");
private final String name;
private final Unit unit;
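For reference, a small usage sketch of the new constants (the class and variable names here are illustrative): producers of these metrics report against the shared declaration instead of repeating raw name strings, which is what the ThreadPoolMetric and MetricDefinitions changes below do.

import com.yahoo.jdisc.Metric;
import com.yahoo.metrics.ContainerMetrics;

class QueueSizeReporterSketch {

    private final Metric metric;
    private final Metric.Context context;

    QueueSizeReporterSketch(Metric metric, Metric.Context context) {
        this.metric = metric;
        this.context = context;
    }

    // Report the current work-queue size under the shared metric name.
    void reportQueueSize(long size) {
        metric.set(ContainerMetrics.JDISC_THREAD_POOL_WORK_QUEUE_SIZE.baseName(), size, context);
    }
}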
diff --git a/container-core/src/test/java/com/yahoo/container/handler/threadpool/ContainerThreadPoolImplTest.java b/container-core/src/test/java/com/yahoo/container/handler/threadpool/ContainerThreadPoolImplTest.java
index 4639022d767..606f8052670 100644
--- a/container-core/src/test/java/com/yahoo/container/handler/threadpool/ContainerThreadPoolImplTest.java
+++ b/container-core/src/test/java/com/yahoo/container/handler/threadpool/ContainerThreadPoolImplTest.java
@@ -6,6 +6,7 @@ import com.yahoo.concurrent.Receiver;
import com.yahoo.container.protect.ProcessTerminator;
import com.yahoo.container.test.MetricMock;
import com.yahoo.jdisc.Metric;
+import com.yahoo.metrics.ContainerMetrics;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
@@ -81,10 +82,10 @@ public class ContainerThreadPoolImplTest {
assertEquals(1200, executor.getQueue().remainingCapacity());
assertEquals(7, metrics.innvocations().size());
assertEquals(3L, metrics.innvocations().get("serverThreadPoolSize").val);
- assertEquals(3L, metrics.innvocations().get("jdisc.thread_pool.max_allowed_size").val);
+ assertEquals(3L, metrics.innvocations().get(ContainerMetrics.JDISC_THREAD_POOL_MAX_ALLOWED_SIZE.baseName()).val);
assertEquals(0L, metrics.innvocations().get("serverActiveThreads").val);
- assertEquals(1200L, metrics.innvocations().get("jdisc.thread_pool.work_queue.capacity").val);
- assertEquals(0L, metrics.innvocations().get("jdisc.thread_pool.work_queue.size").val);
+ assertEquals(1200L, metrics.innvocations().get(ContainerMetrics.JDISC_THREAD_POOL_WORK_QUEUE_CAPACITY.baseName()).val);
+ assertEquals(0L, metrics.innvocations().get(ContainerMetrics.JDISC_THREAD_POOL_WORK_QUEUE_SIZE.baseName()).val);
}
@Test
@@ -95,10 +96,10 @@ public class ContainerThreadPoolImplTest {
assertEquals(0, executor.getQueue().remainingCapacity());
assertEquals(7, metrics.innvocations().size());
assertEquals(64L, metrics.innvocations().get("serverThreadPoolSize").val);
- assertEquals(64L, metrics.innvocations().get("jdisc.thread_pool.max_allowed_size").val);
+ assertEquals(64L, metrics.innvocations().get(ContainerMetrics.JDISC_THREAD_POOL_MAX_ALLOWED_SIZE.baseName()).val);
assertEquals(0L, metrics.innvocations().get("serverActiveThreads").val);
- assertEquals(64L, metrics.innvocations().get("jdisc.thread_pool.work_queue.capacity").val);
- assertEquals(0L, metrics.innvocations().get("jdisc.thread_pool.work_queue.size").val);
+ assertEquals(64L, metrics.innvocations().get(ContainerMetrics.JDISC_THREAD_POOL_WORK_QUEUE_CAPACITY.baseName()).val);
+ assertEquals(0L, metrics.innvocations().get(ContainerMetrics.JDISC_THREAD_POOL_WORK_QUEUE_SIZE.baseName()).val);
}
@Test
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackage.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackage.java
index 5becf5dad54..4a8bc3cd09a 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackage.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackage.java
@@ -2,8 +2,10 @@
package com.yahoo.vespa.hosted.controller.application.pkg;
import com.google.common.hash.Funnel;
+import com.google.common.hash.HashFunction;
import com.google.common.hash.Hasher;
import com.google.common.hash.Hashing;
+import com.google.common.hash.HashingOutputStream;
import com.yahoo.component.Version;
import com.yahoo.vespa.archive.ArchiveStreamReader;
import com.yahoo.vespa.archive.ArchiveStreamReader.ArchiveFile;
@@ -18,7 +20,6 @@ import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.Tags;
-import com.yahoo.security.X509CertificateUtils;
import com.yahoo.slime.Inspector;
import com.yahoo.slime.Slime;
import com.yahoo.slime.SlimeUtils;
@@ -27,12 +28,13 @@ import com.yahoo.vespa.hosted.controller.deployment.ZipBuilder;
import com.yahoo.yolean.Exceptions;
import java.io.ByteArrayInputStream;
+import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStream;
+import java.io.UncheckedIOException;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.nio.file.Paths;
-import java.security.cert.X509Certificate;
import java.time.Duration;
import java.time.Instant;
import java.time.ZoneOffset;
@@ -42,6 +44,7 @@ import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
+import java.util.Random;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
@@ -219,20 +222,18 @@ public class ApplicationPackage {
// Hashes all files and settings that require a deployment to be forwarded to configservers
private String calculateBundleHash(byte[] zippedContent) {
Predicate<String> entryMatcher = name -> ! name.endsWith(deploymentFile) && ! name.endsWith(buildMetaFile);
- SortedMap<String, Long> crcByEntry = new TreeMap<>();
Options options = Options.standard().pathPredicate(entryMatcher);
+ HashingOutputStream hashOut = new HashingOutputStream(Hashing.murmur3_128(-1), OutputStream.nullOutputStream());
ArchiveFile file;
try (ArchiveStreamReader reader = ArchiveStreamReader.ofZip(new ByteArrayInputStream(zippedContent), options)) {
- OutputStream discard = OutputStream.nullOutputStream();
- while ((file = reader.readNextTo(discard)) != null) {
- crcByEntry.put(file.path().toString(), file.crc32().orElse(-1));
+ while ((file = reader.readNextTo(hashOut)) != null) {
+ hashOut.write(file.path().toString().getBytes(UTF_8));
}
}
- Funnel<SortedMap<String, Long>> funnel = (from, into) -> from.forEach((key, value) -> {
- into.putBytes(key.getBytes());
- into.putLong(value);
- });
- return hasher().putObject(crcByEntry, funnel)
+ catch (IOException e) {
+ throw new UncheckedIOException(e);
+ }
+ return hasher().putLong(hashOut.hash().asLong())
.putInt(deploymentSpec.deployableHashCode())
.hash().toString();
}
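The change above replaces the sorted per-entry CRC-32 map with one streaming hash over all entry contents and names. A self-contained sketch of that idea using the same Guava primitives (class and method names here are illustrative):

import com.google.common.hash.Hashing;
import com.google.common.hash.HashingOutputStream;

import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.util.Map;

class StreamingHashSketch {

    // Hash a set of named entries by streaming everything through a single hasher:
    // each entry's content is written through the HashingOutputStream, then its name
    // is mixed in, mirroring the readNextTo loop in calculateBundleHash.
    static long hash(Map<String, byte[]> entries) throws IOException {
        HashingOutputStream out = new HashingOutputStream(Hashing.murmur3_128(-1),
                                                          OutputStream.nullOutputStream());
        for (Map.Entry<String, byte[]> entry : entries.entrySet()) {
            out.write(entry.getValue());                                  // content bytes
            out.write(entry.getKey().getBytes(StandardCharsets.UTF_8));   // then the entry name
        }
        return out.hash().asLong();                                       // 64 bits of the 128-bit hash
    }
}

Unlike the sorted map, the result depends on the order in which entries are read, which appears acceptable here since the hash only needs to be stable for identical packages.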
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java
index 6031239e0a9..dd285917f2a 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentStatus.java
@@ -33,8 +33,10 @@ import com.yahoo.vespa.hosted.controller.versions.VespaVersion.Confidence;
import java.time.Duration;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
+import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
+import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
@@ -738,15 +740,16 @@ public class DeploymentStatus {
*/
private List<JobId> prerequisiteTests(JobId prodJob, JobType testType) {
List<JobId> tests = new ArrayList<>();
- Set<InstanceName> instances = new LinkedHashSet<>();
- instances.add(prodJob.application().instance());
- while ( ! instances.isEmpty()) {
- Iterator<InstanceName> iterator = instances.iterator();
- InstanceName instance = iterator.next();
- iterator.remove();
+ Set<InstanceName> seen = new LinkedHashSet<>();
+ Deque<InstanceName> pending = new ArrayDeque<>();
+ pending.add(prodJob.application().instance());
+ while ( ! pending.isEmpty()) {
+ InstanceName instance = pending.poll();
Optional<JobId> test = declaredTest(application().id().instance(instance), testType);
if (test.isPresent()) tests.add(test.get());
- else instanceSteps().get(instance).dependencies().stream().map(StepStatus::instance).forEach(instances::add);
+ else instanceSteps().get(instance).dependencies().stream().map(StepStatus::instance).forEach(dependency -> {
+ if (seen.add(dependency)) pending.add(dependency);
+ });
}
if (tests.isEmpty()) tests.add(firstDeclaredOrElseImplicitTest(testType));
return tests;
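
The rewritten prerequisiteTests walks the instance dependency graph with an explicit work queue plus a separate "seen" set, so an instance that is a dependency of several others is queued only once and the set is never mutated while it is being iterated. A generic sketch of that traversal, assuming nothing about the controller types (DependencyWalk and breadthFirst are made-up names):

    import java.util.ArrayDeque;
    import java.util.ArrayList;
    import java.util.Deque;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;
    import java.util.function.Function;

    class DependencyWalk {

        /** Visits every node reachable from start exactly once, in breadth-first order. */
        static <T> List<T> breadthFirst(T start, Function<T, List<T>> dependencies) {
            List<T> visited = new ArrayList<>();
            Set<T> seen = new HashSet<>();
            Deque<T> pending = new ArrayDeque<>();
            seen.add(start);
            pending.add(start);
            while ( ! pending.isEmpty()) {
                T current = pending.poll();
                visited.add(current);
                for (T dependency : dependencies.apply(current))
                    if (seen.add(dependency))   // add() returns false for already-seen nodes, so each is queued at most once
                        pending.add(dependency);
            }
            return visited;
        }
    }
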
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/DeploymentQuotaCalculatorTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/DeploymentQuotaCalculatorTest.java
index db45933f498..ff0bc4c8876 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/DeploymentQuotaCalculatorTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/DeploymentQuotaCalculatorTest.java
@@ -13,7 +13,6 @@ import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.api.integration.billing.Quota;
-import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RevisionId;
import com.yahoo.vespa.hosted.controller.api.integration.noderepository.ApplicationData;
import com.yahoo.vespa.hosted.controller.deployment.RevisionHistory;
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java
index 6f3d2d74bc6..f0182ae36e4 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java
@@ -27,27 +27,21 @@ import com.yahoo.vespa.hosted.node.admin.task.util.process.Terminal;
import java.net.URI;
import java.nio.file.Files;
import java.nio.file.Path;
-import java.nio.file.Paths;
import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.Comparator;
-import java.util.HashMap;
import java.util.List;
-import java.util.Map;
import java.util.Optional;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.logging.Level;
import java.util.logging.Logger;
-import java.util.stream.Collectors;
import static com.yahoo.vespa.hosted.node.admin.maintenance.disk.DiskCleanupRule.Priority;
-import static com.yahoo.yolean.Exceptions.uncheck;
/**
* @author freva
@@ -172,33 +166,7 @@ public class StorageMaintainer {
/** Checks if container has any new coredumps, reports and archives them if so */
public void handleCoreDumpsForContainer(NodeAgentContext context, Optional<Container> container, boolean throwIfCoreBeingWritten) {
if (context.isDisabled(NodeAgentTask.CoreDumps)) return;
- coredumpHandler.converge(context, () -> getCoredumpNodeAttributes(context, container),
- container.map(Container::image), throwIfCoreBeingWritten);
- }
-
- private Map<String, Object> getCoredumpNodeAttributes(NodeAgentContext context, Optional<Container> container) {
- Map<String, String> attributes = new HashMap<>();
- attributes.put("hostname", context.node().hostname());
- attributes.put("system", context.zone().getSystemName().value());
- attributes.put("region", context.zone().getRegionName().value());
- attributes.put("environment", context.zone().getEnvironment().value());
- attributes.put("flavor", context.node().flavor());
- attributes.put("kernel_version", System.getProperty("os.version"));
- attributes.put("cpu_microcode_version", getMicrocodeVersion());
-
- container.map(c -> c.image().asString()).ifPresent(image -> attributes.put("docker_image", image));
- container.flatMap(c -> c.image().tag()).ifPresent(version -> attributes.put("vespa_version", version));
- context.node().parentHostname().ifPresent(parent -> attributes.put("parent_hostname", parent));
- context.node().owner().ifPresent(owner -> {
- attributes.put("tenant", owner.tenant().value());
- attributes.put("application", owner.application().value());
- attributes.put("instance", owner.instance().value());
- });
- context.node().membership().ifPresent(membership -> {
- attributes.put("cluster_id", membership.clusterId());
- attributes.put("cluster_type", membership.type().value());
- });
- return Collections.unmodifiableMap(attributes);
+ coredumpHandler.converge(context, container.map(Container::image), throwIfCoreBeingWritten);
}
/**
@@ -225,18 +193,4 @@ public class StorageMaintainer {
if (context.nodeType() != NodeType.tenant)
context.paths().of("/").getFileSystem().createRoot();
}
-
- private String getMicrocodeVersion() {
- String output = uncheck(() -> Files.readAllLines(Paths.get("/proc/cpuinfo")).stream()
- .filter(line -> line.startsWith("microcode"))
- .findFirst()
- .orElse("microcode : UNKNOWN"));
-
- String[] results = output.split(":");
- if (results.length != 2) {
- throw ConvergenceException.ofError("Result from detect microcode command not as expected: " + output);
- }
-
- return results[1].trim();
- }
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandler.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandler.java
index e2da984fa10..bfc4c09cf9e 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandler.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandler.java
@@ -4,7 +4,6 @@ package com.yahoo.vespa.hosted.node.admin.maintenance.coredump;
import com.yahoo.config.provision.DockerImage;
import com.yahoo.security.KeyId;
import com.yahoo.security.SecretSharedKey;
-import com.yahoo.security.SharedKeyGenerator;
import com.yahoo.vespa.flags.FetchVector;
import com.yahoo.vespa.flags.FlagSource;
import com.yahoo.vespa.flags.Flags;
@@ -29,11 +28,9 @@ import java.io.OutputStream;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
-import java.nio.file.Paths;
import java.time.Clock;
import java.util.Comparator;
import java.util.List;
-import java.util.Map;
import java.util.Optional;
import java.util.UUID;
import java.util.function.Predicate;
@@ -101,8 +98,7 @@ public class CoredumpHandler {
}
- public void converge(NodeAgentContext context, Supplier<Map<String, Object>> nodeAttributesSupplier,
- Optional<DockerImage> dockerImage, boolean throwIfCoreBeingWritten) {
+ public void converge(NodeAgentContext context, Optional<DockerImage> dockerImage, boolean throwIfCoreBeingWritten) {
ContainerPath containerCrashPath = context.paths().of(crashPatchInContainer, context.users().vespa());
ContainerPath containerProcessingPath = containerCrashPath.resolve(PROCESSING_DIRECTORY_NAME);
@@ -328,7 +324,7 @@ public class CoredumpHandler {
}
private String getMicrocodeVersion() {
- String output = uncheck(() -> Files.readAllLines(Paths.get("/proc/cpuinfo")).stream()
+ String output = uncheck(() -> Files.readAllLines(doneCoredumpsPath.getFileSystem().getPath("/proc/cpuinfo")).stream()
.filter(line -> line.startsWith("microcode"))
.findFirst()
.orElse("microcode : UNKNOWN"));
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/DefaultEnvWriter.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/DefaultEnvWriter.java
index c5a28c26786..1889332ee49 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/DefaultEnvWriter.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/DefaultEnvWriter.java
@@ -5,7 +5,6 @@ import com.yahoo.vespa.hosted.node.admin.component.TaskContext;
import java.nio.file.Files;
import java.nio.file.Path;
-import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
@@ -62,7 +61,7 @@ public class DefaultEnvWriter {
return false;
} else {
context.log(logger, "Updating " + defaultEnvFile.toString());
- Path tempFile = Paths.get(defaultEnvFile + ".tmp");
+ Path tempFile = defaultEnvFile.resolveSibling(defaultEnvFile.getFileName() + ".tmp");
uncheck(() -> Files.write(tempFile, newDefaultEnvLines));
uncheck(() -> Files.move(tempFile, defaultEnvFile, ATOMIC_MOVE));
return true;
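
The temp file is now derived with resolveSibling, which keeps it in the same directory (and therefore on the same file store) as the target; that is a precondition for Files.move with ATOMIC_MOVE to succeed. A minimal sketch of the write-then-atomically-swap pattern, with AtomicWrite as an illustrative name:

    import java.io.IOException;
    import java.io.UncheckedIOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.List;

    import static java.nio.file.StandardCopyOption.ATOMIC_MOVE;

    class AtomicWrite {

        /** Writes lines to a temp file next to target, then atomically moves it into place. */
        static void replace(Path target, List<String> lines) {
            // resolveSibling keeps the temp file in the target's directory, which ATOMIC_MOVE requires
            Path tempFile = target.resolveSibling(target.getFileName() + ".tmp");
            try {
                Files.write(tempFile, lines);
                Files.move(tempFile, target, ATOMIC_MOVE);
            } catch (IOException e) {
                throw new UncheckedIOException(e);
            }
        }
    }

Readers of the target file either see the old content or the complete new content, never a partially written file.
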
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixPath.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixPath.java
index ac5035216e9..fbef3def446 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixPath.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixPath.java
@@ -12,7 +12,6 @@ import java.nio.file.NoSuchFileException;
import java.nio.file.NotDirectoryException;
import java.nio.file.OpenOption;
import java.nio.file.Path;
-import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.nio.file.StandardOpenOption;
import java.nio.file.attribute.FileAttribute;
@@ -45,7 +44,7 @@ public class UnixPath {
private final Path path;
public UnixPath(Path path) { this.path = path; }
- public UnixPath(String path) { this(Paths.get(path)); }
+ public UnixPath(String path) { this(Path.of(path)); }
public Path toPath() { return path; }
public UnixPath resolve(String relativeOrAbsolutePath) { return new UnixPath(path.resolve(relativeOrAbsolutePath)); }
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainerTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainerTest.java
index 65b62848d4b..daae19478ed 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainerTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainerTest.java
@@ -21,7 +21,6 @@ import java.io.IOException;
import java.nio.file.FileSystem;
import java.nio.file.Files;
import java.nio.file.Path;
-import java.nio.file.Paths;
import java.time.Duration;
import java.time.Instant;
import java.util.List;
@@ -63,7 +62,7 @@ public class StorageMaintainerTest {
@Test
void testNonExistingDiskUsed() {
- DiskSize size = storageMaintainer.getDiskUsed(null, Paths.get("/fake/path"));
+ DiskSize size = storageMaintainer.getDiskUsed(null, Path.of("/fake/path"));
assertEquals(DiskSize.ZERO, size);
}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandlerTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandlerTest.java
index fd7c366fb8b..33d785eb04e 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandlerTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoredumpHandlerTest.java
@@ -160,6 +160,8 @@ public class CoredumpHandlerTest {
.setCoreDumpPath(context.paths().of("/home/docker/dumps/container-123/id-123/dump_core.456"))
.setDockerImage(DockerImage.fromString("example.com/vespa/ci:6.48.4"));
+ new UnixPath(fileSystem.getPath("/proc/cpuinfo")).createParents().writeUtf8File("microcode\t: 0xf0");
+
ContainerPath coredumpDirectory = context.paths().of("/var/crash/id-123");
Files.createDirectories(coredumpDirectory.pathOnHost());
Files.createFile(coredumpDirectory.resolve("dump_core.456"));
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/DefaultEnvWriterTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/DefaultEnvWriterTest.java
index bc461af0eb3..bd523a16705 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/DefaultEnvWriterTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/DefaultEnvWriterTest.java
@@ -9,11 +9,12 @@ import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
-import java.nio.file.Paths;
import java.util.logging.Logger;
import static java.nio.file.StandardCopyOption.REPLACE_EXISTING;
-import static org.junit.jupiter.api.Assertions.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
@@ -27,8 +28,8 @@ public class DefaultEnvWriterTest {
@TempDir
public File temporaryFolder;
- private static final Path EXAMPLE_FILE = Paths.get("src/test/resources/default-env-example.txt");
- private static final Path EXPECTED_RESULT_FILE = Paths.get("src/test/resources/default-env-rewritten.txt");
+ private static final Path EXAMPLE_FILE = Path.of("src/test/resources/default-env-example.txt");
+ private static final Path EXPECTED_RESULT_FILE = Path.of("src/test/resources/default-env-rewritten.txt");
private final TaskContext context = mock(TaskContext.class);
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileFinderTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileFinderTest.java
index ce193059fb2..bbc549230c1 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileFinderTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileFinderTest.java
@@ -13,7 +13,6 @@ import java.nio.file.FileSystem;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
-import java.nio.file.Paths;
import java.nio.file.attribute.BasicFileAttributes;
import java.nio.file.attribute.FileTime;
import java.time.Duration;
@@ -26,7 +25,10 @@ import java.util.stream.Collectors;
import java.util.stream.Stream;
import static java.util.Set.of;
-import static org.junit.jupiter.api.Assertions.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -78,7 +80,7 @@ public class FileFinderTest {
@Test
void throws_if_prune_path_not_under_base_path() {
assertThrows(IllegalArgumentException.class, () -> {
- FileFinder.files(Paths.get("/some/path")).prune(Paths.get("/other/path"));
+ FileFinder.files(Path.of("/some/path")).prune(Path.of("/other/path"));
});
}
@@ -193,7 +195,7 @@ public class FileFinderTest {
@Test
void age_filter_test() {
- Path path = Paths.get("/my/fake/path");
+ Path path = Path.of("/my/fake/path");
when(attributes.lastModifiedTime()).thenReturn(FileTime.from(Instant.now().minus(Duration.ofHours(1))));
FileFinder.FileAttributes fileAttributes = new FileFinder.FileAttributes(path, attributes);
@@ -206,7 +208,7 @@ public class FileFinderTest {
@Test
void size_filters() {
- Path path = Paths.get("/my/fake/path");
+ Path path = Path.of("/my/fake/path");
when(attributes.size()).thenReturn(100L);
FileFinder.FileAttributes fileAttributes = new FileFinder.FileAttributes(path, attributes);
@@ -219,7 +221,7 @@ public class FileFinderTest {
@Test
void filename_filters() {
- Path path = Paths.get("/my/fake/path/some-12352-file.json");
+ Path path = Path.of("/my/fake/path/some-12352-file.json");
FileFinder.FileAttributes fileAttributes = new FileFinder.FileAttributes(path, attributes);
assertTrue(FileFinder.nameStartsWith("some-").test(fileAttributes));
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java
index a94adc5aaae..bcc571355e3 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java
@@ -190,7 +190,8 @@ class MaintenanceDeployment implements Closeable {
markPreferToRetire(node, false, agent, nodeRepository); // Necessary if this failed, no-op otherwise
// Immediately clean up if we reserved the node but could not activate or reserved a node on the wrong host
- expectedNewNode.flatMap(node -> nodeRepository.nodes().node(node.hostname(), Node.State.reserved))
+ expectedNewNode.flatMap(node -> nodeRepository.nodes().node(node.hostname()))
+ .filter(node -> node.state() == Node.State.reserved)
.ifPresent(node -> nodeRepository.nodes().deallocate(node, agent, "Expired by " + agent));
}
}
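
node(hostname) followed by Optional.filter on the state expresses "the node, but only if it is still reserved" without needing a per-state lookup variant. A tiny sketch of the shape of that call chain, with stand-in Node and State types rather than the node-repository classes:

    import java.util.Optional;

    class OptionalFilterExample {

        enum State { reserved, active }

        record Node(String hostname, State state) { }

        /** Keeps the node only if it is still reserved; otherwise yields empty. */
        static Optional<Node> reservedOnly(Optional<Node> node) {
            return node.filter(n -> n.state() == State.reserved);
        }
    }
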
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeHealthTracker.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeHealthTracker.java
index b602d2ac7cd..94683d463af 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeHealthTracker.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeHealthTracker.java
@@ -96,7 +96,8 @@ public class NodeHealthTracker extends NodeRepositoryMaintainer {
/** Get node by given hostname and application. The applicationLock must be held when calling this */
private Optional<Node> getNode(String hostname, ApplicationId application, @SuppressWarnings("unused") Mutex applicationLock) {
- return nodeRepository().nodes().node(hostname, Node.State.active)
+ return nodeRepository().nodes().node(hostname)
+ .filter(node -> node.state() == Node.State.active)
.filter(node -> node.allocation().isPresent())
.filter(node -> node.allocation().get().owner().equals(application));
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/IP.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/IP.java
index c0d0b220767..b2becc7ecfd 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/IP.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/IP.java
@@ -131,8 +131,9 @@ public class IP {
* @throws IllegalArgumentException if there are IP conflicts with existing nodes
*/
public static List<Node> verify(List<Node> nodes, LockedNodeList allNodes) {
+ NodeList sortedNodes = allNodes.sortedBy(Comparator.comparing(Node::hostname));
for (var node : nodes) {
- for (var other : allNodes) {
+ for (var other : sortedNodes) {
if (node.equals(other)) continue;
if (canAssignIpOf(other, node)) continue;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
index b507d66e14f..5c2a6601ad8 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
@@ -76,11 +76,11 @@ public class Nodes {
public void rewrite() {
Instant start = clock.instant();
int nodesWritten = 0;
- for (Node.State state : Node.State.values()) {
- List<Node> nodes = db.readNodes(state);
+ Map<Node.State, NodeList> nodes = list().groupingBy(Node::state);
+ for (var kv : nodes.entrySet()) {
// TODO(mpolden): This should take the lock before writing
- db.writeTo(state, nodes, Agent.system, Optional.empty());
- nodesWritten += nodes.size();
+ db.writeTo(kv.getKey(), kv.getValue().asList(), Agent.system, Optional.empty());
+ nodesWritten += kv.getValue().size();
}
Instant end = clock.instant();
log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end)));
@@ -88,24 +88,19 @@ public class Nodes {
// ---------------- Query API ----------------------------------------------------------------
- /**
- * Finds and returns the node with the hostname in any of the given states, or empty if not found
- *
- * @param hostname the full host name of the node
- * @param inState the states the node may be in. If no states are given, it will be returned from any state
- * @return the node, or empty if it was not found in any of the given states
- */
- public Optional<Node> node(String hostname, Node.State... inState) {
- return db.readNode(hostname, inState);
+ /** Finds and returns the node with the given hostname, or empty if not found */
+ public Optional<Node> node(String hostname) {
+ return db.readNode(hostname);
}
/**
- * Returns a list of nodes in this repository in any of the given states
+ * Returns an unsorted list of all nodes in this repository, in any of the given states
*
- * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
+ * @param inState the states to return nodes from. If no states are given, all nodes are returned
*/
public NodeList list(Node.State... inState) {
- return NodeList.copyOf(db.readNodes(inState));
+ NodeList nodes = NodeList.copyOf(db.readNodes());
+ return inState.length == 0 ? nodes : nodes.state(Set.of(inState));
}
/** Returns a locked list of all nodes in this repository */
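
With the per-state ZooKeeper directories gone, both rewrite() and list() read the full node set once and then group or filter it in memory. A compact sketch of those two in-memory operations, with stand-in Node and State types rather than the node-repository classes (NodeQuerySketch is a made-up name):

    import java.util.List;
    import java.util.Map;
    import java.util.Set;
    import java.util.stream.Collectors;

    class NodeQuerySketch {

        enum State { active, reserved, failed }

        record Node(String hostname, State state) { }

        private final List<Node> allNodes;

        NodeQuerySketch(List<Node> allNodes) { this.allNodes = List.copyOf(allNodes); }

        /** Groups all nodes by state in one pass, as rewrite() now does via groupingBy. */
        Map<State, List<Node>> byState() {
            return allNodes.stream().collect(Collectors.groupingBy(Node::state));
        }

        /** Returns all nodes, or only those in the given states when any are specified, as list() now does. */
        List<Node> list(State... inState) {
            if (inState.length == 0) return allNodes;
            Set<State> wanted = Set.of(inState); // assumes callers do not pass duplicate states
            return allNodes.stream().filter(node -> wanted.contains(node.state())).toList();
        }
    }
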
@@ -768,13 +763,9 @@ public class Nodes {
for (int i = 0; i < maxRetries; ++i) {
Mutex lockToClose = lock(staleNode, timeout);
try {
- // As an optimization we first try finding the node in the same state
- Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state());
+ Optional<Node> freshNode = node(staleNode.hostname());
if (freshNode.isEmpty()) {
- freshNode = node(staleNode.hostname());
- if (freshNode.isEmpty()) {
- return Optional.empty();
- }
+ return Optional.empty();
}
if (node.type() != NodeType.tenant ||
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CachingCurator.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CachingCurator.java
index 90d30a3a8bc..589468c48b8 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CachingCurator.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CachingCurator.java
@@ -208,7 +208,7 @@ public class CachingCurator {
List<String> getChildren(Path path);
/**
- * Returns the a copy of the content of this child - which may be empty.
+ * Returns a copy of the content of this child - which may be empty.
*/
Optional<byte[]> getData(Path path);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java
index cebc185360a..b9821b48fba 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java
@@ -96,7 +96,7 @@ public class CuratorDb {
db.create(nodesPath);
// TODO(mpolden): Remove state paths after migration to nodesPath
for (Node.State state : Node.State.values())
- db.create(toPath(state));
+ db.create(toLegacyPath(state));
db.create(applicationsPath);
db.create(inactiveJobsPath);
db.create(infrastructureVersionsPath);
@@ -117,7 +117,7 @@ public class CuratorDb {
node = node.with(node.history().recordStateTransition(null, expectedState, agent, clock.instant()));
// TODO(mpolden): Remove after migration to nodesPath
byte[] serialized = nodeSerializer.toJson(node);
- curatorTransaction.add(CuratorOperations.create(toPath(node).getAbsolute(), serialized));
+ curatorTransaction.add(CuratorOperations.create(toLegacyPath(node).getAbsolute(), serialized));
curatorTransaction.add(CuratorOperations.create(nodePath(node).getAbsolute(), serialized));
}
@@ -137,7 +137,7 @@ public class CuratorDb {
/** Removes given nodes in transaction */
public void removeNodes(List<Node> nodes, NestedTransaction transaction) {
for (Node node : nodes) {
- Path path = toPath(node.state(), node.hostname());
+ Path path = toLegacyPath(node.state(), node.hostname());
CuratorTransaction curatorTransaction = db.newCuratorTransactionIn(transaction);
// TODO(mpolden): Remove after migration to nodesPath
curatorTransaction.add(CuratorOperations.delete(path.getAbsolute()));
@@ -237,8 +237,8 @@ public class CuratorDb {
private void writeNode(Node.State toState, CuratorTransaction curatorTransaction, Node node, Node newNode) {
byte[] nodeData = nodeSerializer.toJson(newNode);
{ // TODO(mpolden): Remove this after migration to nodesPath
- String currentNodePath = toPath(node).getAbsolute();
- String newNodePath = toPath(toState, newNode.hostname()).getAbsolute();
+ String currentNodePath = toLegacyPath(node).getAbsolute();
+ String newNodePath = toLegacyPath(toState, newNode.hostname()).getAbsolute();
if (newNodePath.equals(currentNodePath)) {
curatorTransaction.add(CuratorOperations.setData(currentNodePath, nodeData));
} else {
@@ -255,61 +255,39 @@ public class CuratorDb {
return node.status();
}
- /**
- * Returns all nodes which are in one of the given states.
- * If no states are given this returns all nodes.
- *
- * @return the nodes in a mutable list owned by the caller
- */
- public List<Node> readNodes(Node.State ... states) {
- List<Node> nodes = new ArrayList<>();
- if (states.length == 0)
- states = Node.State.values();
+ /** Returns all existing nodes */
+ public List<Node> readNodes() {
CachingCurator.Session session = db.getSession();
- for (Node.State state : states) {
- for (String hostname : session.getChildren(toPath(state))) {
- Optional<Node> node = readNode(session, hostname, state);
- node.ifPresent(nodes::add); // node might disappear between getChildren and getNode
- }
- }
- return nodes;
+ return session.getChildren(nodesPath).stream()
+ .flatMap(hostname -> readNode(session, hostname).stream())
+ .toList();
}
- /**
- * Returns a particular node, or empty if this node is not in any of the given states.
- * If no states are given this returns the node if it is present in any state.
- */
- public Optional<Node> readNode(CachingCurator.Session session, String hostname, Node.State ... states) {
- if (states.length == 0)
- states = Node.State.values();
- for (Node.State state : states) {
- Optional<byte[]> nodeData = session.getData(toPath(state, hostname));
- if (nodeData.isPresent())
- return nodeData.map((data) -> nodeSerializer.fromJson(state, data));
- }
- return Optional.empty();
+ private Optional<Node> readNode(CachingCurator.Session session, String hostname) {
+ return session.getData(nodePath(hostname)).map(nodeSerializer::fromJson);
}
- /**
- * Returns a particular node, or empty if this noe is not in any of the given states.
- * If no states are given this returns the node if it is present in any state.
- */
- public Optional<Node> readNode(String hostname, Node.State ... states) {
- return readNode(db.getSession(), hostname, states);
+ /** Reads the node with the given hostname, if any such node exists */
+ public Optional<Node> readNode(String hostname) {
+ return readNode(db.getSession(), hostname);
}
- private Path toPath(Node.State nodeState) { return root.append(toDir(nodeState)); }
+ private Path toLegacyPath(Node.State nodeState) { return root.append(toDir(nodeState)); }
- private Path toPath(Node node) {
+ private Path toLegacyPath(Node node) {
return root.append(toDir(node.state())).append(node.hostname());
}
- private Path toPath(Node.State nodeState, String nodeName) {
+ private Path toLegacyPath(Node.State nodeState, String nodeName) {
return root.append(toDir(nodeState)).append(nodeName);
}
private Path nodePath(Node node) {
- return nodesPath.append(node.hostname());
+ return nodePath(node.hostname());
+ }
+
+ private Path nodePath(String hostname) {
+ return nodesPath.append(hostname);
}
/** Creates and returns the path to the lock for this application */
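
readNodes() now lists the children under the single nodesPath and maps each hostname through readNode, using Optional.stream inside flatMap so that a node which disappears between getChildren and getData is simply skipped rather than failing the read. A generic sketch of that listing-then-reading pattern; FlatStoreReader is a made-up name and the store is abstracted as two functions:

    import java.util.List;
    import java.util.Optional;
    import java.util.function.Function;
    import java.util.function.Supplier;

    class FlatStoreReader<T> {

        private final Supplier<List<String>> listChildren;      // e.g. session.getChildren(nodesPath)
        private final Function<String, Optional<T>> readChild;  // e.g. hostname -> readNode(session, hostname)

        FlatStoreReader(Supplier<List<String>> listChildren, Function<String, Optional<T>> readChild) {
            this.listChildren = listChildren;
            this.readChild = readChild;
        }

        /** Reads every child that still exists; entries deleted after listing are silently dropped. */
        List<T> readAll() {
            return listChildren.get().stream()
                               .flatMap(name -> readChild.apply(name).stream())
                               .toList();
        }
    }
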
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java
index f448266b94b..39cccafb8ef 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java
@@ -40,7 +40,6 @@ import com.yahoo.vespa.hosted.provision.node.Status;
import com.yahoo.vespa.hosted.provision.node.TrustStoreItem;
import java.io.IOException;
-import java.nio.charset.StandardCharsets;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collection;
@@ -266,19 +265,16 @@ public class NodeSerializer {
// ---------------- Deserialization --------------------------------------------------
- public Node fromJson(Node.State state, byte[] data) {
- long key = Hashing.sipHash24().newHasher()
- .putString(state.name(), StandardCharsets.UTF_8)
- .putBytes(data).hash()
- .asLong();
+ public Node fromJson(byte[] data) {
+ long key = Hashing.sipHash24().newHasher().putBytes(data).hash().asLong();
try {
- return cache.get(key, () -> nodeFromSlime(state, SlimeUtils.jsonToSlime(data).get()));
+ return cache.get(key, () -> nodeFromSlime(SlimeUtils.jsonToSlime(data).get()));
} catch (ExecutionException e) {
throw new UncheckedExecutionException(e);
}
}
- private Node nodeFromSlime(Node.State state, Inspector object) {
+ private Node nodeFromSlime(Inspector object) {
Flavor flavor = flavorFromSlime(object);
return new Node(object.field(idKey).asString(),
new IP.Config(ipAddressesFromSlime(object, ipAddressesKey),
@@ -288,7 +284,7 @@ public class NodeSerializer {
SlimeUtils.optionalString(object.field(parentHostnameKey)),
flavor,
statusFromSlime(object),
- state,
+ nodeStateFromString(object.field(stateKey).asString()),
allocationFromSlime(flavor.resources(), object.field(instanceKey)),
historyFromSlime(object),
nodeTypeFromString(object.field(nodeTypeKey).asString()),
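
With the state now stored in the JSON itself, the deserialization cache key no longer needs to mix in the state name: a sipHash of the raw bytes is enough to identify the document. A minimal sketch of content-hash memoization with a Guava cache; CachedParser is a made-up name and the parse step is a trivial placeholder:

    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;
    import com.google.common.hash.Hashing;

    import java.nio.charset.StandardCharsets;
    import java.util.concurrent.ExecutionException;

    class CachedParser {

        private final Cache<Long, String> cache = CacheBuilder.newBuilder().maximumSize(1000).build();

        /** Parses data, memoizing the result under a hash of the raw bytes only (no external state in the key). */
        String parse(byte[] data) {
            long key = Hashing.sipHash24().newHasher().putBytes(data).hash().asLong();
            try {
                return cache.get(key, () -> new String(data, StandardCharsets.UTF_8).trim());
            } catch (ExecutionException e) {
                throw new RuntimeException(e.getCause());
            }
        }
    }
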
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java
index 942f029bc6a..fd466108063 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodesResponse.java
@@ -105,16 +105,12 @@ class NodesResponse extends SlimeJsonResponse {
}
/** Outputs the nodes in the given states to a node array */
- private void nodesToSlime(Set<Node.State> statesToRead, Cursor parentObject) {
+ private void nodesToSlime(Set<Node.State> states, Cursor parentObject) {
Cursor nodeArray = parentObject.setArray("nodes");
- boolean sortByNodeType = statesToRead.size() == 1;
- statesToRead.stream().sorted().forEach(state -> {
- NodeList nodes = nodeRepository.nodes().list(state);
- if (sortByNodeType) {
- nodes = nodes.sortedBy(Comparator.comparing(Node::type));
- }
- toSlime(nodes, nodeArray);
- });
+ NodeList nodes = nodeRepository.nodes().list()
+ .state(states)
+ .sortedBy(Comparator.comparing(Node::hostname));
+ toSlime(nodes, nodeArray);
}
private void toSlime(NodeList nodes, Cursor array) {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java
index 0a179babc10..7d9a48f6773 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java
@@ -214,8 +214,6 @@ public class NodeRepositoryTest {
tester.addHost("id2", "host1", "default", NodeType.host);
host1 = tester.nodeRepository().nodes().node("host1").get();
assertEquals("This is the newly added node", "id2", host1.id());
- assertFalse("The old 'host1' is removed",
- tester.nodeRepository().nodes().node("host1", Node.State.deprovisioned).isPresent());
assertFalse("Not transferred from deprovisioned host", host1.status().wantToRetire());
assertFalse("Not transferred from deprovisioned host", host1.status().wantToDeprovision());
assertTrue("Transferred from deprovisioned host", host1.history().hasEventAfter(History.Event.Type.deprovisioned, testStart));
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java
index e569e9b0382..dc1e1320ff2 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java
@@ -48,7 +48,6 @@ import java.util.function.Consumer;
import java.util.logging.Logger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
-import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static com.yahoo.config.provision.NodeResources.DiskSpeed.any;
@@ -142,7 +141,7 @@ public class RealDataScenarioTest {
Consumer<String> consumer = input -> {
if (state.get() != null) {
String json = input.substring(input.indexOf("{\""), input.lastIndexOf('}') + 1);
- Node node = nodeSerializer.fromJson(state.get(), json.getBytes(UTF_8));
+ Node node = nodeSerializer.fromJson(json.getBytes(UTF_8));
nodeRepository.database().addNodesInState(List.of(node), state.get(), Agent.system);
state.set(null);
} else {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java
index 4f23ea3a578..44050fa747c 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java
@@ -40,7 +40,6 @@ import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.TreeMap;
-import java.util.stream.Collectors;
import static org.junit.Assert.assertEquals;
import static org.mockito.ArgumentMatchers.any;
@@ -118,14 +117,14 @@ public class MetricsReporterTest {
expectedMetrics.put("suspendedSeconds", 123L);
expectedMetrics.put("numberOfServices", 0L);
- expectedMetrics.put("cache.nodeObject.hitRate", 0.6D);
+ expectedMetrics.put("cache.nodeObject.hitRate", 0.7142857142857143D);
expectedMetrics.put("cache.nodeObject.evictionCount", 0L);
expectedMetrics.put("cache.nodeObject.size", 2L);
nodeRepository.nodes().list();
- expectedMetrics.put("cache.curator.hitRate", 0.52D);
+ expectedMetrics.put("cache.curator.hitRate", 2D/3D);
expectedMetrics.put("cache.curator.evictionCount", 0L);
- expectedMetrics.put("cache.curator.size", 12L);
+ expectedMetrics.put("cache.curator.size", 3L);
tester.clock().setInstant(Instant.ofEpochSecond(124));
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
index 9f9e6b85545..b5735cfae84 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/os/OsVersionsTest.java
@@ -121,17 +121,16 @@ public class OsVersionsTest {
// Activate target
for (int i = 0; i < totalNodes; i += maxActiveUpgrades) {
versions.resumeUpgradeOf(NodeType.host, true);
- var nodes = hostNodes.get();
- var nodesUpgrading = nodes.changingOsVersion();
+ NodeList nodes = hostNodes.get();
+ NodeList nodesUpgrading = nodes.changingOsVersion();
assertEquals("Target is changed for a subset of nodes", maxActiveUpgrades, nodesUpgrading.size());
assertEquals("Wanted version is set for nodes upgrading", version1,
minVersion(nodesUpgrading, OsVersion::wanted));
- var nodesOnLowestVersion = nodes.asList().stream()
- .sorted(Comparator.comparing(node -> node.status().osVersion().current().orElse(Version.emptyVersion)))
- .toList()
- .subList(0, maxActiveUpgrades);
+ NodeList nodesOnLowestVersion = nodes.sortedBy(Comparator.comparing(node -> node.status().osVersion().current().orElse(Version.emptyVersion)))
+ .first(maxActiveUpgrades);
assertEquals("Nodes on lowest version are told to upgrade",
- nodesUpgrading.asList(), nodesOnLowestVersion);
+ nodesUpgrading.hostnames(),
+ nodesOnLowestVersion.hostnames());
completeReprovisionOf(nodesUpgrading.asList());
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDbTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDbTest.java
index 42a0dd982ad..c0d6ab90f06 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDbTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDbTest.java
@@ -28,10 +28,10 @@ public class CuratorDbTest {
@Test
public void can_read_stored_host_information() throws Exception {
- String zkline = "{\"hostname\":\"host1\",\"ipAddresses\":[\"127.0.0.1\"],\"additionalIpAddresses\":[\"127.0.0.2\"],\"openStackId\":\"7951bb9d-3989-4a60-a21c-13690637c8ea\",\"flavor\":\"default\",\"created\":1421054425159, \"type\":\"host\"}";
- curator.framework().create().creatingParentsIfNeeded().forPath("/provision/v1/ready/host1", zkline.getBytes());
+ String zkline = "{\"hostname\":\"host1\",\"state\":\"ready\",\"ipAddresses\":[\"127.0.0.1\"],\"additionalIpAddresses\":[\"127.0.0.2\"],\"openStackId\":\"7951bb9d-3989-4a60-a21c-13690637c8ea\",\"flavor\":\"default\",\"created\":1421054425159, \"type\":\"host\"}";
+ curator.framework().create().creatingParentsIfNeeded().forPath("/provision/v1/nodes/host1", zkline.getBytes());
- List<Node> allocatedNodes = zkClient.readNodes(Node.State.ready);
+ List<Node> allocatedNodes = zkClient.readNodes();
assertEquals(1, allocatedNodes.size());
assertEquals(NodeType.host, allocatedNodes.get(0).type());
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializerTest.java
index ab487cc7d04..d61a3d95a65 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializerTest.java
@@ -67,7 +67,7 @@ public class NodeSerializerTest {
public void provisioned_node_serialization() {
Node node = createNode();
- Node copy = nodeSerializer.fromJson(Node.State.provisioned, nodeSerializer.toJson(node));
+ Node copy = nodeSerializer.fromJson(nodeSerializer.toJson(node));
assertEquals(node.hostname(), copy.hostname());
assertEquals(node.id(), copy.id());
assertEquals(node.state(), copy.state());
@@ -99,7 +99,7 @@ public class NodeSerializerTest {
node = node.downAt(Instant.ofEpochMilli(5), Agent.system)
.upAt(Instant.ofEpochMilli(6), Agent.system)
.downAt(Instant.ofEpochMilli(7), Agent.system);
- Node copy = nodeSerializer.fromJson(Node.State.provisioned, nodeSerializer.toJson(node));
+ Node copy = nodeSerializer.fromJson(nodeSerializer.toJson(node));
assertEquals(node.id(), copy.id());
assertEquals(node.hostname(), copy.hostname());
@@ -129,6 +129,7 @@ public class NodeSerializerTest {
String nodeData =
"{\n" +
" \"type\" : \"tenant\",\n" +
+ " \"state\" : \"provisioned\",\n" +
" \"rebootGeneration\" : 1,\n" +
" \"currentRebootGeneration\" : 2,\n" +
" \"flavor\" : \"large\",\n" +
@@ -159,7 +160,7 @@ public class NodeSerializerTest {
" \"ipAddresses\" : [\"127.0.0.1\"]\n" +
"}";
- Node node = nodeSerializer.fromJson(Node.State.provisioned, Utf8.toBytes(nodeData));
+ Node node = nodeSerializer.fromJson(Utf8.toBytes(nodeData));
assertEquals("large", node.flavor().name());
assertEquals(1, node.status().reboot().wanted());
@@ -187,7 +188,7 @@ public class NodeSerializerTest {
assertEquals(1, node.history().events().size());
clock.advance(Duration.ofMinutes(2));
node = node.retire(Agent.application, clock.instant());
- Node copy = nodeSerializer.fromJson(Node.State.provisioned, nodeSerializer.toJson(node));
+ Node copy = nodeSerializer.fromJson(nodeSerializer.toJson(node));
assertEquals(2, copy.history().events().size());
assertEquals(clock.instant().truncatedTo(MILLIS), copy.history().event(History.Event.Type.retired).get().at());
assertEquals(Agent.application,
@@ -195,34 +196,37 @@ public class NodeSerializerTest {
assertTrue(copy.allocation().get().membership().retired());
Node removable = copy.with(node.allocation().get().removable(true, true));
- Node removableCopy = nodeSerializer.fromJson(Node.State.provisioned, nodeSerializer.toJson(removable));
+ Node removableCopy = nodeSerializer.fromJson(nodeSerializer.toJson(removable));
assertTrue(removableCopy.allocation().get().removable());
assertTrue(removableCopy.allocation().get().reusable());
}
@Test
public void assimilated_node_deserialization() {
- Node node = nodeSerializer.fromJson(Node.State.active, ("{\n" +
- " \"type\": \"tenant\",\n" +
- " \"hostname\": \"assimilate2.vespahosted.yahoo.tld\",\n" +
- " \"ipAddresses\": [\"127.0.0.1\"],\n" +
- " \"openStackId\": \"\",\n" +
- " \"flavor\": \"ugccloud-container\",\n" +
- " \"instance\": {\n" +
- " \"tenantId\": \"by_mortent\",\n" +
- " \"applicationId\": \"ugc-assimilate\",\n" +
- " \"instanceId\": \"default\",\n" +
- " \"serviceId\": \"container/ugccloud-container/0/0\",\n" +
- " \"restartGeneration\": 0,\n" +
- " \"wantedVespaVersion\": \"6.42.2\"\n" +
- " }\n" +
- "}\n").getBytes());
+ Node node = nodeSerializer.fromJson(("""
+ {
+ "type": "tenant",
+ "hostname": "assimilate2.vespahosted.yahoo.tld",
+ "state": "provisioned",
+ "ipAddresses": ["127.0.0.1"],
+ "openStackId": "",
+ "flavor": "ugccloud-container",
+ "instance": {
+ "tenantId": "by_mortent",
+ "applicationId": "ugc-assimilate",
+ "instanceId": "default",
+ "serviceId": "container/ugccloud-container/0/0",
+ "restartGeneration": 0,
+ "wantedVespaVersion": "6.42.2"
+ }
+ }
+ """).getBytes());
assertEquals(0, node.history().events().size());
assertTrue(node.allocation().isPresent());
assertEquals("ugccloud-container", node.allocation().get().membership().cluster().id().value());
assertEquals("container", node.allocation().get().membership().cluster().type().name());
assertEquals(0, node.allocation().get().membership().cluster().group().get().index());
- Node copy = nodeSerializer.fromJson(Node.State.provisioned, nodeSerializer.toJson(node));
+ Node copy = nodeSerializer.fromJson(nodeSerializer.toJson(node));
assertEquals(0, copy.history().events().size());
}
@@ -237,7 +241,7 @@ public class NodeSerializerTest {
clock.instant());
node = node.with(node.status().withFailCount(0));
- Node copy2 = nodeSerializer.fromJson(Node.State.provisioned, nodeSerializer.toJson(node));
+ Node copy2 = nodeSerializer.fromJson(nodeSerializer.toJson(node));
assertEquals(0, copy2.status().failCount());
}
@@ -249,7 +253,7 @@ public class NodeSerializerTest {
.parentHostname(parentHostname)
.build();
- Node deserializedNode = nodeSerializer.fromJson(State.provisioned, nodeSerializer.toJson(node));
+ Node deserializedNode = nodeSerializer.fromJson(nodeSerializer.toJson(node));
assertEquals(parentHostname, deserializedNode.parentHostname().get());
}
@@ -257,7 +261,7 @@ public class NodeSerializerTest {
@Test
public void serializes_multiple_ip_addresses() {
byte[] nodeWithMultipleIps = createNodeJson("node4.yahoo.tld", "127.0.0.4", "::4");
- Node deserializedNode = nodeSerializer.fromJson(State.provisioned, nodeWithMultipleIps);
+ Node deserializedNode = nodeSerializer.fromJson(nodeWithMultipleIps);
assertEquals(ImmutableSet.of("127.0.0.4", "::4"), deserializedNode.ipConfig().primary());
}
@@ -269,13 +273,13 @@ public class NodeSerializerTest {
node = node.with(node.ipConfig().withPool(IP.Pool.of(
Set.of("::1", "::2", "::3"),
List.of(new Address("a"), new Address("b"), new Address("c")))));
- Node copy = nodeSerializer.fromJson(node.state(), nodeSerializer.toJson(node));
+ Node copy = nodeSerializer.fromJson(nodeSerializer.toJson(node));
assertEquals(node.ipConfig().pool().ipSet(), copy.ipConfig().pool().ipSet());
assertEquals(Set.copyOf(node.ipConfig().pool().getAddressList()), Set.copyOf(copy.ipConfig().pool().getAddressList()));
// Test round-trip without address pool (handle empty pool)
node = createNode();
- copy = nodeSerializer.fromJson(node.state(), nodeSerializer.toJson(node));
+ copy = nodeSerializer.fromJson(nodeSerializer.toJson(node));
assertEquals(node.ipConfig().pool().ipSet(), copy.ipConfig().pool().ipSet());
assertEquals(Set.copyOf(node.ipConfig().pool().getAddressList()), Set.copyOf(copy.ipConfig().pool().getAddressList()));
}
@@ -286,11 +290,12 @@ public class NodeSerializerTest {
"{\n" +
" \"type\" : \"tenant\",\n" +
" \"flavor\" : \"large\",\n" +
+ " \"state\" : \"provisioned\",\n" +
" \"openStackId\" : \"myId\",\n" +
" \"hostname\" : \"myHostname\",\n" +
" \"ipAddresses\" : [\"127.0.0.1\"]\n" +
"}";
- Node node = nodeSerializer.fromJson(State.provisioned, Utf8.toBytes(nodeData));
+ Node node = nodeSerializer.fromJson(Utf8.toBytes(nodeData));
assertFalse(node.status().wantToRetire());
}
@@ -301,7 +306,7 @@ public class NodeSerializerTest {
node = node.with(node.flavor().with(FlavorOverrides.ofDisk(1234)), Agent.system, clock.instant());
assertEquals(1234, node.flavor().resources().diskGb(), 0);
- Node copy = nodeSerializer.fromJson(Node.State.provisioned, nodeSerializer.toJson(node));
+ Node copy = nodeSerializer.fromJson(nodeSerializer.toJson(node));
assertEquals(1234, copy.flavor().resources().diskGb(), 0);
assertEquals(node, copy);
assertTrue(node.history().event(History.Event.Type.resized).isPresent());
@@ -312,21 +317,22 @@ public class NodeSerializerTest {
String nodeData =
"{\n" +
" \"type\" : \"tenant\",\n" +
+ " \"state\" : \"provisioned\",\n" +
" \"flavor\" : \"large\",\n" +
" \"openStackId\" : \"myId\",\n" +
" \"hostname\" : \"myHostname\",\n" +
" \"ipAddresses\" : [\"127.0.0.1\"]\n" +
"}";
- Node node = nodeSerializer.fromJson(State.provisioned, Utf8.toBytes(nodeData));
+ Node node = nodeSerializer.fromJson(Utf8.toBytes(nodeData));
assertFalse(node.status().wantToDeprovision());
}
@Test
public void want_to_rebuild() {
- Node node = nodeSerializer.fromJson(State.active, nodeSerializer.toJson(createNode()));
+ Node node = nodeSerializer.fromJson(nodeSerializer.toJson(createNode()));
assertFalse(node.status().wantToRebuild());
node = node.with(node.status().withWantToRetire(true, false, true));
- node = nodeSerializer.fromJson(State.active, nodeSerializer.toJson(node));
+ node = nodeSerializer.fromJson(nodeSerializer.toJson(node));
assertTrue(node.status().wantToRetire());
assertFalse(node.status().wantToDeprovision());
assertTrue(node.status().wantToRebuild());
@@ -337,6 +343,7 @@ public class NodeSerializerTest {
String nodeWithWantedVespaVersion =
"{\n" +
" \"type\" : \"tenant\",\n" +
+ " \"state\" : \"provisioned\",\n" +
" \"flavor\" : \"large\",\n" +
" \"openStackId\" : \"myId\",\n" +
" \"hostname\" : \"myHostname\",\n" +
@@ -349,13 +356,13 @@ public class NodeSerializerTest {
" \"wantedVespaVersion\": \"6.42.2\"\n" +
" }\n" +
"}";
- Node node = nodeSerializer.fromJson(State.active, Utf8.toBytes(nodeWithWantedVespaVersion));
+ Node node = nodeSerializer.fromJson(Utf8.toBytes(nodeWithWantedVespaVersion));
assertEquals("6.42.2", node.allocation().get().membership().cluster().vespaVersion().toString());
}
@Test
public void os_version_serialization() {
- Node serialized = nodeSerializer.fromJson(State.provisioned, nodeSerializer.toJson(createNode()));
+ Node serialized = nodeSerializer.fromJson(nodeSerializer.toJson(createNode()));
assertFalse(serialized.status().osVersion().current().isPresent());
// Update OS version
@@ -364,7 +371,7 @@ public class NodeSerializerTest {
serialized.history().event(History.Event.Type.osUpgraded).isPresent());
serialized = serialized.withCurrentOsVersion(Version.fromString("7.2"), Instant.ofEpochMilli(123))
.withCurrentOsVersion(Version.fromString("7.2"), Instant.ofEpochMilli(456));
- serialized = nodeSerializer.fromJson(State.provisioned, nodeSerializer.toJson(serialized));
+ serialized = nodeSerializer.fromJson(nodeSerializer.toJson(serialized));
assertEquals(Version.fromString("7.2"), serialized.status().osVersion().current().get());
var osUpgradedEvents = serialized.history().events().stream()
.filter(event -> event.type() == History.Event.Type.osUpgraded)
@@ -376,11 +383,11 @@ public class NodeSerializerTest {
@Test
public void firmware_check_serialization() {
- Node node = nodeSerializer.fromJson(State.active, nodeSerializer.toJson(createNode()));
+ Node node = nodeSerializer.fromJson(nodeSerializer.toJson(createNode()));
assertFalse(node.status().firmwareVerifiedAt().isPresent());
node = node.withFirmwareVerifiedAt(Instant.ofEpochMilli(100));
- node = nodeSerializer.fromJson(State.active, nodeSerializer.toJson(node));
+ node = nodeSerializer.fromJson(nodeSerializer.toJson(node));
assertEquals(100, node.status().firmwareVerifiedAt().get().toEpochMilli());
assertEquals(Instant.ofEpochMilli(100), node.history().event(History.Event.Type.firmwareVerified).get().at());
}
@@ -394,7 +401,7 @@ public class NodeSerializerTest {
@Test
public void reports_serialization() {
- Node node = nodeSerializer.fromJson(State.active, nodeSerializer.toJson(createNode()));
+ Node node = nodeSerializer.fromJson(nodeSerializer.toJson(createNode()));
assertTrue(node.reports().isEmpty());
var slime = new Slime();
@@ -407,7 +414,7 @@ public class NodeSerializerTest {
var reports = new Reports.Builder().setReport(report).build();
node = node.with(reports);
- node = nodeSerializer.fromJson(State.active, nodeSerializer.toJson(node));
+ node = nodeSerializer.fromJson(nodeSerializer.toJson(node));
reports = node.reports();
assertFalse(reports.isEmpty());
@@ -422,11 +429,11 @@ public class NodeSerializerTest {
@Test
public void model_id_serialization() {
- Node node = nodeSerializer.fromJson(State.active, nodeSerializer.toJson(createNode()));
+ Node node = nodeSerializer.fromJson(nodeSerializer.toJson(createNode()));
assertFalse(node.modelName().isPresent());
node = node.withModelName("some model");
- node = nodeSerializer.fromJson(State.active, nodeSerializer.toJson(node));
+ node = nodeSerializer.fromJson(nodeSerializer.toJson(node));
assertEquals("some model", node.modelName().get());
}
@@ -447,7 +454,7 @@ public class NodeSerializerTest {
node = node.with(node.allocation().get().withNetworkPorts(ports));
assertTrue(node.allocation().isPresent());
assertTrue(node.allocation().get().networkPorts().isPresent());
- Node copy = nodeSerializer.fromJson(Node.State.provisioned, nodeSerializer.toJson(node));
+ Node copy = nodeSerializer.fromJson(nodeSerializer.toJson(node));
assertTrue(copy.allocation().isPresent());
assertTrue(copy.allocation().get().networkPorts().isPresent());
NetworkPorts portsCopy = node.allocation().get().networkPorts().get();
@@ -457,11 +464,11 @@ public class NodeSerializerTest {
@Test
public void switch_hostname_serialization() {
- Node node = nodeSerializer.fromJson(State.active, nodeSerializer.toJson(createNode()));
+ Node node = nodeSerializer.fromJson(nodeSerializer.toJson(createNode()));
assertFalse(node.switchHostname().isPresent());
String switchHostname = "switch0.example.com";
node = node.withSwitchHostname(switchHostname);
- node = nodeSerializer.fromJson(State.active, nodeSerializer.toJson(node));
+ node = nodeSerializer.fromJson(nodeSerializer.toJson(node));
assertEquals(switchHostname, node.switchHostname().get());
}
@@ -469,25 +476,25 @@ public class NodeSerializerTest {
public void exclusive_to_serialization() {
Node.Builder builder = Node.create("myId", IP.Config.EMPTY, "myHostname",
nodeFlavors.getFlavorOrThrow("default"), NodeType.host);
- Node node = nodeSerializer.fromJson(State.provisioned, nodeSerializer.toJson(builder.build()));
+ Node node = nodeSerializer.fromJson(nodeSerializer.toJson(builder.build()));
assertFalse(node.exclusiveToApplicationId().isPresent());
assertFalse(node.exclusiveToClusterType().isPresent());
ApplicationId exclusiveToApp = ApplicationId.from("tenant1", "app1", "instance1");
ClusterSpec.Type exclusiveToCluster = ClusterSpec.Type.admin;
node = builder.exclusiveToApplicationId(exclusiveToApp).exclusiveToClusterType(exclusiveToCluster).build();
- node = nodeSerializer.fromJson(State.provisioned, nodeSerializer.toJson(node));
+ node = nodeSerializer.fromJson(nodeSerializer.toJson(node));
assertEquals(exclusiveToApp, node.exclusiveToApplicationId().get());
assertEquals(exclusiveToCluster, node.exclusiveToClusterType().get());
}
@Test
public void truststore_serialization() {
- Node node = nodeSerializer.fromJson(State.active, nodeSerializer.toJson(createNode()));
+ Node node = nodeSerializer.fromJson(nodeSerializer.toJson(createNode()));
assertEquals(List.of(), node.trustedCertificates());
List<TrustStoreItem> trustStoreItems = List.of(new TrustStoreItem("foo", Instant.parse("2023-09-01T23:59:59Z")), new TrustStoreItem("bar", Instant.parse("2025-05-20T23:59:59Z")));
node = node.with(trustStoreItems);
- node = nodeSerializer.fromJson(State.active, nodeSerializer.toJson(node));
+ node = nodeSerializer.fromJson(nodeSerializer.toJson(node));
assertEquals(trustStoreItems, node.trustedCertificates());
}
@@ -498,7 +505,7 @@ public class NodeSerializerTest {
.cloudAccount(account)
.exclusiveToApplicationId(ApplicationId.defaultId())
.build();
- node = nodeSerializer.fromJson(State.provisioned, nodeSerializer.toJson(node));
+ node = nodeSerializer.fromJson(nodeSerializer.toJson(node));
assertEquals(account, node.cloudAccount());
}
@@ -514,6 +521,7 @@ public class NodeSerializerTest {
return ("{\"hostname\":\"" + hostname + "\"," +
ipAddressJsonPart +
"\"openStackId\":\"myId\"," +
+ "\"state\":\"provisioned\"," +
"\"flavor\":\"default\",\"rebootGeneration\":0," +
"\"currentRebootGeneration\":0,\"failCount\":0,\"history\":[],\"type\":\"tenant\"}")
.getBytes(StandardCharsets.UTF_8);
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java
index d26ac4d3916..30a49a89e12 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/AclProvisioningTest.java
@@ -196,11 +196,16 @@ public class AclProvisioningTest {
// ACL for nodes with allocation trust their respective load balancer networks, if any
for (var host : hosts) {
- var acls = tester.nodeRepository().getChildAcls(host);
+ List<NodeAcl> acls = tester.nodeRepository().getChildAcls(host);
assertEquals(2, acls.size());
- assertEquals(Set.of(), acls.get(0).trustedNetworks());
- assertEquals(application, acls.get(1).node().allocation().get().owner());
- assertEquals(lbNetworks, acls.get(1).trustedNetworks());
+ for (var acl : acls) {
+ if (acl.node().allocation().isPresent()) {
+ assertEquals(lbNetworks, acl.trustedNetworks());
+ assertEquals(application, acl.node().allocation().get().owner());
+ } else {
+ assertEquals(Set.of(), acl.trustedNetworks());
+ }
+ }
}
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
index 30bd1250430..e32643860f5 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
@@ -85,8 +85,8 @@ public class LoadBalancerProvisionerTest {
assertEquals(4443, get(lbApp1.get().get(0).instance().get().reals(), 1).port());
// A container is failed
- Supplier<List<Node>> containers = () -> tester.getNodes(app1).container().asList();
- Node toFail = containers.get().get(0);
+ Supplier<NodeList> containers = () -> tester.getNodes(app1).container();
+ Node toFail = containers.get().first().get();
tester.nodeRepository().nodes().fail(toFail.hostname(), Agent.system, this.getClass().getSimpleName());
// Redeploying replaces failed node and removes it from load balancer
@@ -99,8 +99,8 @@ public class LoadBalancerProvisionerTest {
.map(Real::hostname)
.map(DomainName::value)
.noneMatch(hostname -> hostname.equals(toFail.hostname())));
- assertEquals(containers.get().get(0).hostname(), get(loadBalancer.instance().get().reals(), 0).hostname().value());
- assertEquals(containers.get().get(1).hostname(), get(loadBalancer.instance().get().reals(), 1).hostname().value());
+ assertEquals(containers.get().state(Node.State.active).hostnames(),
+ loadBalancer.instance().get().reals().stream().map(r -> r.hostname().value()).collect(Collectors.toSet()));
assertSame("State is unchanged", LoadBalancer.State.active, loadBalancer.state());
// Add another container cluster to first app
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
index 110569a371a..5e9549aafbb 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
@@ -328,7 +328,7 @@ public class ProvisioningTester {
}
public void fail(String hostname) {
- int beforeFailCount = nodeRepository.nodes().node(hostname, Node.State.active).get().status().failCount();
+ int beforeFailCount = nodeRepository.nodes().node(hostname).get().status().failCount();
Node failedNode = nodeRepository.nodes().fail(hostname, Agent.system, "Failing to unit test");
assertTrue(nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).asList().contains(failedNode));
assertEquals(beforeFailCount + 1, failedNode.status().failCount());
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java
index f5fe0e6aafd..9cc7d81055d 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java
@@ -796,8 +796,8 @@ public class NodesV2ApiTest {
// Filter nodes by osVersion
assertResponse(new Request("http://localhost:8080/nodes/v2/node/?osVersion=7.5.2"),
"{\"nodes\":[" +
- "{\"url\":\"http://localhost:8080/nodes/v2/node/dockerhost2.yahoo.com\"}," +
- "{\"url\":\"http://localhost:8080/nodes/v2/node/dockerhost1.yahoo.com\"}" +
+ "{\"url\":\"http://localhost:8080/nodes/v2/node/dockerhost1.yahoo.com\"}," +
+ "{\"url\":\"http://localhost:8080/nodes/v2/node/dockerhost2.yahoo.com\"}" +
"]}");
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/active-nodes.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/active-nodes.json
index c46bc6acbd2..15e93ce40a0 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/active-nodes.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/active-nodes.json
@@ -1,19 +1,19 @@
{
"nodes": [
+ @include(cfg1.json),
+ @include(cfg2.json),
+ @include(docker-node1.json),
+ @include(docker-node2.json),
+ @include(docker-node3.json),
+ @include(docker-node4.json),
+ @include(docker-node5.json),
+ @include(node1.json),
@include(node10.json),
+ @include(node13.json),
@include(node14.json),
+ @include(node2.json),
@include(node4.json),
@include(node6.json),
- @include(docker-container1.json),
- @include(node13.json),
- @include(node2.json),
- @include(node1.json),
- @include(docker-node3.json),
- @include(docker-node4.json),
- @include(docker-node5.json),
- @include(docker-node2.json),
- @include(docker-node1.json),
- @include(cfg1.json),
- @include(cfg2.json)
+ @include(docker-container1.json)
]
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application2-nodes.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application2-nodes.json
index 4581ecba73d..6ffdb05fa67 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application2-nodes.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application2-nodes.json
@@ -1,6 +1,6 @@
{
"nodes": [
- @include(node6.json),
- @include(node2.json)
+ @include(node2.json),
+ @include(node6.json)
]
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/content-nodes.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/content-nodes.json
index 134d27f5717..cbf79f5b55d 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/content-nodes.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/content-nodes.json
@@ -1,8 +1,8 @@
{
"nodes": [
+ @include(node2.json),
@include(node4.json),
@include(node6.json),
- @include(docker-container1.json),
- @include(node2.json)
+ @include(docker-container1.json)
]
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/enclave-nodes-recursive.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/enclave-nodes-recursive.json
index 5c728d77920..540a0086cbf 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/enclave-nodes-recursive.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/enclave-nodes-recursive.json
@@ -1,6 +1,6 @@
{
"nodes": [
- @include(node3.json),
- @include(docker-node2.json)
+ @include(docker-node2.json),
+ @include(node3.json)
]
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/enclave-nodes.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/enclave-nodes.json
index 5bbc683c92a..33fd4daa699 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/enclave-nodes.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/enclave-nodes.json
@@ -1,10 +1,10 @@
{
"nodes":[
{
- "url":"http://localhost:8080/nodes/v2/node/host3.yahoo.com"
+ "url":"http://localhost:8080/nodes/v2/node/dockerhost2.yahoo.com"
},
{
- "url":"http://localhost:8080/nodes/v2/node/dockerhost2.yahoo.com"
+ "url":"http://localhost:8080/nodes/v2/node/host3.yahoo.com"
}
]
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/nodes-recursive-include-deprovisioned.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/nodes-recursive-include-deprovisioned.json
index 66b44726e7e..dc6fd3a317d 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/nodes-recursive-include-deprovisioned.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/nodes-recursive-include-deprovisioned.json
@@ -1,24 +1,24 @@
{
"nodes": [
- @include(node7.json),
- @include(node3.json),
- @include(node10.json),
@include(cfg1.json),
@include(cfg2.json),
+ @include(docker-node1.json),
+ @include(docker-node2.json),
@include(docker-node3.json),
- @include(node14.json),
- @include(node4.json),
@include(docker-node4.json),
- @include(node6.json),
- @include(docker-container1.json),
@include(docker-node5.json),
- @include(docker-node2.json),
+ @include(dockerhost6.json),
+ @include(node1.json),
+ @include(node10.json),
@include(node13.json),
+ @include(node14.json),
@include(node2.json),
- @include(docker-node1.json),
- @include(node1.json),
- @include(node55.json),
+ @include(node3.json),
+ @include(node4.json),
@include(node5.json),
- @include(dockerhost6.json)
+ @include(node55.json),
+ @include(node6.json),
+ @include(node7.json),
+ @include(docker-container1.json)
]
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/nodes-recursive.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/nodes-recursive.json
index 7b52bc576ae..bd3b743e98e 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/nodes-recursive.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/nodes-recursive.json
@@ -1,23 +1,23 @@
{
"nodes": [
- @include(node7.json),
- @include(node3.json),
- @include(node10.json),
@include(cfg1.json),
@include(cfg2.json),
+ @include(docker-node1.json),
+ @include(docker-node2.json),
@include(docker-node3.json),
- @include(node14.json),
- @include(node4.json),
@include(docker-node4.json),
- @include(node6.json),
- @include(docker-container1.json),
@include(docker-node5.json),
- @include(docker-node2.json),
+ @include(node1.json),
+ @include(node10.json),
@include(node13.json),
+ @include(node14.json),
@include(node2.json),
- @include(docker-node1.json),
- @include(node1.json),
+ @include(node3.json),
+ @include(node4.json),
+ @include(node5.json),
@include(node55.json),
- @include(node5.json)
+ @include(node6.json),
+ @include(node7.json),
+ @include(docker-container1.json)
]
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/nodes.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/nodes.json
index 86da5fb6e62..8147f92df31 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/nodes.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/nodes.json
@@ -1,61 +1,61 @@
{
"nodes": [
{
- "url": "http://localhost:8080/nodes/v2/node/host7.yahoo.com"
+ "url": "http://localhost:8080/nodes/v2/node/cfg1.yahoo.com"
},
{
- "url": "http://localhost:8080/nodes/v2/node/host3.yahoo.com"
+ "url": "http://localhost:8080/nodes/v2/node/cfg2.yahoo.com"
},
{
- "url": "http://localhost:8080/nodes/v2/node/host10.yahoo.com"
+ "url": "http://localhost:8080/nodes/v2/node/dockerhost1.yahoo.com"
},
{
- "url": "http://localhost:8080/nodes/v2/node/cfg1.yahoo.com"
+ "url": "http://localhost:8080/nodes/v2/node/dockerhost2.yahoo.com"
},
{
- "url": "http://localhost:8080/nodes/v2/node/cfg2.yahoo.com"
+ "url": "http://localhost:8080/nodes/v2/node/dockerhost3.yahoo.com"
},
{
- "url": "http://localhost:8080/nodes/v2/node/dockerhost3.yahoo.com"
+ "url": "http://localhost:8080/nodes/v2/node/dockerhost4.yahoo.com"
},
{
- "url": "http://localhost:8080/nodes/v2/node/host14.yahoo.com"
+ "url": "http://localhost:8080/nodes/v2/node/dockerhost5.yahoo.com"
},
{
- "url": "http://localhost:8080/nodes/v2/node/host4.yahoo.com"
+ "url": "http://localhost:8080/nodes/v2/node/host1.yahoo.com"
},
{
- "url": "http://localhost:8080/nodes/v2/node/dockerhost4.yahoo.com"
+ "url": "http://localhost:8080/nodes/v2/node/host10.yahoo.com"
},
{
- "url": "http://localhost:8080/nodes/v2/node/host6.yahoo.com"
+ "url": "http://localhost:8080/nodes/v2/node/host13.yahoo.com"
},
{
- "url": "http://localhost:8080/nodes/v2/node/test-node-pool-102-2"
+ "url": "http://localhost:8080/nodes/v2/node/host14.yahoo.com"
},
{
- "url": "http://localhost:8080/nodes/v2/node/dockerhost5.yahoo.com"
+ "url": "http://localhost:8080/nodes/v2/node/host2.yahoo.com"
},
{
- "url": "http://localhost:8080/nodes/v2/node/dockerhost2.yahoo.com"
+ "url": "http://localhost:8080/nodes/v2/node/host3.yahoo.com"
},
{
- "url": "http://localhost:8080/nodes/v2/node/host13.yahoo.com"
+ "url": "http://localhost:8080/nodes/v2/node/host4.yahoo.com"
},
{
- "url": "http://localhost:8080/nodes/v2/node/host2.yahoo.com"
+ "url": "http://localhost:8080/nodes/v2/node/host5.yahoo.com"
},
{
- "url": "http://localhost:8080/nodes/v2/node/dockerhost1.yahoo.com"
+ "url": "http://localhost:8080/nodes/v2/node/host55.yahoo.com"
},
{
- "url": "http://localhost:8080/nodes/v2/node/host1.yahoo.com"
+ "url": "http://localhost:8080/nodes/v2/node/host6.yahoo.com"
},
{
- "url": "http://localhost:8080/nodes/v2/node/host55.yahoo.com"
+ "url": "http://localhost:8080/nodes/v2/node/host7.yahoo.com"
},
{
- "url": "http://localhost:8080/nodes/v2/node/host5.yahoo.com"
+ "url": "http://localhost:8080/nodes/v2/node/test-node-pool-102-2"
}
]
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/states-recursive.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/states-recursive.json
index 5ece0e642f1..2bdffea108a 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/states-recursive.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/states-recursive.json
@@ -20,21 +20,21 @@
"active": {
"url": "http://localhost:8080/nodes/v2/state/active",
"nodes": [
+ @include(cfg1.json),
+ @include(cfg2.json),
+ @include(docker-node1.json),
+ @include(docker-node2.json),
+ @include(docker-node3.json),
+ @include(docker-node4.json),
+ @include(docker-node5.json),
+ @include(node1.json),
@include(node10.json),
+ @include(node13.json),
@include(node14.json),
+ @include(node2.json),
@include(node4.json),
@include(node6.json),
- @include(docker-container1.json),
- @include(node13.json),
- @include(node2.json),
- @include(node1.json),
- @include(docker-node3.json),
- @include(docker-node4.json),
- @include(docker-node5.json),
- @include(docker-node2.json),
- @include(docker-node1.json),
- @include(cfg1.json),
- @include(cfg2.json)
+ @include(docker-container1.json)
]
},
"inactive": {
diff --git a/searchlib/src/vespa/searchlib/memoryindex/field_index_base.h b/searchlib/src/vespa/searchlib/memoryindex/field_index_base.h
index 93fe77ba50b..2d6d367af3b 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/field_index_base.h
+++ b/searchlib/src/vespa/searchlib/memoryindex/field_index_base.h
@@ -78,7 +78,7 @@ protected:
GenerationHandler _generationHandler;
DictionaryTree _dict;
FeatureStore _featureStore;
- uint32_t _fieldId;
+ const uint32_t _fieldId;
FieldIndexRemover _remover;
std::unique_ptr<IOrderedFieldIndexInserter> _inserter;
index::FieldLengthCalculator _calculator;
diff --git a/searchlib/src/vespa/searchlib/memoryindex/field_index_collection.h b/searchlib/src/vespa/searchlib/memoryindex/field_index_collection.h
index 7a65195e843..a9f597e6296 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/field_index_collection.h
+++ b/searchlib/src/vespa/searchlib/memoryindex/field_index_collection.h
@@ -28,7 +28,7 @@ private:
using GenerationHandler = vespalib::GenerationHandler;
std::vector<std::unique_ptr<IFieldIndex>> _fieldIndexes;
- uint32_t _numFields;
+ const uint32_t _numFields;
public:
FieldIndexCollection(const index::Schema& schema, const index::IFieldLengthInspector& inspector);
diff --git a/searchlib/src/vespa/searchlib/memoryindex/field_inverter.h b/searchlib/src/vespa/searchlib/memoryindex/field_inverter.h
index e68f664603e..7995dc56de8 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/field_inverter.h
+++ b/searchlib/src/vespa/searchlib/memoryindex/field_inverter.h
@@ -97,7 +97,7 @@ private:
class ElemInfo {
public:
- int32_t _weight;
+ const int32_t _weight;
uint32_t _len;
uint32_t _field_length;
@@ -161,7 +161,7 @@ private:
using UInt32Vector = std::vector<uint32_t, vespalib::allocator_large<uint32_t>>;
// Current field state.
- uint32_t _fieldId; // current field id
+ const uint32_t _fieldId; // current field id
uint32_t _elem; // current element
uint32_t _wpos; // current word pos
uint32_t _docId;
diff --git a/searchlib/src/vespa/searchlib/memoryindex/invert_task.h b/searchlib/src/vespa/searchlib/memoryindex/invert_task.h
index b898deb5c16..a351fd2a10f 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/invert_task.h
+++ b/searchlib/src/vespa/searchlib/memoryindex/invert_task.h
@@ -27,7 +27,7 @@ class InvertTask : public vespalib::Executor::Task
const std::vector<std::unique_ptr<FieldInverter>>& _inverters;
const std::vector<std::unique_ptr<UrlFieldInverter>>& _uri_inverters;
const document::Document& _doc;
- uint32_t _lid;
+ const uint32_t _lid;
std::remove_reference_t<OnWriteDoneType> _on_write_done;
public:
InvertTask(const DocumentInverterContext& inv_context, const InvertContext& context, const std::vector<std::unique_ptr<FieldInverter>>& inverters, const std::vector<std::unique_ptr<UrlFieldInverter>>& uri_inverters, uint32_t lid, const document::Document& doc, OnWriteDoneType on_write_done);
diff --git a/searchlib/src/vespa/searchlib/memoryindex/posting_list_entry.h b/searchlib/src/vespa/searchlib/memoryindex/posting_list_entry.h
index ef0866e957b..a8cc7fce1f2 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/posting_list_entry.h
+++ b/searchlib/src/vespa/searchlib/memoryindex/posting_list_entry.h
@@ -13,7 +13,6 @@ class InterleavedFeatures {
protected:
uint16_t _num_occs;
uint16_t _field_length;
-
public:
InterleavedFeatures()
: _num_occs(0),
diff --git a/vespalib/CMakeLists.txt b/vespalib/CMakeLists.txt
index a3d5054973f..2ceb56bf226 100644
--- a/vespalib/CMakeLists.txt
+++ b/vespalib/CMakeLists.txt
@@ -21,6 +21,7 @@ vespa_define_module(
src/apps/vespa-drop-file-from-cache
src/apps/vespa-probe-io-uring
src/apps/vespa-resource-limits
+ src/apps/vespa-stress-and-validate-memory
src/apps/vespa-tsan-digest
src/apps/vespa-validate-hostname
diff --git a/vespalib/src/apps/vespa-stress-and-validate-memory/.gitignore b/vespalib/src/apps/vespa-stress-and-validate-memory/.gitignore
new file mode 100644
index 00000000000..77cf05d77d5
--- /dev/null
+++ b/vespalib/src/apps/vespa-stress-and-validate-memory/.gitignore
@@ -0,0 +1 @@
+vespa-stress-and-validate-memory
diff --git a/vespalib/src/apps/vespa-stress-and-validate-memory/CMakeLists.txt b/vespalib/src/apps/vespa-stress-and-validate-memory/CMakeLists.txt
new file mode 100644
index 00000000000..17ea9d709df
--- /dev/null
+++ b/vespalib/src/apps/vespa-stress-and-validate-memory/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_stress-and-validate-memory_app
+ SOURCES
+ stress_and_validate_memory.cpp
+ OUTPUT_NAME vespa-stress-and-validate-memory
+ INSTALL bin
+ DEPENDS
+ vespalib
+)
diff --git a/vespalib/src/apps/vespa-stress-and-validate-memory/stress_and_validate_memory.cpp b/vespalib/src/apps/vespa-stress-and-validate-memory/stress_and_validate_memory.cpp
new file mode 100644
index 00000000000..a7a227e45b3
--- /dev/null
+++ b/vespalib/src/apps/vespa-stress-and-validate-memory/stress_and_validate_memory.cpp
@@ -0,0 +1,262 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/util/mmap_file_allocator.h>
+#include <vespa/vespalib/util/size_literals.h>
+#include <vespa/vespalib/util/time.h>
+#include <thread>
+#include <vector>
+#include <atomic>
+#include <cstring>
+#include <mutex>
+#include <filesystem>
+#include <iostream>
+
+std::atomic<bool> stopped = false;
+std::mutex log_mutex;
+using namespace vespalib;
+
+const char * description =
+ "Runs stress test of memory by slowly growing a heap filled with 0.\n"
+ "Each core on the node will then continously read back and verify random memory sections still being zero.\n"
+ "-h heap_in_GB(1) and -t run_time_in_seconds(10) are the options available.\n"
+ "Memory will grow slowly during the first half of the test and then stay put.\n"
+ "There is also the option to include stress testing of swap files by using -s <directory>.\n"
+ "The swap will grow to twice the heap size in the same manner.\n"
+ "Swap memory is stressed by constant random writing from all cores.\n";
+
+class Config {
+public:
+ Config(size_t heap_size, size_t nprocs, size_t allocs_per_thread, duration alloc_time)
+ : _heap_size(heap_size),
+ _nprocs(nprocs),
+ _allocs_per_thread(allocs_per_thread),
+ _alloc_time(alloc_time)
+ {}
+ size_t allocs_per_thread() const { return _allocs_per_thread; }
+ duration alloc_time() const { return _alloc_time; }
+ size_t alloc_size() const { return _heap_size / _nprocs / _allocs_per_thread; }
+ size_t nprocs() const { return _nprocs; }
+ size_t heap_size() const { return _heap_size; }
+private:
+ const size_t _heap_size;
+ const size_t _nprocs;
+ const size_t _allocs_per_thread;
+ const duration _alloc_time;
+};
+
+class Allocations {
+public:
+ Allocations(const Config & config);
+ ~Allocations();
+ size_t make_and_load_alloc_per_thread();
+ size_t verify_random_allocation(unsigned int *seed) const;
+ const Config & cfg() const { return _cfg; }
+ size_t verify_and_report_errors() const {
+ std::lock_guard guard(_mutex);
+ for (const auto & alloc : _allocations) {
+ _total_errors += verify_allocation(alloc.get());
+ }
+ return _total_errors;
+ }
+private:
+ size_t verify_allocation(const char *) const;
+ const Config & _cfg;
+ mutable std::mutex _mutex;
+ mutable size_t _total_errors;
+ std::vector<std::unique_ptr<char[]>> _allocations;
+};
+
+Allocations::Allocations(const Config & config)
+ : _cfg(config),
+ _mutex(),
+ _total_errors(0),
+ _allocations()
+{
+ _allocations.reserve(config.nprocs() * config.allocs_per_thread());
+ std::cout << "Starting memory stress with " << config.nprocs() << " threads and heap size " << (config.heap_size()/1_Mi) << " mb. Allocation size = " << config.alloc_size() << std::endl;
+}
+
+Allocations::~Allocations() = default;
+
+size_t
+Allocations::make_and_load_alloc_per_thread() {
+ auto alloc = std::make_unique<char[]>(cfg().alloc_size());
+ memset(alloc.get(), 0, cfg().alloc_size());
+ std::lock_guard guard(_mutex);
+ _allocations.push_back(std::move(alloc));
+ return 1;
+}
+
+size_t
+Allocations::verify_random_allocation(unsigned int *seed) const {
+ const char * alloc;
+ {
+ std::lock_guard guard(_mutex);
+ alloc = _allocations[rand_r(seed) % _allocations.size()].get();
+ }
+ size_t error_count = verify_allocation(alloc);
+ std::lock_guard guard(_mutex);
+ _total_errors += error_count;
+ return error_count;
+}
+
+size_t
+Allocations::verify_allocation(const char * alloc) const {
+ size_t error_count = 0;
+ for (size_t i = 0; i < cfg().alloc_size(); i++) {
+ if (alloc[i] != 0) {
+ error_count++;
+ std::lock_guard guard(log_mutex);
+ std::cout << "Thread " << std::this_thread::get_id() << ": Unexpected byte(" << std::hex << int(alloc[i]) << ") at " << static_cast<const void *>(alloc + i) << std::endl;
+ }
+ }
+ return error_count;
+}
+
+class FileBackedMemory {
+public:
+ FileBackedMemory(const Config & config, std::string dir);
+ ~FileBackedMemory();
+ const Config & cfg() const { return _cfg; }
+ size_t make_and_load_alloc_per_thread();
+ void random_write(unsigned int *seed);
+private:
+ using PtrAndSize = std::pair<void *, size_t>;
+ const Config & _cfg;
+ mutable std::mutex _mutex;
+ alloc::MmapFileAllocator _allocator;
+ std::vector<PtrAndSize> _allocations;
+};
+
+FileBackedMemory::FileBackedMemory(const Config & config, std::string dir)
+ : _cfg(config),
+ _mutex(),
+ _allocator(dir),
+ _allocations()
+{
+ _allocations.reserve(config.nprocs() * config.allocs_per_thread());
+ std::cout << "Starting mmapped stress in '" << dir << "' with " << config.nprocs() << " threads and heap size " << (config.heap_size()/1_Mi) << " mb. Allocation size = " << config.alloc_size() << std::endl;
+}
+
+FileBackedMemory::~FileBackedMemory() {
+ std::lock_guard guard(_mutex);
+ for (auto ptrAndSize : _allocations) {
+ _allocator.free(ptrAndSize);
+ }
+}
+
+
+size_t
+FileBackedMemory::make_and_load_alloc_per_thread() {
+ PtrAndSize alloc;
+ {
+ std::lock_guard guard(_mutex);
+ alloc = _allocator.alloc(cfg().alloc_size());
+ }
+ memset(alloc.first, 0, cfg().alloc_size());
+ std::lock_guard guard(_mutex);
+ _allocations.push_back(std::move(alloc));
+ return 1;
+}
+
+void
+FileBackedMemory::random_write(unsigned int *seed) {
+ PtrAndSize ptrAndSize;
+ {
+ std::lock_guard guard(_mutex);
+ ptrAndSize = _allocations[rand_r(seed) % _allocations.size()];
+ }
+ memset(ptrAndSize.first, rand_r(seed)%256, ptrAndSize.second);
+}
+
+void
+stress_and_validate_heap(Allocations *allocs) {
+ size_t num_verifications = 0;
+ size_t num_errors = 0;
+ size_t num_allocs = allocs->make_and_load_alloc_per_thread();
+ const size_t max_allocs = allocs->cfg().allocs_per_thread();
+ const double alloc_time = to_s(allocs->cfg().alloc_time());
+ steady_time start = steady_clock::now();
+ unsigned int seed = start.time_since_epoch().count()%4294967291ul;
+ for (;!stopped; num_verifications++) {
+ num_errors += allocs->verify_random_allocation(&seed);
+ double ratio = to_s(steady_clock::now() - start) / alloc_time;
+ if (num_allocs < std::min(size_t(ratio*max_allocs), max_allocs)) {
+ num_allocs += allocs->make_and_load_alloc_per_thread();
+ }
+ }
+ std::lock_guard guard(log_mutex);
+ std::cout << "Thread " << std::this_thread::get_id() << ": Completed " << num_verifications << " verifications with " << num_errors << std::endl;
+}
+
+void
+stress_file_backed_memory(FileBackedMemory * mmapped) {
+ size_t num_writes = 0;
+ size_t num_allocs = mmapped->make_and_load_alloc_per_thread();
+ const size_t max_allocs = mmapped->cfg().allocs_per_thread();
+ const double alloc_time = to_s(mmapped->cfg().alloc_time());
+ steady_time start = steady_clock::now();
+ unsigned int seed = start.time_since_epoch().count()%4294967291ul;
+ for (;!stopped; num_writes++) {
+ mmapped->random_write(&seed);
+ double ratio = to_s(steady_clock::now() - start) / alloc_time;
+ if (num_allocs < std::min(size_t(ratio*max_allocs), max_allocs)) {
+ num_allocs += mmapped->make_and_load_alloc_per_thread();
+ }
+ }
+ std::lock_guard guard(log_mutex);
+ std::cout << "Thread " << std::this_thread::get_id() << ": Completed " << num_writes << " writes" << std::endl;
+}
+
+int
+main(int argc, char *argv[]) {
+ size_t heapSize = 1_Gi;
+ duration runTime = 10s;
+ std::string swap_dir;
+ std::cout << description << std::endl;
+ for (int i = 1; i+2 <= argc; i+=2) {
+ char option = argv[i][strlen(argv[i]) - 1];
+ char *arg = argv[i+1];
+ switch (option) {
+ case 'h': heapSize = atof(arg) * 1_Gi; break;
+ case 's': swap_dir = arg; break;
+ case 't': runTime = from_s(atof(arg)); break;
+ default:
+ std::cerr << "Option " << option << " not in allowed set [h,s,t]" << std::endl;
+ break;
+ }
+ }
+ size_t nprocs = std::thread::hardware_concurrency();
+ size_t allocations_per_thread = 1024;
+
+ Config cfgHeap(heapSize, nprocs, allocations_per_thread, runTime/2);
+ Config cfgFile(heapSize*2, nprocs, allocations_per_thread, runTime/2);
+ Allocations allocations(cfgHeap);
+ std::unique_ptr<FileBackedMemory> filebackedMemory;
+
+ std::vector<std::thread> heapValidators;
+ heapValidators.reserve(nprocs*2);
+ for (unsigned int i = 0; i < nprocs; i++) {
+ heapValidators.emplace_back(stress_and_validate_heap, &allocations);
+ }
+ if ( ! swap_dir.empty()) {
+ std::filesystem::create_directories(swap_dir);
+ filebackedMemory = std::make_unique<FileBackedMemory>(cfgFile, swap_dir);
+ for (unsigned int i = 0; i < nprocs; i++) {
+ heapValidators.emplace_back(stress_file_backed_memory, filebackedMemory.get());
+ }
+ }
+ std::cout << "Running memory stresstest for " << to_s(runTime) << " seconds" << std::endl;
+ steady_time eot = steady_clock::now() + runTime;
+ while (steady_clock::now() < eot) {
+ std::this_thread::sleep_for(1s);
+ }
+ stopped = true;
+ for (auto & th : heapValidators) {
+ th.join();
+ }
+ heapValidators.clear();
+ size_t num_errors = allocations.verify_and_report_errors();
+ std::cout << "Completed stresstest with " << num_errors << " errors" << std::endl;
+ return num_errors == 0 ? 0 : 1;
+}
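A possible invocation of the new tool, assuming it is installed as vespa-stress-and-validate-memory (the OUTPUT_NAME above) and is on the PATH; the heap size, run time and swap directory below are illustrative values, not defaults:

    vespa-stress-and-validate-memory -h 2 -t 60 -s /tmp/stress-swap

With these arguments the program grows a 2 GiB zero-filled heap during the first 30 seconds (half the run time), lets every hardware thread keep verifying random sections of it for the full 60 seconds, and additionally stresses a 4 GiB file-backed area (twice the heap size) under /tmp/stress-swap with random writes.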
diff --git a/vespalib/src/tests/data/smart_buffer/smart_buffer_test.cpp b/vespalib/src/tests/data/smart_buffer/smart_buffer_test.cpp
index c461da1fcf7..b860aa3326a 100644
--- a/vespalib/src/tests/data/smart_buffer/smart_buffer_test.cpp
+++ b/vespalib/src/tests/data/smart_buffer/smart_buffer_test.cpp
@@ -24,6 +24,7 @@ TEST("require that basic read/write works") {
SmartBuffer buf(3);
TEST_DO(checkBuffer("", buf));
{ // read from empty buffer
+ EXPECT_TRUE(buf.empty());
EXPECT_EQUAL(0u, buf.obtain().size);
}
{ // write to buffer
@@ -34,6 +35,7 @@ TEST("require that basic read/write works") {
mem.data[1] = 'b';
mem.data[2] = 'c';
EXPECT_EQUAL(&buf, &buf.commit(3));
+ EXPECT_FALSE(buf.empty());
mem = buf.reserve(0);
TEST_DO(checkBuffer("abc", buf));
EXPECT_LESS_EQUAL(0u, mem.size);
@@ -61,6 +63,7 @@ TEST("require that basic read/write works") {
EXPECT_LESS_EQUAL(5u, mem.size);
}
{ // read until end
+ EXPECT_FALSE(buf.empty());
Memory mem = buf.obtain();
TEST_DO(checkBuffer("cd", buf));
TEST_DO(checkMemory("cd", mem));
@@ -69,6 +72,7 @@ TEST("require that basic read/write works") {
TEST_DO(checkBuffer("d", buf));
TEST_DO(checkMemory("d", mem));
EXPECT_EQUAL(&buf, &buf.evict(1));
+ EXPECT_TRUE(buf.empty());
mem = buf.obtain();
TEST_DO(checkBuffer("", buf));
TEST_DO(checkMemory("", mem));
@@ -83,16 +87,21 @@ TEST("require that requested initial size is not adjusted") {
TEST("require that buffer auto-resets when empty") {
SmartBuffer buf(64);
EXPECT_EQUAL(buf.reserve(10).size, 64u);
+ EXPECT_TRUE(buf.empty());
write_buf("abc", buf);
+ EXPECT_FALSE(buf.empty());
EXPECT_EQUAL(buf.reserve(10).size, 61u);
buf.evict(3);
+ EXPECT_TRUE(buf.empty());
EXPECT_EQUAL(buf.reserve(10).size, 64u);
}
TEST("require that buffer can grow") {
SmartBuffer buf(64);
EXPECT_EQUAL(buf.capacity(), 64u);
+ EXPECT_TRUE(buf.empty());
write_buf("abc", buf);
+ EXPECT_FALSE(buf.empty());
write_buf("abc", buf);
buf.evict(3);
EXPECT_EQUAL(buf.reserve(70).size, size_t(128 - 3));
@@ -103,7 +112,9 @@ TEST("require that buffer can grow") {
TEST("require that buffer can grow more than 2x") {
SmartBuffer buf(64);
EXPECT_EQUAL(buf.capacity(), 64u);
+ EXPECT_TRUE(buf.empty());
write_buf("abc", buf);
+ EXPECT_FALSE(buf.empty());
write_buf("abc", buf);
buf.evict(3);
EXPECT_EQUAL(buf.reserve(170).size, 170u);
@@ -114,7 +125,9 @@ TEST("require that buffer can grow more than 2x") {
TEST("require that buffer can be compacted") {
SmartBuffer buf(16);
EXPECT_EQUAL(buf.capacity(), 16u);
+ EXPECT_TRUE(buf.empty());
write_buf("abc", buf);
+ EXPECT_FALSE(buf.empty());
write_buf("abc", buf);
buf.evict(3);
write_buf("abc", buf);
@@ -133,6 +146,7 @@ TEST("require that buffer can be compacted") {
TEST("require that a completely empty buffer can be created") {
SmartBuffer buf(0);
EXPECT_EQUAL(buf.capacity(), 0u);
+ EXPECT_TRUE(buf.empty());
EXPECT_TRUE(buf.obtain().data == nullptr);
}
diff --git a/vespalib/src/vespa/vespalib/data/smart_buffer.h b/vespalib/src/vespa/vespalib/data/smart_buffer.h
index 17fb7614f0e..fc7042c5eea 100644
--- a/vespalib/src/vespa/vespalib/data/smart_buffer.h
+++ b/vespalib/src/vespa/vespalib/data/smart_buffer.h
@@ -33,9 +33,10 @@ private:
public:
SmartBuffer(size_t initial_size);
~SmartBuffer();
+ bool empty() const { return (read_len() == 0); }
size_t capacity() const { return _data.size(); }
void drop_if_empty() {
- if ((read_len() == 0) && (_data.size() > 0)) {
+ if (empty() && (_data.size() > 0)) {
drop();
}
}
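A minimal sketch of how the new empty() accessor fits into the usual reserve/commit/obtain/evict cycle, assuming SmartBuffer lives in namespace vespalib and behaves as exercised in the test above (the calls and sizes are illustrative):

    #include <vespa/vespalib/data/smart_buffer.h>
    #include <cassert>
    #include <cstring>

    int main() {
        vespalib::SmartBuffer buf(64);
        assert(buf.empty());               // nothing committed yet
        auto mem = buf.reserve(3);         // writable space for at least 3 bytes
        memcpy(mem.data, "abc", 3);
        buf.commit(3);                     // the three bytes become readable
        assert(!buf.empty());
        buf.evict(buf.obtain().size);      // consume everything that was committed
        assert(buf.empty());               // drained again; drop_if_empty() may now release the backing store
        return 0;
    }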
diff --git a/vespalib/src/vespa/vespalib/util/mmap_file_allocator.h b/vespalib/src/vespa/vespalib/util/mmap_file_allocator.h
index 3b7b0039fab..0a83bfb4e60 100644
--- a/vespalib/src/vespa/vespalib/util/mmap_file_allocator.h
+++ b/vespalib/src/vespa/vespalib/util/mmap_file_allocator.h
@@ -13,7 +13,7 @@ namespace vespalib::alloc {
/*
* Class handling memory allocations backed by one or more files.
- * Not reentant. Should not be destructed before all allocations
+ * Not reentrant or thread safe. Should not be destructed before all allocations
* have been freed.
*/
class MmapFileAllocator : public MemoryAllocator {
@@ -30,8 +30,8 @@ class MmapFileAllocator : public MemoryAllocator {
{
}
};
- vespalib::string _dir_name;
- mutable File _file;
+ const vespalib::string _dir_name;
+ mutable File _file;
mutable uint64_t _end_offset;
mutable hash_map<void *, SizeAndOffset> _allocations;
mutable FileAreaFreeList _freelist;