-rw-r--r--  README.md | 7
-rw-r--r--  TODO.md | 2
-rw-r--r--  config-model/src/main/java/com/yahoo/documentmodel/NewDocumentType.java | 81
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java | 27
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/Search.java | 2
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/ConfigSentinel.java | 2
-rw-r--r--  config-model/src/test/derived/namecollision/collision.sd | 8
-rw-r--r--  config-model/src/test/derived/namecollision/collisionstruct.sd | 15
-rw-r--r--  config-model/src/test/derived/namecollision/documentmanager.cfg | 55
-rw-r--r--  config-model/src/test/java/com/yahoo/searchdefinition/derived/NameCollisionTestCase.java | 20
-rw-r--r--  config-proxy/src/main/sh/vespa-config-loadtester.sh | 2
-rw-r--r--  config/src/main/java/com/yahoo/vespa/config/benchmark/LoadTester.java | 96
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java | 6
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/DeploymentMetricsResponse.java | 5
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ConfigServerMaintainer.java | 3
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java | 10
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsAggregator.java | 13
-rw-r--r--  configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetrieverTest.java | 5
-rw-r--r--  configserver/src/test/resources/metrics/clustercontroller_metrics.json | 42
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/ClusterMetrics.java | 7
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/AthenzAccessControlService.java | 12
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/ZmsClientMock.java | 6
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java | 5
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ZipStreamReader.java | 17
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java | 31
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RetriggerEntry.java | 27
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RetriggerEntrySerializer.java | 63
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CloudTrialExpirer.java | 80
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintainer.java | 3
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java | 5
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java | 2
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/RetriggerMaintainer.java | 66
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/Notification.java | 7
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDb.java | 94
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java | 20
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializer.java | 4
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java | 3
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandler.java | 38
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiHandler.java | 13
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java | 31
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java | 14
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java | 25
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/NodeRepositoryMock.java | 11
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneApiMock.java | 8
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/CloudTrialExpirerTest.java | 93
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintainerTest.java | 3
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java | 6
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RetriggerMaintainerTest.java | 70
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java | 3
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java | 30
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDbTest.java | 71
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java | 6
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandlerTest.java | 3
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/responses/maintenance.json | 6
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java | 6
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java | 54
-rw-r--r--  default_build_settings.cmake | 6
-rw-r--r--  dist/vespa.spec | 46
-rw-r--r--  document/src/main/java/com/yahoo/document/StructDataType.java | 2
-rw-r--r--  document/src/main/java/com/yahoo/document/StructuredDataType.java | 2
-rw-r--r--  document/src/main/java/com/yahoo/document/TemporaryStructuredDataType.java | 3
-rw-r--r--  eval/src/vespa/eval/eval/array_array_map.h | 2
-rw-r--r--  filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileDownloaderTest.java | 24
-rw-r--r--  flags/pom.xml | 5
-rw-r--r--  flags/src/main/java/com/yahoo/vespa/flags/JsonNodeRawFlag.java | 24
-rw-r--r--  flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java | 7
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java | 24
-rw-r--r--  node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollectorTest.java | 41
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java | 4
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintainer.java | 3
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java | 7
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ParkedExpirer.java | 62
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirer.java | 30
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainer.java | 18
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java | 2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ParkedExpirerTest.java | 71
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirerTest.java | 50
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java | 7
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/maintenance.json | 3
-rwxr-xr-x  screwdriver/build-vespa.sh | 8
-rw-r--r--  searchcore/src/tests/proton/documentdb/documentdb_test.cpp | 124
-rw-r--r--  searchcore/src/tests/proton/server/memory_flush_config_updater/memory_flush_config_updater_test.cpp | 2
-rw-r--r--  searchcore/src/vespa/searchcore/grouping/groupingcontext.cpp | 10
-rw-r--r--  searchcore/src/vespa/searchcore/proton/server/documentdb.cpp | 14
-rw-r--r--  searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.cpp | 37
-rw-r--r--  searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.h | 44
-rw-r--r--  searchcore/src/vespa/searchcore/proton/server/memory_flush_config_updater.cpp | 33
-rw-r--r--  searchcore/src/vespa/searchcore/proton/server/memory_flush_config_updater.h | 27
-rw-r--r--  searchlib/src/tests/features/tensor_from_labels/tensor_from_labels_test.cpp | 18
-rw-r--r--  searchlib/src/vespa/searchlib/aggregation/group.h | 2
-rw-r--r--  searchlib/src/vespa/searchlib/common/indexmetainfo.cpp | 54
-rw-r--r--  searchlib/src/vespa/searchlib/features/tensor_from_labels_feature.cpp | 14
-rw-r--r--  vespa-athenz/src/main/java/com/yahoo/vespa/athenz/api/AthenzGroup.java | 41
-rw-r--r--  vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/DefaultZmsClient.java | 17
-rw-r--r--  vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/ZmsClient.java | 3
-rw-r--r--  vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/bindings/MembershipEntity.java | 47
-rw-r--r--  vespa-feed-client/abi-spec.json | 46
-rw-r--r--  vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java | 14
-rw-r--r--  vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java | 36
-rw-r--r--  vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java | 19
-rw-r--r--  vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java | 8
-rw-r--r--  vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonFeeder.java | 172
-rw-r--r--  vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonParseException.java | 15
-rw-r--r--  vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParseException.java | 15
-rw-r--r--  vespa-feed-client/src/main/java/ai/vespa/feed/client/ResultParseException.java | 14
-rw-r--r--  vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonFeederTest.java | 6
-rw-r--r--  vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java | 11
-rw-r--r--  vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaRecordWriter.java | 7
-rw-r--r--  vespajlib/src/main/java/com/yahoo/concurrent/maintenance/JobMetrics.java | 23
-rw-r--r--  vespajlib/src/main/java/com/yahoo/concurrent/maintenance/Maintainer.java | 18
-rw-r--r--  vespajlib/src/test/java/com/yahoo/concurrent/maintenance/JobControlTest.java | 2
-rw-r--r--  vespajlib/src/test/java/com/yahoo/concurrent/maintenance/MaintainerTest.java | 35
-rw-r--r--  vespajlib/src/test/java/com/yahoo/concurrent/maintenance/TestMaintainer.java | 6
113 files changed, 1907 insertions(+), 822 deletions(-)
diff --git a/README.md b/README.md
index 1a42eb2ff68..7d3be172cb5 100644
--- a/README.md
+++ b/README.md
@@ -8,9 +8,10 @@ over big data at serving time.
This is the primary repository for Vespa where all development is happening.
New production releases from this repository's master branch are made each weekday from Monday to Thursday.
-Home page: [https://vespa.ai](https://vespa.ai)
-Documentation: [https://docs.vespa.ai/](https://docs.vespa.ai/)
-Run applications in the cloud for free: [https://cloud.vespa.ai](https://cloud.vespa.ai)
+* Home page: [https://vespa.ai](https://vespa.ai)
+* Documentation: [https://docs.vespa.ai](https://docs.vespa.ai)
+* Continuous build: [https://factory.vespa.oath.cloud](https://factory.vespa.oath.cloud)
+* Run applications in the cloud for free: [https://cloud.vespa.ai](https://cloud.vespa.ai)
Vespa build status: [![Vespa Build Status](https://cd.screwdriver.cd/pipelines/6386/build-vespa/badge)](https://cd.screwdriver.cd/pipelines/6386)
diff --git a/TODO.md b/TODO.md
index eec3e3767b6..c633a1bf38a 100644
--- a/TODO.md
+++ b/TODO.md
@@ -17,6 +17,8 @@ bundles of parameters accessible to Searchers processing queries. Writes go thro
Document Processors, but have no equivalent support for parametrization. This is to allow configuration of document
processor profiles by reusing the query profile support also for document processors.
+See [slack discussion](https://vespatalk.slack.com/archives/C01QNBPPNT1/p1624176344102300) for more details.
+
**Code pointers:**
- [Query profiles](https://github.com/vespa-engine/vespa/blob/master/container-search/src/main/java/com/yahoo/search/query/profile/QueryProfile.java)
- [Document processors](https://github.com/vespa-engine/vespa/blob/master/docproc/src/main/java/com/yahoo/docproc/DocumentProcessor.java)
diff --git a/config-model/src/main/java/com/yahoo/documentmodel/NewDocumentType.java b/config-model/src/main/java/com/yahoo/documentmodel/NewDocumentType.java
index 38d831a0b28..da338ad3107 100644
--- a/config-model/src/main/java/com/yahoo/documentmodel/NewDocumentType.java
+++ b/config-model/src/main/java/com/yahoo/documentmodel/NewDocumentType.java
@@ -3,7 +3,6 @@ package com.yahoo.documentmodel;
import com.yahoo.document.DataType;
import com.yahoo.document.Document;
-import com.yahoo.document.DocumentId;
import com.yahoo.document.Field;
import com.yahoo.document.StructDataType;
import com.yahoo.document.StructuredDataType;
@@ -32,34 +31,6 @@ import static java.util.Collections.emptySet;
*/
public final class NewDocumentType extends StructuredDataType implements DataTypeCollection {
- public static final class Name {
-
- private final String name;
- private final int id;
-
- public Name(String name) {
- this(name.hashCode(), name);
- }
-
- public Name(int id, String name) {
- this.id = id;
- this.name = name;
- }
-
- public String toString() { return name; }
-
- public final String getName() { return name; }
-
- public final int getId() { return id; }
-
- public int hashCode() { return name.hashCode(); }
-
- public boolean equals(Object other) {
- if ( ! (other instanceof Name)) return false;
- return name.equals(((Name)other).getName());
- }
- }
-
private final Name name;
private final DataTypeRepo dataTypes = new DataTypeRepo();
private final Map<Integer, NewDocumentType> inherits = new LinkedHashMap<>();
@@ -139,7 +110,7 @@ public final class NewDocumentType extends StructuredDataType implements DataTyp
}
@Override
- public Class getValueClass() {
+ public Class<Document> getValueClass() {
return Document.class;
}
@@ -148,7 +119,8 @@ public final class NewDocumentType extends StructuredDataType implements DataTyp
if (!(value instanceof Document)) {
return false;
}
- /** Temporary disabled due to clash with document and covariant return type
+ /*
+ Temporary disabled due to clash with document and covariant return type
Document doc = (Document) value;
if (((NewDocumentType) doc.getDataType()).inherits(this)) {
//the value is of this type; or the supertype of the value is of this type, etc....
@@ -162,28 +134,31 @@ public final class NewDocumentType extends StructuredDataType implements DataTyp
for (Field f : getFields()) {
Field inhF = inherited.getField(f.getName());
if (inhF != null && !inhF.equals(f)) {
- throw new IllegalArgumentException("Inherited document '" + inherited.toString() + "' already contains field '" +
- inhF.getName() + "'. Can not override with '" + f.getName() + "'.");
+ throw new IllegalArgumentException("Inherited document '" + inherited + "' already contains field '" +
+ inhF.getName() + "'. Can not override with '" + f.getName() + "'.");
}
}
for (Field f : inherited.getAllFields()) {
for (NewDocumentType side : inherits.values()) {
Field sideF = side.getField(f.getName());
if (sideF != null && !sideF.equals(f)) {
- throw new IllegalArgumentException("Inherited document '" + side.toString() + "' already contains field '" +
- sideF.getName() + "'. Document '" + inherited.toString() + "' also defines field '" + f.getName() +
- "'.Multiple inheritance must be disjunctive.");
+ throw new IllegalArgumentException("Inherited document '" + side + "' already contains field '" +
+ sideF.getName() + "'. Document '" + inherited +
+ "' also defines field '" + f.getName() +
+ "'.Multiple inheritance must be disjunctive.");
}
}
}
return true;
}
+
public void inherit(NewDocumentType inherited) {
if ( ! inherits.containsKey(inherited.getId())) {
verifyInheritance(inherited);
inherits.put(inherited.getId(), inherited);
}
}
+
public boolean inherits(NewDocumentType superType) {
if (getId() == superType.getId()) return true;
for (NewDocumentType type : inherits.values()) {
@@ -243,7 +218,7 @@ public final class NewDocumentType extends StructuredDataType implements DataTyp
@Override
public Document createFieldValue() {
- return new Document(null, (DocumentId)null);
+ throw new RuntimeException("Cannot create an instance of " + this);
}
@Override
@@ -375,4 +350,36 @@ public final class NewDocumentType extends StructuredDataType implements DataTyp
return importedFieldNames;
}
+ public static final class Name {
+
+ private final String name;
+ private final int id;
+
+ public Name(String name) {
+ this(name.hashCode(), name);
+ }
+
+ public Name(int id, String name) {
+ this.id = id;
+ this.name = name;
+ }
+
+ @Override
+ public String toString() { return name; }
+
+ public final String getName() { return name; }
+
+ public final int getId() { return id; }
+
+ @Override
+ public int hashCode() { return name.hashCode(); }
+
+ @Override
+ public boolean equals(Object other) {
+ if ( ! (other instanceof Name)) return false;
+ return name.equals(((Name)other).getName());
+ }
+
+ }
+
}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java b/config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java
index fed35382b21..9b752c4179f 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java
@@ -209,17 +209,13 @@ public class DocumentModelBuilder {
private static DataType resolveTemporariesRecurse(DataType type, DataTypeCollection repo,
Collection<NewDocumentType> docs) {
if (type instanceof TemporaryStructuredDataType) {
- NewDocumentType docType = getDocumentType(docs, type.getId());
- if (docType != null) {
- type = docType;
- return type;
- }
- DataType real = repo.getDataType(type.getId());
- if (real == null) {
- throw new NullPointerException("Can not find type '" + type.toString() + "', impossible.");
- }
- type = real;
- } else if (type instanceof StructDataType) {
+ DataType struct = repo.getDataType(type.getId());
+ if (struct != null)
+ type = struct;
+ else
+ type = getDocumentType(docs, type.getId());
+ }
+ else if (type instanceof StructDataType) {
StructDataType dt = (StructDataType) type;
for (com.yahoo.document.Field field : dt.getFields()) {
if (field.getDataType() != type) {
@@ -227,14 +223,17 @@ public class DocumentModelBuilder {
field.setDataType(resolveTemporariesRecurse(field.getDataType(), repo, docs));
}
}
- } else if (type instanceof MapDataType) {
+ }
+ else if (type instanceof MapDataType) {
MapDataType t = (MapDataType) type;
t.setKeyType(resolveTemporariesRecurse(t.getKeyType(), repo, docs));
t.setValueType(resolveTemporariesRecurse(t.getValueType(), repo, docs));
- } else if (type instanceof CollectionDataType) {
+ }
+ else if (type instanceof CollectionDataType) {
CollectionDataType t = (CollectionDataType) type;
t.setNestedType(resolveTemporariesRecurse(t.getNestedType(), repo, docs));
- } else if (type instanceof ReferenceDataType) {
+ }
+ else if (type instanceof ReferenceDataType) {
ReferenceDataType t = (ReferenceDataType) type;
if (t.getTargetType() instanceof TemporaryStructuredDataType) {
DataType targetType = resolveTemporariesRecurse(t.getTargetType(), repo, docs);
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/Search.java b/config-model/src/main/java/com/yahoo/searchdefinition/Search.java
index 4b7b1625a01..9ce1b8bb330 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/Search.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/Search.java
@@ -98,7 +98,7 @@ public class Search implements ImmutableSearch {
private final DeployLogger deployLogger;
private final ModelContext.Properties properties;
- /** Testin only */
+ /** Testing only */
public Search(String name) {
this(name, null, new BaseDeployLogger(), new TestProperties());
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/ConfigSentinel.java b/config-model/src/main/java/com/yahoo/vespa/model/ConfigSentinel.java
index 6203f78fc0c..dd35787571e 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/ConfigSentinel.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/ConfigSentinel.java
@@ -87,7 +87,7 @@ public class ConfigSentinel extends AbstractService implements SentinelConfig.Pr
var builder = new SentinelConfig.Connectivity.Builder();
if (enable) {
builder.minOkPercent(50);
- builder.maxBadCount(2);
+ builder.maxBadCount(1);
} else {
builder.minOkPercent(0);
builder.maxBadCount(Integer.MAX_VALUE);
diff --git a/config-model/src/test/derived/namecollision/collision.sd b/config-model/src/test/derived/namecollision/collision.sd
new file mode 100644
index 00000000000..43dd4830204
--- /dev/null
+++ b/config-model/src/test/derived/namecollision/collision.sd
@@ -0,0 +1,8 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+search collision {
+
+ document collision {
+
+ }
+
+}
diff --git a/config-model/src/test/derived/namecollision/collisionstruct.sd b/config-model/src/test/derived/namecollision/collisionstruct.sd
new file mode 100644
index 00000000000..c98efb0b582
--- /dev/null
+++ b/config-model/src/test/derived/namecollision/collisionstruct.sd
@@ -0,0 +1,15 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+search collisionstruct {
+
+ document collisionstruct {
+
+ struct collision {
+ }
+
+ field structarray type array<collision> {
+ indexing: summary
+ }
+
+ }
+
+}
diff --git a/config-model/src/test/derived/namecollision/documentmanager.cfg b/config-model/src/test/derived/namecollision/documentmanager.cfg
new file mode 100644
index 00000000000..8d0d89dde35
--- /dev/null
+++ b/config-model/src/test/derived/namecollision/documentmanager.cfg
@@ -0,0 +1,55 @@
+enablecompression false
+datatype[].id 1381038251
+datatype[].structtype[].name "position"
+datatype[].structtype[].version 0
+datatype[].structtype[].compresstype NONE
+datatype[].structtype[].compresslevel 0
+datatype[].structtype[].compressthreshold 95
+datatype[].structtype[].compressminsize 800
+datatype[].structtype[].field[].name "x"
+datatype[].structtype[].field[].datatype 0
+datatype[].structtype[].field[].detailedtype ""
+datatype[].structtype[].field[].name "y"
+datatype[].structtype[].field[].datatype 0
+datatype[].structtype[].field[].detailedtype ""
+datatype[].id -379118517
+datatype[].structtype[].name "collision.header"
+datatype[].structtype[].version 0
+datatype[].structtype[].compresstype NONE
+datatype[].structtype[].compresslevel 0
+datatype[].structtype[].compressthreshold 95
+datatype[].structtype[].compressminsize 800
+datatype[].id 1557022836
+datatype[].documenttype[].name "collision"
+datatype[].documenttype[].version 0
+datatype[].documenttype[].inherits[].name "document"
+datatype[].documenttype[].inherits[].version 0
+datatype[].documenttype[].headerstruct -379118517
+datatype[].documenttype[].bodystruct 0
+datatype[].id 1557022836
+datatype[].structtype[].name "collision"
+datatype[].structtype[].version 0
+datatype[].structtype[].compresstype NONE
+datatype[].structtype[].compresslevel 0
+datatype[].structtype[].compressthreshold 95
+datatype[].structtype[].compressminsize 800
+datatype[].id -1730522993
+datatype[].arraytype[].datatype 1557022836
+datatype[].id -1270379114
+datatype[].structtype[].name "collisionstruct.header"
+datatype[].structtype[].version 0
+datatype[].structtype[].compresstype NONE
+datatype[].structtype[].compresslevel 0
+datatype[].structtype[].compressthreshold 95
+datatype[].structtype[].compressminsize 800
+datatype[].structtype[].field[].name "structarray"
+datatype[].structtype[].field[].datatype -1730522993
+datatype[].structtype[].field[].detailedtype ""
+datatype[].id -1723079287
+datatype[].documenttype[].name "collisionstruct"
+datatype[].documenttype[].version 0
+datatype[].documenttype[].inherits[].name "document"
+datatype[].documenttype[].inherits[].version 0
+datatype[].documenttype[].headerstruct -1270379114
+datatype[].documenttype[].bodystruct 0
+datatype[].documenttype[].fieldsets{[]}.fields[] "structarray"
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/derived/NameCollisionTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/derived/NameCollisionTestCase.java
new file mode 100644
index 00000000000..fda9e6327ce
--- /dev/null
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/derived/NameCollisionTestCase.java
@@ -0,0 +1,20 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.searchdefinition.derived;
+
+import org.junit.Test;
+
+/**
+ * Verifies that a struct in a document type is preferred over another document type
+ * of the same name.
+ *
+ * @author bratseth
+ */
+public class NameCollisionTestCase extends AbstractExportingTestCase {
+
+ @Test
+ public void testNameCollision() throws Exception {
+ assertCorrectDeriving("namecollision", "collisionstruct", new TestableDeployLogger());
+ }
+
+}
diff --git a/config-proxy/src/main/sh/vespa-config-loadtester.sh b/config-proxy/src/main/sh/vespa-config-loadtester.sh
index f7cecbf292f..38be1cf7b33 100644
--- a/config-proxy/src/main/sh/vespa-config-loadtester.sh
+++ b/config-proxy/src/main/sh/vespa-config-loadtester.sh
@@ -79,4 +79,4 @@ export ROOT
echo "# Using CLASSPATH=$CLASSPATH, args=$@"
-java -cp $CLASSPATH:$ROOT/lib/jars/config-proxy-jar-with-dependencies.jar com.yahoo.vespa.config.benchmark.LoadTester "$@"
+java -Xms1g -Xmx1g -cp $CLASSPATH:$ROOT/lib/jars/config-proxy-jar-with-dependencies.jar com.yahoo.vespa.config.benchmark.LoadTester "$@"
diff --git a/config/src/main/java/com/yahoo/vespa/config/benchmark/LoadTester.java b/config/src/main/java/com/yahoo/vespa/config/benchmark/LoadTester.java
index f57da6921ab..182c30a0ece 100644
--- a/config/src/main/java/com/yahoo/vespa/config/benchmark/LoadTester.java
+++ b/config/src/main/java/com/yahoo/vespa/config/benchmark/LoadTester.java
@@ -78,10 +78,10 @@ public class LoadTester {
String configsList, String defPath) throws IOException, InterruptedException {
configs = readConfigs(configsList);
defs = readDefs(defPath);
- Metrics m = new Metrics();
List<LoadThread> threadList = new ArrayList<>();
+ long startInNanos = System.nanoTime();
+ Metrics m = new Metrics();
- long start = System.currentTimeMillis();
for (int i = 0; i < threads; i++) {
LoadThread lt = new LoadThread(iterations, host, port);
threadList.add(lt);
@@ -92,22 +92,23 @@ public class LoadTester {
lt.join();
m.merge(lt.metrics);
}
- printOutput(start, threads, iterations, m);
+ printOutput(startInNanos, threads, iterations, m);
}
private Map<ConfigDefinitionKey, Tuple2<String, String[]>> readDefs(String defPath) throws IOException {
Map<ConfigDefinitionKey, Tuple2<String, String[]>> ret = new HashMap<>();
if (defPath == null) return ret;
+
File defDir = new File(defPath);
if (!defDir.isDirectory()) {
- System.out.println("# Given def file dir is not a directory: " + defDir.getPath() +
- " , will not send def contents in requests.");
+ System.out.println("# Given def file dir is not a directory: " +
+ defDir.getPath() + " , will not send def contents in requests.");
return ret;
}
- final File[] files = defDir.listFiles();
+ File[] files = defDir.listFiles();
if (files == null) {
- System.out.println("# Given def file dir has no files: " + defDir.getPath() +
- " , will not send def contents in requests.");
+ System.out.println("# Given def file dir has no files: " +
+ defDir.getPath() + " , will not send def contents in requests.");
return ret;
}
for (File f : files) {
@@ -115,21 +116,18 @@ public class LoadTester {
if (!name.endsWith(".def")) continue;
String contents = IOUtils.readFile(f);
ConfigDefinitionKey key = ConfigUtils.createConfigDefinitionKeyFromDefFile(f);
- ret.put(key, new Tuple2<>(ConfigUtils.getDefMd5(Arrays.asList(contents.split("\n"))),
- contents.split("\n")));
+ ret.put(key, new Tuple2<>(ConfigUtils.getDefMd5(Arrays.asList(contents.split("\n"))), contents.split("\n")));
}
System.out.println("# Read " + ret.size() + " def files from " + defDir.getPath());
return ret;
}
- private void printOutput(long start, long threads, long iterations, Metrics metrics) {
- long stop = System.currentTimeMillis();
- float durSec = (float) (stop - start) / 1000f;
+ private void printOutput(long startInNanos, long threads, long iterations, Metrics metrics) {
+ float durSec = (float) (System.nanoTime() - startInNanos) / 1_000_000_000f;
StringBuilder sb = new StringBuilder();
- sb.append("#reqs/sec #bytes/sec #avglatency #minlatency #maxlatency #failedrequests\n");
+ sb.append("#reqs/sec #avglatency #minlatency #maxlatency #failedrequests\n");
sb.append(((float) (iterations * threads)) / durSec).append(",");
- sb.append((metrics.totBytes / durSec)).append(",");
- sb.append((metrics.totLatency / threads / iterations)).append(",");
+ sb.append((metrics.latencyInMillis / threads / iterations)).append(",");
sb.append((metrics.minLatency)).append(",");
sb.append((metrics.maxLatency)).append(",");
sb.append((metrics.failedRequests));
@@ -155,23 +153,20 @@ public class LoadTester {
private static class Metrics {
- long totBytes = 0;
- long totLatency = 0;
+ long latencyInMillis = 0;
long failedRequests = 0;
long maxLatency = Long.MIN_VALUE;
long minLatency = Long.MAX_VALUE;
public void merge(Metrics m) {
- this.totBytes += m.totBytes;
- this.totLatency += m.totLatency;
+ this.latencyInMillis += m.latencyInMillis;
this.failedRequests += m.failedRequests;
updateMin(m.minLatency);
updateMax(m.maxLatency);
}
- public void update(long bytes, long latency) {
- this.totBytes += bytes;
- this.totLatency += latency;
+ public void update(long latency) {
+ this.latencyInMillis += latency;
updateMin(latency);
updateMax(latency);
}
@@ -211,30 +206,37 @@ public class LoadTester {
for (int i = 0; i < iterations; i++) {
ConfigKey<?> reqKey = configs.get(ThreadLocalRandom.current().nextInt(configs.size()));
- JRTClientConfigRequest request = getRequest(reqKey);
+ ConfigDefinitionKey dKey = new ConfigDefinitionKey(reqKey);
+ Tuple2<String, String[]> defContent = defs.get(dKey);
+ if (defContent == null && defs.size() > 0) { // Only complain if we actually did run with a def dir
+ System.out.println("# No def found for " + dKey + ", not sending in request.");
+ }
+ ConfigKey<?> configKey = createFull(reqKey.getName(), reqKey.getConfigId(), reqKey.getNamespace(), defContent.first);
+ JRTClientConfigRequest request = createRequest(configKey, defContent.second);
if (debug) System.out.println("# Requesting: " + reqKey);
-
- long start = System.currentTimeMillis();
+ long start = System.nanoTime();
target.invokeSync(request.getRequest(), 10.0);
- long end = System.currentTimeMillis();
-
+ long durationInMillis = (System.nanoTime() - start) / 1_000_000;
if (request.isError()) {
target = handleError(request, spec, target);
} else {
- System.out.println("# Connection OK");
- long duration = end - start;
-
- if (debug) {
- String payload = request.getNewPayload().toString();
- metrics.update(payload.length(), duration); // assume 8 bit...
- System.out.println("# Ret: " + payload);
- } else {
- metrics.update(0, duration);
- }
+ metrics.update(durationInMillis);
}
}
}
+ private JRTClientConfigRequest createRequest(ConfigKey<?> reqKey, String[] defContent) {
+ if (defContent == null) defContent = new String[0];
+ final long serverTimeout = 1000;
+ return JRTClientConfigRequestV3.createWithParams(reqKey, DefContent.fromList(Arrays.asList(defContent)),
+ ConfigUtils.getCanonicalHostName(), "", 0, serverTimeout, Trace.createDummy(),
+ compressionType, Optional.empty());
+ }
+
+ private Target connect(Spec spec) {
+ return supervisor.connect(spec);
+ }
+
private Target handleError(JRTClientConfigRequest request, Spec spec, Target target) {
if (List.of("Connection lost", "Connection down").contains(request.errorMessage())) {
try {
@@ -252,24 +254,6 @@ public class LoadTester {
return target;
}
- private JRTClientConfigRequest getRequest(ConfigKey<?> reqKey) {
- long serverTimeout = 1000;
-
- ConfigDefinitionKey dKey = new ConfigDefinitionKey(reqKey);
- Tuple2<String, String[]> defPair = defs.get(dKey);
-
- String defMd5 = defPair.first;
- DefContent defContent = DefContent.fromList(List.of(defPair.second));
-
- ConfigKey<?> fullKey = createFull(reqKey.getName(), reqKey.getConfigId(), reqKey.getNamespace(), defMd5);
- return JRTClientConfigRequestV3.createWithParams(fullKey, defContent, ConfigUtils.getCanonicalHostName(),
- "", 0, serverTimeout,
- Trace.createDummy(), compressionType, Optional.empty());
- }
-
- private Target connect(Spec spec) {
- return supervisor.connect(spec);
- }
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
index 276eb51981c..818e65b6caf 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
@@ -1087,9 +1087,9 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye
}
ReindexActions reindexActions = actions.getReindexActions();
if ( ! reindexActions.isEmpty()) {
- logger.logApplicationPackage(Level.WARNING,
- "Change(s) between active and new application that may require re-index:\n" +
- reindexActions.format());
+ logger.log(Level.WARNING,
+ "Change(s) between active and new application that may require re-index:\n" +
+ reindexActions.format());
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/DeploymentMetricsResponse.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/DeploymentMetricsResponse.java
index 062a21b1f80..cdfdce91500 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/DeploymentMetricsResponse.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/DeploymentMetricsResponse.java
@@ -40,6 +40,11 @@ public class DeploymentMetricsResponse extends SlimeJsonResponse {
metrics.setDouble("diskUtil", disk.util());
metrics.setDouble("diskFeedBlockLimit", disk.feedBlockLimit());
});
+
+ aggregator.reindexingProgress().ifPresent(reindexingProgress -> {
+ Cursor progressObject = cluster.setObject("reindexingProgress");
+ reindexingProgress.forEach(progressObject::setDouble);
+ });
}
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ConfigServerMaintainer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ConfigServerMaintainer.java
index e0f0a4b4099..2d88ee77deb 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ConfigServerMaintainer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ConfigServerMaintainer.java
@@ -48,8 +48,7 @@ public abstract class ConfigServerMaintainer extends Maintainer {
}
@Override
- protected void recordCompletion(String job, Long consecutiveFailures, double successFactor) {
- metric.set("maintenance.consecutiveFailures", consecutiveFailures, metric.createContext(Map.of("job", job)));
+ public void completed(String job, double successFactor) {
metric.set("maintenance.successFactor", successFactor, metric.createContext(Map.of("job", job)));
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java
index e1135063f97..77e2f923d4a 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java
@@ -127,8 +127,10 @@ public class ClusterDeploymentMetricsRetriever {
case VESPA_CONTAINER:
optionalDouble(values.field("query_latency.sum")).ifPresent(qlSum ->
aggregator.get()
- .addContainerLatency(qlSum, values.field("query_latency.count").asDouble())
- .addFeedLatency(values.field("feed.latency.sum").asDouble(), values.field("feed.latency.count").asDouble()));
+ .addContainerLatency(qlSum, values.field("query_latency.count").asDouble()));
+ optionalDouble(values.field("feed.latency.sum")).ifPresent(flSum ->
+ aggregator.get()
+ .addFeedLatency(flSum, values.field("feed.latency.count").asDouble()));
break;
case VESPA_QRSERVER:
optionalDouble(values.field("query_latency.sum")).ifPresent(qlSum ->
@@ -146,6 +148,10 @@ public class ClusterDeploymentMetricsRetriever {
values.field("cluster-controller.resource_usage.memory_limit.last").asDouble())
.addDiskUsage(values.field("cluster-controller.resource_usage.max_disk_utilization.last").asDouble(),
values.field("cluster-controller.resource_usage.disk_limit.last").asDouble()));
+ optionalDouble(values.field("reindexing.progress.last")).ifPresent(progress -> {
+ if (progress < 0 || progress >= 1) return;
+ aggregator.get().addReindexingProgress(metric.field("dimensions").field("documenttype").asString(), progress);
+ });
break;
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsAggregator.java b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsAggregator.java
index f27cf942dd8..7ce6d84ad8c 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsAggregator.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsAggregator.java
@@ -1,6 +1,8 @@
// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.server.metrics;
+import java.util.HashMap;
+import java.util.Map;
import java.util.Optional;
/**
@@ -15,6 +17,7 @@ public class DeploymentMetricsAggregator {
private Double documentCount;
private ResourceUsage memoryUsage;
private ResourceUsage diskUsage;
+ private Map<String, Double> reindexingProgress;
public synchronized DeploymentMetricsAggregator addFeedLatency(double sum, double count) {
this.feed = combineLatency(this.feed, sum, count);
@@ -46,6 +49,12 @@ public class DeploymentMetricsAggregator {
return this;
}
+ public synchronized DeploymentMetricsAggregator addReindexingProgress(String documentType, double progress) {
+ if (reindexingProgress == null) this.reindexingProgress = new HashMap<>();
+ this.reindexingProgress.put(documentType, progress);
+ return this;
+ }
+
public Optional<Double> aggregateFeedLatency() {
return Optional.ofNullable(feed).map(m -> m.sum / m.count).filter(num -> !num.isNaN());
}
@@ -80,6 +89,10 @@ public class DeploymentMetricsAggregator {
return Optional.ofNullable(diskUsage);
}
+ public Optional<Map<String, Double>> reindexingProgress() {
+ return Optional.ofNullable(reindexingProgress);
+ }
+
private static LatencyMetrics combineLatency(LatencyMetrics metricsOrNull, double sum, double count) {
return Optional.ofNullable(metricsOrNull).orElseGet(LatencyMetrics::new).combine(sum, count);
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetrieverTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetrieverTest.java
index 7fdfbcdbf03..b5bcae65009 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetrieverTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetrieverTest.java
@@ -70,7 +70,9 @@ public class ClusterDeploymentMetricsRetrieverTest {
new DeploymentMetricsAggregator()
.addDocumentCount(6000.0)
.addMemoryUsage(0.89074, 0.8)
- .addDiskUsage(0.83517, 0.75),
+ .addDiskUsage(0.83517, 0.75)
+ .addReindexingProgress("test_artifacts", 0.71)
+ .addReindexingProgress("announcements", 0),
aggregatorMap.get(expectedContentCluster)
);
@@ -113,6 +115,7 @@ public class ClusterDeploymentMetricsRetrieverTest {
compareOptionals(expected.diskUsage(), actual.diskUsage(), (a, b) -> assertDoubles.accept(a.feedBlockLimit(), b.feedBlockLimit()));
compareOptionals(expected.memoryUsage(), actual.memoryUsage(), (a, b) -> assertDoubles.accept(a.util(), b.util()));
compareOptionals(expected.memoryUsage(), actual.memoryUsage(), (a, b) -> assertDoubles.accept(a.feedBlockLimit(), b.feedBlockLimit()));
+ assertEquals(expected.reindexingProgress(), actual.reindexingProgress());
}
@SuppressWarnings("OptionalUsedAsFieldOrParameterType")
diff --git a/configserver/src/test/resources/metrics/clustercontroller_metrics.json b/configserver/src/test/resources/metrics/clustercontroller_metrics.json
index 9afcb34d77d..65468749940 100644
--- a/configserver/src/test/resources/metrics/clustercontroller_metrics.json
+++ b/configserver/src/test/resources/metrics/clustercontroller_metrics.json
@@ -20,6 +20,48 @@
},
{
"values": {
+ "reindexing.progress.last": 0.71
+ },
+ "dimensions": {
+ "clustertype": "content",
+ "clusterid": "content_cluster_id",
+ "documenttype": "test_artifacts"
+ }
+ },
+ {
+ "values": {
+ "reindexing.progress.last": 1
+ },
+ "dimensions": {
+ "clustertype": "content",
+ "clusterid": "content_cluster_id",
+ "documenttype": "builds"
+ }
+ },
+ {
+ "values": {
+ "reindexing.progress.last": 0
+ },
+ "dimensions": {
+ "clustertype": "content",
+ "clusterid": "content_cluster_id",
+ "documenttype": "announcements",
+ "state": "running"
+ }
+ },
+ {
+ "values": {
+ "reindexing.progress.last": -1
+ },
+ "dimensions": {
+ "clustertype": "content",
+ "clusterid": "content_cluster_id",
+ "documenttype": "announcements",
+ "state": "successful"
+ }
+ },
+ {
+ "values": {
"some.other.metrics": 1
},
"dimensions": {
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/ClusterMetrics.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/ClusterMetrics.java
index 0e11bcdccaf..ee74aca0e14 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/ClusterMetrics.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/ClusterMetrics.java
@@ -23,11 +23,13 @@ public class ClusterMetrics {
private final String clusterId;
private final String clusterType;
private final Map<String, Double> metrics;
+ private final Map<String, Double> reindexingProgress;
- public ClusterMetrics(String clusterId, String clusterType, Map<String, Double> metrics) {
+ public ClusterMetrics(String clusterId, String clusterType, Map<String, Double> metrics, Map<String, Double> reindexingProgress) {
this.clusterId = clusterId;
this.clusterType = clusterType;
this.metrics = Map.copyOf(metrics);
+ this.reindexingProgress = Map.copyOf(reindexingProgress);
}
public String getClusterId() {
@@ -74,4 +76,7 @@ public class ClusterMetrics {
return Optional.ofNullable(metrics.get(DISK_FEED_BLOCK_LIMIT));
}
+ public Map<String, Double> reindexingProgress() {
+ return reindexingProgress;
+ }
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/AthenzAccessControlService.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/AthenzAccessControlService.java
index 233759f47a7..0be32165916 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/AthenzAccessControlService.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/AthenzAccessControlService.java
@@ -3,6 +3,7 @@
package com.yahoo.vespa.hosted.controller.api.integration.athenz;
import com.yahoo.vespa.athenz.api.AthenzDomain;
+import com.yahoo.vespa.athenz.api.AthenzGroup;
import com.yahoo.vespa.athenz.api.AthenzRole;
import com.yahoo.vespa.athenz.api.AthenzUser;
import com.yahoo.vespa.athenz.client.zms.ZmsClient;
@@ -14,18 +15,25 @@ import java.util.stream.Collectors;
public class AthenzAccessControlService implements AccessControlService {
+ private static final String ALLOWED_OPERATOR_GROUPNAME = "vespa-team";
private static final String DATAPLANE_ACCESS_ROLENAME = "operator-data-plane";
private final ZmsClient zmsClient;
private final AthenzRole dataPlaneAccessRole;
+ private final AthenzGroup vespaTeam;
public AthenzAccessControlService(ZmsClient zmsClient, AthenzDomain domain) {
this.zmsClient = zmsClient;
this.dataPlaneAccessRole = new AthenzRole(domain, DATAPLANE_ACCESS_ROLENAME);
+ this.vespaTeam = new AthenzGroup(domain, ALLOWED_OPERATOR_GROUPNAME);
}
@Override
public boolean approveDataPlaneAccess(AthenzUser user, Instant expiry) {
+ // Can only approve team members, other members must be manually approved
+ if(!isVespaTeamMember(user)) {
+ throw new IllegalArgumentException(String.format("User %s requires manual approval, please contact Vespa team", user.getName()));
+ }
List<AthenzUser> users = zmsClient.listPendingRoleApprovals(dataPlaneAccessRole);
if (users.contains(user)) {
zmsClient.approvePendingRoleMembership(dataPlaneAccessRole, user, expiry);
@@ -42,4 +50,8 @@ public class AthenzAccessControlService implements AccessControlService {
.map(AthenzUser.class::cast)
.collect(Collectors.toList());
}
+
+ public boolean isVespaTeamMember(AthenzUser user) {
+ return zmsClient.getGroupMembership(vespaTeam, user);
+ }
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/ZmsClientMock.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/ZmsClientMock.java
index deeecf217e7..ed84a9b0a76 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/ZmsClientMock.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/ZmsClientMock.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.hosted.controller.api.integration.athenz;
import com.yahoo.vespa.athenz.api.AthenzDomain;
+import com.yahoo.vespa.athenz.api.AthenzGroup;
import com.yahoo.vespa.athenz.api.AthenzIdentity;
import com.yahoo.vespa.athenz.api.AthenzResourceName;
import com.yahoo.vespa.athenz.api.AthenzRole;
@@ -98,6 +99,11 @@ public class ZmsClientMock implements ZmsClient {
}
@Override
+ public boolean getGroupMembership(AthenzGroup group, AthenzIdentity identity) {
+ return false;
+ }
+
+ @Override
public List<AthenzDomain> getDomainList(String prefix) {
log("getDomainList()");
return new ArrayList<>(athenz.domains.keySet());
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
index c867b97b544..ff10f3b77ca 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
@@ -445,6 +445,11 @@ public class ApplicationController {
// Validate new deployment spec thoroughly before storing it.
controller.jobController().deploymentStatus(application.get());
+ // Clear notifications for instances that are no longer declared
+ for (var name : existingInstances)
+ if ( ! declaredInstances.contains(name))
+ controller.notificationsDb().removeNotifications(NotificationSource.from(application.get().id().instance(name)));
+
store(application);
return application;
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ZipStreamReader.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ZipStreamReader.java
index 2322b251fe0..4f01df21430 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ZipStreamReader.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ZipStreamReader.java
@@ -1,15 +1,14 @@
// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.application;
-import com.google.common.collect.ImmutableList;
-
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.UncheckedIOException;
import java.nio.file.Path;
-import java.util.Arrays;
+import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
import java.util.function.Predicate;
import java.util.zip.ZipEntry;
@@ -21,19 +20,19 @@ import java.util.zip.ZipOutputStream;
*/
public class ZipStreamReader {
- private final ImmutableList<ZipEntryWithContent> entries;
+ private final List<ZipEntryWithContent> entries = new ArrayList<>();
private final int maxEntrySizeInBytes;
public ZipStreamReader(InputStream input, Predicate<String> entryNameMatcher, int maxEntrySizeInBytes) {
this.maxEntrySizeInBytes = maxEntrySizeInBytes;
try (ZipInputStream zipInput = new ZipInputStream(input)) {
- ImmutableList.Builder<ZipEntryWithContent> builder = new ImmutableList.Builder<>();
ZipEntry zipEntry;
+
while (null != (zipEntry = zipInput.getNextEntry())) {
if (!entryNameMatcher.test(requireName(zipEntry.getName()))) continue;
- builder.add(new ZipEntryWithContent(zipEntry, readContent(zipInput)));
+ entries.add(new ZipEntryWithContent(zipEntry, readContent(zipInput)));
}
- entries = builder.build();
+
} catch (IOException e) {
throw new UncheckedIOException("IO error reading zip content", e);
}
@@ -79,10 +78,10 @@ public class ZipStreamReader {
}
}
- public List<ZipEntryWithContent> entries() { return entries; }
+ public List<ZipEntryWithContent> entries() { return Collections.unmodifiableList(entries); }
private static String requireName(String name) {
- if (Arrays.asList(name.split("/")).contains("..") ||
+ if (List.of(name.split("/")).contains("..") ||
!trimTrailingSlash(name).equals(Path.of(name).normalize().toString())) {
throw new IllegalArgumentException("Unexpected non-normalized path found in zip content: '" + name + "'");
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java
index 53e88a9a5ac..4f4f4b59a2c 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java
@@ -5,6 +5,7 @@ import com.yahoo.config.application.api.DeploymentInstanceSpec;
import com.yahoo.config.application.api.DeploymentSpec;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.InstanceName;
+import com.yahoo.vespa.curator.Lock;
import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.ApplicationController;
import com.yahoo.vespa.hosted.controller.Controller;
@@ -32,6 +33,7 @@ import java.util.OptionalLong;
import java.util.function.Supplier;
import java.util.logging.Level;
import java.util.logging.Logger;
+import java.util.stream.Collectors;
import static java.util.Comparator.comparing;
import static java.util.stream.Collectors.groupingBy;
@@ -199,6 +201,35 @@ public class DeploymentTrigger {
return List.copyOf(jobs.keySet());
}
+ /** retrigger job. If the job is already running, it will be canceled, and retrigger enqueued. */
+ public Optional<JobId> reTriggerOrAddToQueue(DeploymentId deployment) {
+ JobType jobType = JobType.from(controller.system(), deployment.zoneId())
+ .orElseThrow(() -> new IllegalArgumentException(String.format("No job to trigger for (system/zone): %s/%s", controller.system().value(), deployment.zoneId().value())));
+ Optional<Run> existingRun = controller.jobController().active(deployment.applicationId()).stream()
+ .filter(run -> run.id().type().equals(jobType))
+ .findFirst();
+
+ if (existingRun.isPresent()) {
+ Run run = existingRun.get();
+ try (Lock lock = controller.curator().lockDeploymentRetriggerQueue()) {
+ List<RetriggerEntry> retriggerEntries = controller.curator().readRetriggerEntries();
+ List<RetriggerEntry> newList = new ArrayList<>(retriggerEntries);
+ RetriggerEntry requiredEntry = new RetriggerEntry(new JobId(deployment.applicationId(), jobType), run.id().number() + 1);
+ if(newList.stream().noneMatch(entry -> entry.jobId().equals(requiredEntry.jobId()) && entry.requiredRun()>=requiredEntry.requiredRun())) {
+ newList.add(requiredEntry);
+ }
+ newList = newList.stream()
+ .filter(entry -> !(entry.jobId().equals(requiredEntry.jobId()) && entry.requiredRun() < requiredEntry.requiredRun()))
+ .collect(toList());
+ controller.curator().writeRetriggerEntries(newList);
+ }
+ controller.jobController().abort(run.id());
+ return Optional.empty();
+ } else {
+ return Optional.of(reTrigger(deployment.applicationId(), jobType));
+ }
+ }
+
/** Prevents jobs of the given type from starting, until the given time. */
public void pauseJob(ApplicationId id, JobType jobType, Instant until) {
if (until.isAfter(clock.instant().plus(maxPause)))
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RetriggerEntry.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RetriggerEntry.java
new file mode 100644
index 00000000000..9c16d80313e
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RetriggerEntry.java
@@ -0,0 +1,27 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.hosted.controller.deployment;
+
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId;
+
+/**
+ * @author mortent
+ */
+public class RetriggerEntry {
+ private final JobId jobId;
+ private final long requiredRun;
+
+ public RetriggerEntry(JobId jobId, long requiredRun) {
+ this.jobId = jobId;
+ this.requiredRun = requiredRun;
+ }
+
+ public JobId jobId() {
+ return jobId;
+ }
+
+ public long requiredRun() {
+ return requiredRun;
+ }
+
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RetriggerEntrySerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RetriggerEntrySerializer.java
new file mode 100644
index 00000000000..6d9206d42b6
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RetriggerEntrySerializer.java
@@ -0,0 +1,63 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.hosted.controller.deployment;
+
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.slime.Cursor;
+import com.yahoo.slime.Inspector;
+import com.yahoo.slime.Slime;
+import com.yahoo.slime.SlimeUtils;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
+
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * @author mortent
+ */
+public class RetriggerEntrySerializer {
+
+ private static final String JOB_ID_KEY = "jobId";
+ private static final String APPLICATION_ID_KEY = "applicationId";
+ private static final String JOB_TYPE_KEY = "jobType";
+ private static final String MIN_REQUIRED_RUN_ID_KEY = "minimumRunId";
+
+ public static List<RetriggerEntry> fromSlime(Slime slime) {
+ return SlimeUtils.entriesStream(slime.get().field("entries"))
+ .map(RetriggerEntrySerializer::deserializeEntry)
+ .collect(Collectors.toList());
+ }
+
+ public static Slime toSlime(List<RetriggerEntry> entryList) {
+ Slime slime = new Slime();
+ Cursor root = slime.setObject();
+ Cursor entries = root.setArray("entries");
+ entryList.forEach(e -> RetriggerEntrySerializer.serializeEntry(entries, e));
+ return slime;
+ }
+
+ private static void serializeEntry(Cursor array, RetriggerEntry entry) {
+ Cursor root = array.addObject();
+ Cursor jobid = root.setObject(JOB_ID_KEY);
+ jobid.setString(APPLICATION_ID_KEY, entry.jobId().application().serializedForm());
+ jobid.setString(JOB_TYPE_KEY, entry.jobId().type().jobName());
+ root.setLong(MIN_REQUIRED_RUN_ID_KEY, entry.requiredRun());
+ }
+
+ private static RetriggerEntry deserializeEntry(Inspector inspector) {
+ Inspector jobid = inspector.field(JOB_ID_KEY);
+ ApplicationId applicationId = ApplicationId.fromSerializedForm(require(jobid, APPLICATION_ID_KEY).asString());
+ JobType jobType = JobType.fromJobName(require(jobid, JOB_TYPE_KEY).asString());
+ long minRequiredRunId = require(inspector, MIN_REQUIRED_RUN_ID_KEY).asLong();
+ return new RetriggerEntry(new JobId(applicationId, jobType), minRequiredRunId);
+ }
+
+ private static Inspector require(Inspector inspector, String fieldName) {
+ Inspector field = inspector.field(fieldName);
+ if (!field.valid()) {
+ throw new IllegalStateException("Could not deserialize, field not found in json: " + fieldName);
+ }
+ return field;
+ }
+}
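
For illustration, a minimal sketch (not part of the patch) of how the serializer above round-trips an entry; the application and job names are examples, and the commented JSON is only the approximate wire format produced by toSlime:

    // Assumes the same imports as RetriggerEntrySerializer above; the names below are examples only.
    JobId jobId = new JobId(ApplicationId.from("tenant", "app", "default"), JobType.devUsEast1);
    Slime slime = RetriggerEntrySerializer.toSlime(List.of(new RetriggerEntry(jobId, 42)));
    // Approximate serialized form:
    // {"entries":[{"jobId":{"applicationId":"tenant:app:default","jobType":"dev-us-east-1"},"minimumRunId":42}]}
    List<RetriggerEntry> entries = RetriggerEntrySerializer.fromSlime(slime);
    assert entries.get(0).requiredRun() == 42;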
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CloudTrialExpirer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CloudTrialExpirer.java
new file mode 100644
index 00000000000..be8f4254b79
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CloudTrialExpirer.java
@@ -0,0 +1,80 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.maintenance;
+
+import com.yahoo.config.provision.SystemName;
+import com.yahoo.vespa.flags.ListFlag;
+import com.yahoo.vespa.flags.PermanentFlags;
+import com.yahoo.vespa.hosted.controller.Controller;
+import com.yahoo.vespa.hosted.controller.api.integration.billing.PlanId;
+import com.yahoo.vespa.hosted.controller.tenant.LastLoginInfo;
+import com.yahoo.vespa.hosted.controller.tenant.Tenant;
+
+import java.time.Duration;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * Expires unused tenants from Vespa Cloud.
+ *
+ * @author ogronnesby
+ */
+public class CloudTrialExpirer extends ControllerMaintainer {
+
+ private static Duration loginExpiry = Duration.ofDays(14);
+ private final ListFlag<String> extendedTrialTenants;
+
+ public CloudTrialExpirer(Controller controller, Duration interval) {
+ super(controller, interval, null, SystemName.allOf(SystemName::isPublic));
+ this.extendedTrialTenants = PermanentFlags.EXTENDED_TRIAL_TENANTS.bindTo(controller().flagSource());
+ }
+
+ @Override
+ protected double maintain() {
+ var expiredTenants = controller().tenants().asList().stream()
+ .filter(this::tenantIsCloudTenant) // only valid for cloud tenants
+ .filter(this::tenantHasTrialPlan) // only trial tenants are expired
+ .filter(this::tenantIsNotExemptFromExpiry) // a feature flag may exempt a tenant from expiry
+ .filter(this::tenantReadersNotLoggedIn) // no user has logged in during the last 14 days
+ .filter(this::tenantHasNoDeployments) // no active deployments
+ .collect(Collectors.toList());
+
+ expireTenants(expiredTenants);
+
+ return 0;
+ }
+
+ private boolean tenantIsCloudTenant(Tenant tenant) {
+ return tenant.type() == Tenant.Type.cloud;
+ }
+
+ private boolean tenantReadersNotLoggedIn(Tenant tenant) {
+ return tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
+ .map(instant -> {
+ var sinceLastLogin = Duration.between(instant, controller().clock().instant());
+ return sinceLastLogin.compareTo(loginExpiry) > 0;
+ })
+ .orElse(false);
+ }
+
+ private boolean tenantHasTrialPlan(Tenant tenant) {
+ var planId = controller().serviceRegistry().billingController().getPlan(tenant.name());
+ return "trial".equals(planId.value());
+ }
+
+ private boolean tenantIsNotExemptFromExpiry(Tenant tenant) {
+ return ! extendedTrialTenants.value().contains(tenant.name().value());
+ }
+
+ private boolean tenantHasNoDeployments(Tenant tenant) {
+ return controller().applications().asList(tenant.name()).stream()
+ .flatMap(app -> app.instances().values().stream())
+ .mapToLong(instance -> instance.deployments().values().size())
+ .sum() == 0;
+ }
+
+ private void expireTenants(List<Tenant> tenants) {
+ tenants.forEach(tenant -> {
+ controller().serviceRegistry().billingController().setPlan(tenant.name(), PlanId.from("none"), false);
+ });
+ }
+}
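
A compact summary (not part of the patch) of when the expirer downgrades a tenant, as implemented by the filter chain in maintain() above and exercised by CloudTrialExpirerTest further down:

    // plan            last login     deployments   extended-trial flag   outcome
    // trial           15 days ago    0             not listed            plan set to "none"
    // trial           15 days ago    1             not listed            kept (has deployments)
    // trial           2 days ago     0             not listed            kept (logged in within 14 days)
    // trial           40 days ago    0             listed                kept (exempt via feature flag)
    // pay-as-you-go   any            any           -                     kept (not a trial plan)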
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintainer.java
index 810c412fcc0..f7c4a95baf1 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintainer.java
@@ -56,8 +56,7 @@ public abstract class ControllerMaintainer extends Maintainer {
}
@Override
- protected void recordCompletion(String job, Long incompleteRuns, double successFactor) {
- metric.set("maintenance.consecutiveFailures", incompleteRuns, metric.createContext(Map.of("job", job)));
+ public void completed(String job, double successFactor) {
metric.set("maintenance.successFactor", successFactor, metric.createContext(Map.of("job", job)));
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java
index 5a7ef12b246..91dfed500e3 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java
@@ -10,6 +10,7 @@ import com.yahoo.jdisc.Metric;
import com.yahoo.vespa.hosted.controller.Controller;
import java.time.Duration;
+import java.time.temporal.ChronoUnit;
import java.time.temporal.TemporalUnit;
import java.util.Collections;
import java.util.List;
@@ -70,6 +71,8 @@ public class ControllerMaintenance extends AbstractComponent {
maintainers.add(new TenantRoleMaintainer(controller, intervals.tenantRoleMaintainer));
maintainers.add(new ChangeRequestMaintainer(controller, intervals.changeRequestMaintainer));
maintainers.add(new VCMRMaintainer(controller, intervals.vcmrMaintainer));
+ maintainers.add(new CloudTrialExpirer(controller, intervals.defaultInterval));
+ maintainers.add(new RetriggerMaintainer(controller, intervals.retriggerMaintainer));
}
public Upgrader upgrader() { return upgrader; }
@@ -125,6 +128,7 @@ public class ControllerMaintenance extends AbstractComponent {
private final Duration tenantRoleMaintainer;
private final Duration changeRequestMaintainer;
private final Duration vcmrMaintainer;
+ private final Duration retriggerMaintainer;
public Intervals(SystemName system) {
this.system = Objects.requireNonNull(system);
@@ -157,6 +161,7 @@ public class ControllerMaintenance extends AbstractComponent {
this.tenantRoleMaintainer = duration(5, MINUTES);
this.changeRequestMaintainer = duration(1, HOURS);
this.vcmrMaintainer = duration(1, HOURS);
+ this.retriggerMaintainer = duration(1, MINUTES);
}
private Duration duration(long amount, TemporalUnit unit) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java
index 20154c4f122..ba4aaf92fc8 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java
@@ -69,7 +69,7 @@ public class DeploymentMetricsMaintainer extends ControllerMaintainer {
lockedInstance -> lockedInstance.with(existingDeployment.zone(), newMetrics)
.recordActivityAt(now, existingDeployment.zone())));
- controller().notificationsDb().setDeploymentFeedingBlockedNotifications(deploymentId, clusterMetrics);
+ controller().notificationsDb().setDeploymentMetricsNotifications(deploymentId, clusterMetrics);
});
} catch (Exception e) {
failures.incrementAndGet();
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/RetriggerMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/RetriggerMaintainer.java
new file mode 100644
index 00000000000..2cc3ac1bd6c
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/RetriggerMaintainer.java
@@ -0,0 +1,66 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.hosted.controller.maintenance;
+
+import com.yahoo.vespa.hosted.controller.Controller;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId;
+import com.yahoo.vespa.hosted.controller.deployment.RetriggerEntry;
+import com.yahoo.vespa.hosted.controller.deployment.Run;
+
+import java.time.Duration;
+import java.util.List;
+import java.util.Optional;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import java.util.stream.Collectors;
+
+public class RetriggerMaintainer extends ControllerMaintainer {
+
+ private static final Logger logger = Logger.getLogger(RetriggerMaintainer.class.getName());
+
+ public RetriggerMaintainer(Controller controller, Duration interval) {
+ super(controller, interval);
+ }
+
+ @Override
+ protected double maintain() {
+ try (var lock = controller().curator().lockDeploymentRetriggerQueue()) {
+ List<RetriggerEntry> retriggerEntries = controller().curator().readRetriggerEntries();
+
+ // Trigger all jobs that still need triggering and are not already running
+ retriggerEntries.stream()
+ .filter(this::needsTrigger)
+ .filter(entry -> readyToTrigger(entry.jobId()))
+ .forEach(entry -> controller().applications().deploymentTrigger().reTrigger(entry.jobId().application(), entry.jobId().type()));
+
+ // Remove all jobs that have completed the required run, and persist the remaining list
+ List<RetriggerEntry> remaining = retriggerEntries.stream()
+ .filter(this::needsTrigger)
+ .collect(Collectors.toList());
+ controller().curator().writeRetriggerEntries(remaining);
+ } catch (Exception e) {
+ logger.log(Level.WARNING, "Exception while triggering jobs", e);
+ return 0.0;
+ }
+ return 1.0;
+ }
+
+ /*
+ Returns true if a job is ready to run, i.e. it is not currently running.
+ */
+ private boolean readyToTrigger(JobId jobId) {
+ Optional<Run> existingRun = controller().jobController().active(jobId.application()).stream()
+ .filter(run -> run.id().type().equals(jobId.type()))
+ .findFirst();
+ return existingRun.isEmpty();
+ }
+
+ /*
+ Returns true if the job needs triggering, i.e. the last completed run is older than the run required by the queue entry.
+ */
+ private boolean needsTrigger(RetriggerEntry entry) {
+ return controller().jobController().lastCompleted(entry.jobId())
+ .filter(run -> run.id().number() < entry.requiredRun())
+ .isPresent();
+ }
+}
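
A short walk-through (not part of the patch) of how a queue entry with requiredRun = 7 moves through maintain(), given the needsTrigger and readyToTrigger checks above:

    // Last completed run is #6, no active run   -> needsTrigger() and readyToTrigger() are both true: reTrigger() starts a run,
    //                                               but the entry is kept so the next pass can verify the run completed.
    // Last completed run is #6, a run is active -> needsTrigger() true, readyToTrigger() false: nothing triggered, entry kept.
    // Last completed run is #7 (or later)       -> needsTrigger() false: the entry is filtered out and the queue is rewritten.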
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/Notification.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/Notification.java
index ea0422ea9fc..b65a9290e43 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/Notification.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/Notification.java
@@ -62,7 +62,7 @@ public class Notification {
public enum Level {
// Must be ordered in order of importance
- warning, error
+ info, warning, error
}
public enum Type {
@@ -73,7 +73,10 @@ public class Notification {
deployment,
/** Application cluster is (near) external feed blocked */
- feedBlock;
+ feedBlock,
+
+ /** Application cluster is reindexing document(s) */
+ reindex;
}
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDb.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDb.java
index 21df0c01f0f..7c2d990750c 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDb.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDb.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.hosted.controller.notification;
import com.yahoo.collections.Pair;
+import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.vespa.curator.Lock;
import com.yahoo.vespa.hosted.controller.Controller;
@@ -16,6 +17,7 @@ import java.util.Comparator;
import java.util.List;
import java.util.Locale;
import java.util.Optional;
+import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
@@ -34,6 +36,13 @@ public class NotificationsDb {
public NotificationsDb(Controller controller) {
this(controller.clock(), controller.curator());
+
+ Set<DeploymentId> allDeployments = controller.applications().asList().stream()
+ .flatMap(application -> application.instances().values().stream())
+ .flatMap(instance -> instance.deployments().keySet().stream()
+ .map(zone -> new DeploymentId(instance.id(), zone)))
+ .collect(Collectors.toSet());
+ removeNotificationsForRemovedInstances(allDeployments);
}
NotificationsDb(Clock clock, CuratorDb curatorDb) {
@@ -41,6 +50,26 @@ public class NotificationsDb {
this.curatorDb = curatorDb;
}
+ // TODO (freva): Remove after 7.423
+ void removeNotificationsForRemovedInstances(Set<DeploymentId> allDeployments) {
+ // Prior to 7.423, notifications created for instances that were later removed from deployment.xml
+ // were not cleared. This should only affect notifications of type 'deployment'.
+ allDeployments.stream()
+ .map(deploymentId -> deploymentId.applicationId().tenant())
+ .distinct()
+ .flatMap(tenant -> curatorDb.readNotifications(tenant).stream()
+ .filter(notification -> notification.type() == Type.deployment && notification.source().zoneId().isPresent())
+ .map(Notification::source))
+ .filter(source -> {
+ ApplicationId sourceApplication = ApplicationId.from(source.tenant(),
+ source.application().get(),
+ source.instance().get());
+ DeploymentId sourceDeployment = new DeploymentId(sourceApplication, source.zoneId().get());
+ return ! allDeployments.contains(sourceDeployment);
+ })
+ .forEach(source -> removeNotification(source, Type.deployment));
+ }
+
public List<Notification> listNotifications(NotificationSource source, boolean productionOnly) {
return curatorDb.readNotifications(source.tenant()).stream()
.filter(notification -> source.contains(notification.source()) && (!productionOnly || notification.source().isProduction()))
@@ -95,31 +124,22 @@ public class NotificationsDb {
}
/**
- * Updates feeding blocked notifications for the given deployment based on current cluster metrics.
- * Will clear notifications of any cluster not reporting the metrics or whose metrics indicate feed is not blocked,
- * while setting notifications for cluster that are (Level.error) or are nearly (Level.warning) feed blocked.
+ * Updates deployment metrics notifications (e.g. feed block and reindexing progress) for the given deployment,
+ * based on the current cluster metrics.
+ * Will clear notifications for any cluster that no longer reports the metrics, or whose metrics indicate that feed
+ * is not blocked or reindexing is no longer in progress. Will set notifications for clusters:
+ * - that are (Level.error) or are nearly (Level.warning) feed blocked,
+ * - that are (Level.info) currently reindexing at least 1 document type.
*/
- public void setDeploymentFeedingBlockedNotifications(DeploymentId deploymentId, List<ClusterMetrics> clusterMetrics) {
+ public void setDeploymentMetricsNotifications(DeploymentId deploymentId, List<ClusterMetrics> clusterMetrics) {
Instant now = clock.instant();
- List<Notification> feedBlockNotifications = clusterMetrics.stream()
+ List<Notification> newNotifications = clusterMetrics.stream()
.flatMap(metric -> {
- Optional<Pair<Level, String>> memoryStatus =
- resourceUtilToFeedBlockStatus("memory", metric.memoryUtil(), metric.memoryFeedBlockLimit());
- Optional<Pair<Level, String>> diskStatus =
- resourceUtilToFeedBlockStatus("disk", metric.diskUtil(), metric.diskFeedBlockLimit());
- if (memoryStatus.isEmpty() && diskStatus.isEmpty()) return Stream.empty();
-
- // Find the max among levels
- Level level = Stream.of(memoryStatus, diskStatus)
- .flatMap(status -> status.stream().map(Pair::getFirst))
- .max(Comparator.comparing(Enum::ordinal)).get();
- List<String> messages = Stream.concat(memoryStatus.stream(), diskStatus.stream())
- .filter(status -> status.getFirst() == level) // Do not mix message from different levels
- .map(Pair::getSecond)
- .collect(Collectors.toUnmodifiableList());
NotificationSource source = NotificationSource.from(deploymentId, ClusterSpec.Id.from(metric.getClusterId()));
- return Stream.of(new Notification(now, Type.feedBlock, level, source, messages));
+ return Stream.of(createFeedBlockNotification(source, now, metric),
+ createReindexNotification(source, now, metric));
})
+ .flatMap(Optional::stream)
.collect(Collectors.toUnmodifiableList());
NotificationSource deploymentSource = NotificationSource.from(deploymentId);
@@ -128,10 +148,11 @@ public class NotificationsDb {
List<Notification> updated = Stream.concat(
initial.stream()
.filter(notification ->
- // Filter out old feed block notifications for this deployment
- notification.type() != Type.feedBlock || !deploymentSource.contains(notification.source())),
+ // Filter out old feed block and reindex notifications for this deployment
+ (notification.type() != Type.feedBlock && notification.type() != Type.reindex) ||
+ !deploymentSource.contains(notification.source())),
// ... and add the new notifications for this deployment
- feedBlockNotifications.stream())
+ newNotifications.stream())
.collect(Collectors.toUnmodifiableList());
if (!initial.equals(updated))
@@ -139,6 +160,33 @@ public class NotificationsDb {
}
}
+ private static Optional<Notification> createFeedBlockNotification(NotificationSource source, Instant at, ClusterMetrics metric) {
+ Optional<Pair<Level, String>> memoryStatus =
+ resourceUtilToFeedBlockStatus("memory", metric.memoryUtil(), metric.memoryFeedBlockLimit());
+ Optional<Pair<Level, String>> diskStatus =
+ resourceUtilToFeedBlockStatus("disk", metric.diskUtil(), metric.diskFeedBlockLimit());
+ if (memoryStatus.isEmpty() && diskStatus.isEmpty()) return Optional.empty();
+
+ // Find the max among levels
+ Level level = Stream.of(memoryStatus, diskStatus)
+ .flatMap(status -> status.stream().map(Pair::getFirst))
+ .max(Comparator.comparing(Enum::ordinal)).get();
+ List<String> messages = Stream.concat(memoryStatus.stream(), diskStatus.stream())
+ .filter(status -> status.getFirst() == level) // Do not mix messages from different levels
+ .map(Pair::getSecond)
+ .collect(Collectors.toUnmodifiableList());
+ return Optional.of(new Notification(at, Type.feedBlock, level, source, messages));
+ }
+
+ private static Optional<Notification> createReindexNotification(NotificationSource source, Instant at, ClusterMetrics metric) {
+ if (metric.reindexingProgress().isEmpty()) return Optional.empty();
+ List<String> messages = metric.reindexingProgress().entrySet().stream()
+ .map(entry -> String.format("document type '%s' (%.1f%% done)", entry.getKey(), 100 * entry.getValue()))
+ .sorted()
+ .collect(Collectors.toUnmodifiableList());
+ return Optional.of(new Notification(at, Type.reindex, Level.info, source, messages));
+ }
+
/**
* Returns a feed block summary for the given resource: the notification level and
* notification message for the given resource utilization wrt. given resource limit.
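
As a rough sketch (not part of the patch) of what the two helpers above produce: a cluster reporting disk utilization 0.85 against a feed block limit of 0.90, and reindexing progress {"music": 0.5} (example document type), would get the following notifications:

    // Type.feedBlock, Level.warning: "disk (usage: 85.0%, feed block limit: 90.0%)"
    // Type.reindex,   Level.info:    "document type 'music' (50.0% done)"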
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java
index a90f10401aa..08f1900c6e8 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java
@@ -22,6 +22,8 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.auditlog.AuditLog;
+import com.yahoo.vespa.hosted.controller.deployment.RetriggerEntry;
+import com.yahoo.vespa.hosted.controller.deployment.RetriggerEntrySerializer;
import com.yahoo.vespa.hosted.controller.deployment.Run;
import com.yahoo.vespa.hosted.controller.deployment.Step;
import com.yahoo.vespa.hosted.controller.dns.NameServiceQueue;
@@ -221,6 +223,10 @@ public class CuratorDb {
return curator.lock(lockRoot.append("supportAccess").append(deploymentId.dottedString()), defaultLockTimeout);
}
+ public Lock lockDeploymentRetriggerQueue() {
+ return curator.lock(lockRoot.append("deploymentRetriggerQueue"), defaultLockTimeout);
+ }
+
// -------------- Helpers ------------------------------------------
/** Try locking with a low timeout, meaning it is OK to fail lock acquisition.
@@ -635,6 +641,16 @@ public class CuratorDb {
curator.set(supportAccessPath(deploymentId), asJson(SupportAccessSerializer.toSlime(supportAccess)));
}
+ // -------------- Job Retrigger entries -----------------------------------
+
+ public List<RetriggerEntry> readRetriggerEntries() {
+ return readSlime(deploymentRetriggerPath()).map(RetriggerEntrySerializer::fromSlime).orElse(List.of());
+ }
+
+ public void writeRetriggerEntries(List<RetriggerEntry> retriggerEntries) {
+ curator.set(deploymentRetriggerPath(), asJson(RetriggerEntrySerializer.toSlime(retriggerEntries)));
+ }
+
// -------------- Paths ---------------------------------------------------
private Path lockPath(TenantName tenant) {
@@ -772,4 +788,8 @@ public class CuratorDb {
return supportAccessRoot.append(deploymentId.dottedString());
}
+ private static Path deploymentRetriggerPath() {
+ return root.append("deploymentRetriggerQueue");
+ }
+
}
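
A minimal sketch (not part of the patch) of the intended usage pattern for these methods: mutate the retrigger queue only while holding lockDeploymentRetriggerQueue(), as DeploymentTrigger and RetriggerMaintainer do above. Here jobId and minimumRunId are placeholders supplied by the caller:

    try (Lock lock = curator.lockDeploymentRetriggerQueue()) {
        List<RetriggerEntry> entries = new ArrayList<>(curator.readRetriggerEntries());
        entries.add(new RetriggerEntry(jobId, minimumRunId)); // placeholders, not real identifiers
        curator.writeRetriggerEntries(entries);
    }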
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializer.java
index 54dc102d573..06263329091 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializer.java
@@ -93,6 +93,7 @@ public class NotificationsSerializer {
case applicationPackage: return "applicationPackage";
case deployment: return "deployment";
case feedBlock: return "feedBlock";
+ case reindex: return "reindex";
default: throw new IllegalArgumentException("No serialization defined for notification type " + type);
}
}
@@ -102,12 +103,14 @@ public class NotificationsSerializer {
case "applicationPackage": return Notification.Type.applicationPackage;
case "deployment": return Notification.Type.deployment;
case "feedBlock": return Notification.Type.feedBlock;
+ case "reindex": return Notification.Type.reindex;
default: throw new IllegalArgumentException("Unknown serialized notification type value '" + field.asString() + "'");
}
}
private static String asString(Notification.Level level) {
switch (level) {
+ case info: return "info";
case warning: return "warning";
case error: return "error";
default: throw new IllegalArgumentException("No serialization defined for notification level " + level);
@@ -116,6 +119,7 @@ public class NotificationsSerializer {
private static Notification.Level levelFrom(Inspector field) {
switch (field.asString()) {
+ case "info": return Notification.Level.info;
case "warning": return Notification.Level.warning;
case "error": return Notification.Level.error;
default: throw new IllegalArgumentException("Unknown serialized notification level value '" + field.asString() + "'");
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
index 017da94facc..23754f6d57f 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
@@ -525,12 +525,14 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
case applicationPackage: return "applicationPackage";
case deployment: return "deployment";
case feedBlock: return "feedBlock";
+ case reindex: return "reindex";
default: throw new IllegalArgumentException("No serialization defined for notification type " + type);
}
}
private static String notificationLevelAsString(Notification.Level level) {
switch (level) {
+ case info: return "info";
case warning: return "warning";
case error: return "error";
default: throw new IllegalArgumentException("No serialization defined for notification level " + level);
@@ -997,6 +999,7 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
Principal principal = requireUserPrincipal(request);
SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
+ controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment);
return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandler.java
index ac9612a56c5..cffdd9fc928 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandler.java
@@ -16,7 +16,6 @@ import com.yahoo.slime.Inspector;
import com.yahoo.slime.Slime;
import com.yahoo.slime.SlimeUtils;
import com.yahoo.vespa.hosted.controller.Controller;
-import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node;
import com.yahoo.vespa.hosted.controller.api.integration.vcmr.ChangeRequest;
import com.yahoo.vespa.hosted.controller.api.integration.vcmr.VespaChangeRequest;
import com.yahoo.vespa.hosted.controller.auditlog.AuditLoggingRequestHandler;
@@ -134,7 +133,9 @@ public class ChangeManagementApiHandler extends AuditLoggingRequestHandler {
Inspector inspector = inspectorOrThrow(request);
// For now; mandatory fields
- Inspector hostArray = getInspectorFieldOrThrow(inspector, "hosts");
+ Inspector hostArray = inspector.field("hosts");
+ Inspector switchArray = inspector.field("switches");
+
// The impacted hostnames
List<String> hostNames = new ArrayList<>();
@@ -142,6 +143,15 @@ public class ChangeManagementApiHandler extends AuditLoggingRequestHandler {
hostArray.traverse((ArrayTraverser) (i, host) -> hostNames.add(host.asString()));
}
+ if (switchArray.valid()) {
+ List<String> switchNames = new ArrayList<>();
+ switchArray.traverse((ArrayTraverser) (i, switchName) -> switchNames.add(switchName.asString()));
+ hostNames.addAll(hostsOnSwitch(switchNames));
+ }
+
+ if (hostNames.isEmpty())
+ return ErrorResponse.badRequest("No prod hosts in provided host/switch list");
+
return doAssessment(hostNames);
}
@@ -272,13 +282,7 @@ public class ChangeManagementApiHandler extends AuditLoggingRequestHandler {
.map(HostName::from)
.collect(Collectors.toList());
- var potentialZones = controller.zoneRegistry()
- .zones()
- .reachable()
- .in(Environment.prod)
- .ids();
-
- for (var zone : potentialZones) {
+ for (var zone : getProdZones()) {
var affectedHostsInZone = controller.serviceRegistry().configServer().nodeRepository().list(zone, affectedHosts);
if (!affectedHostsInZone.isEmpty())
return Optional.of(zone);
@@ -287,4 +291,20 @@ public class ChangeManagementApiHandler extends AuditLoggingRequestHandler {
return Optional.empty();
}
+ private List<String> hostsOnSwitch(List<String> switches) {
+ return getProdZones().stream()
+ .flatMap(zone -> controller.serviceRegistry().configServer().nodeRepository().list(zone, false).stream())
+ .filter(node -> node.switchHostname().map(switches::contains).orElse(false))
+ .map(node -> node.hostname().value())
+ .collect(Collectors.toList());
+ }
+
+ private List<ZoneId> getProdZones() {
+ return controller.zoneRegistry()
+ .zones()
+ .reachable()
+ .in(Environment.prod)
+ .ids();
+ }
+
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiHandler.java
index cba89fe39cf..59ae2a505bb 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiHandler.java
@@ -3,7 +3,6 @@ package com.yahoo.vespa.hosted.controller.restapi.controller;
import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.config.provision.Zone;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.container.jdisc.HttpResponse;
@@ -19,7 +18,7 @@ import com.yahoo.slime.SlimeUtils;
import com.yahoo.vespa.athenz.api.AthenzUser;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
-import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId;
import com.yahoo.vespa.hosted.controller.auditlog.AuditLoggingRequestHandler;
import com.yahoo.vespa.hosted.controller.maintenance.ControllerMaintenance;
import com.yahoo.vespa.hosted.controller.maintenance.Upgrader;
@@ -124,12 +123,10 @@ public class ControllerApiHandler extends AuditLoggingRequestHandler {
SupportAccess supportAccess = controller.supportAccess().registerGrant(deployment, principal.getName(), certificate);
// Trigger deployment to include operator cert
- JobType jobType = JobType.from(controller.system(), deployment.zoneId())
- .orElseThrow(() -> new IllegalStateException("No job found to trigger for " + deployment.toUserFriendlyString()));
-
- String jobName = controller.applications().deploymentTrigger()
- .reTrigger(deployment.applicationId(), jobType).type().jobName();
- return new MessageResponse(String.format("Operator %s granted access and job %s triggered", principal.getName(), jobName));
+ Optional<JobId> jobId = controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment);
+ return new MessageResponse(
+ jobId.map(id -> String.format("Operator %s granted access and job %s triggered", principal.getName(), id.type().jobName()))
+ .orElseGet(() -> String.format("Operator %s granted access and job trigger queued", principal.getName())));
}
private <T> T requireField(Inspector inspector, String field, Function<String, T> mapper) {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java
index fc7a99eb2f0..78f688f545b 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java
@@ -27,6 +27,7 @@ import java.util.Date;
import java.util.List;
import java.util.OptionalInt;
import java.util.StringJoiner;
+import java.util.zip.Deflater;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
@@ -273,27 +274,27 @@ public class ApplicationPackageBuilder {
}
ByteArrayOutputStream zip = new ByteArrayOutputStream();
try (ZipOutputStream out = new ZipOutputStream(zip)) {
- out.putNextEntry(new ZipEntry(dir + "deployment.xml"));
- out.write(deploymentSpec());
- out.closeEntry();
- out.putNextEntry(new ZipEntry(dir + "validation-overrides.xml"));
- out.write(validationOverrides());
- out.closeEntry();
- out.putNextEntry(new ZipEntry(dir + "search-definitions/test.sd"));
- out.write(searchDefinition());
- out.closeEntry();
- out.putNextEntry(new ZipEntry(dir + "build-meta.json"));
- out.write(buildMeta(compileVersion));
- out.closeEntry();
- out.putNextEntry(new ZipEntry(dir + "security/clients.pem"));
- out.write(X509CertificateUtils.toPem(trustedCertificates).getBytes(UTF_8));
- out.closeEntry();
+ out.setLevel(Deflater.NO_COMPRESSION); // This builder is for tests only, so skip compression for speed
+ writeZipEntry(out, dir + "deployment.xml", deploymentSpec());
+ writeZipEntry(out, dir + "validation-overrides.xml", validationOverrides());
+ writeZipEntry(out, dir + "search-definitions/test.sd", searchDefinition());
+ writeZipEntry(out, dir + "build-meta.json", buildMeta(compileVersion));
+ if (!trustedCertificates.isEmpty()) {
+ writeZipEntry(out, dir + "security/clients.pem", X509CertificateUtils.toPem(trustedCertificates).getBytes(UTF_8));
+ }
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return new ApplicationPackage(zip.toByteArray());
}
+ private void writeZipEntry(ZipOutputStream out, String name, byte[] content) throws IOException {
+ ZipEntry entry = new ZipEntry(name);
+ out.putNextEntry(entry);
+ out.write(content);
+ out.closeEntry();
+ }
+
private static String asIso8601Date(Instant instant) {
return new SimpleDateFormat("yyyy-MM-dd").format(Date.from(instant));
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
index c8b4eaa5236..7077e14a648 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
@@ -11,6 +11,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.Change;
import com.yahoo.vespa.hosted.controller.versions.VespaVersion;
+import org.junit.Assert;
import org.junit.Test;
import java.time.Duration;
@@ -1228,4 +1229,17 @@ public class DeploymentTriggerTest {
assertEquals(List.of(), tester.jobs().active());
}
+ @Test
+ public void testRetriggerQueue() {
+ var app = tester.newDeploymentContext().submit().deploy();
+ app.submit();
+ tester.triggerJobs();
+
+ tester.deploymentTrigger().reTrigger(app.instanceId(), productionUsEast3);
+ tester.deploymentTrigger().reTriggerOrAddToQueue(app.deploymentIdIn(ZoneId.from("prod", "us-east-3")));
+ tester.deploymentTrigger().reTriggerOrAddToQueue(app.deploymentIdIn(ZoneId.from("prod", "us-east-3")));
+
+ List<RetriggerEntry> retriggerEntries = tester.controller().curator().readRetriggerEntries();
+ Assert.assertEquals(1, retriggerEntries.size());
+ }
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
index 4203051965b..098282e4e89 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
@@ -64,7 +64,6 @@ import java.util.Set;
import java.util.UUID;
import java.util.logging.Level;
import java.util.stream.Collectors;
-import java.util.stream.IntStream;
import static com.yahoo.config.provision.NodeResources.DiskSpeed.slow;
import static com.yahoo.config.provision.NodeResources.StorageType.remote;
@@ -168,18 +167,18 @@ public class ConfigServerMock extends AbstractComponent implements ConfigServer
public void addNodes(List<ZoneId> zones, List<SystemApplication> applications) {
for (ZoneId zone : zones) {
for (SystemApplication application : applications) {
- List<Node> nodes = IntStream.rangeClosed(1, 3)
- .mapToObj(i -> new Node.Builder()
- .hostname(HostName.from("node-" + i + "-" + application.id().application()
- .value() + "-" + zone.value()))
- .state(Node.State.active)
- .type(application.nodeType())
- .owner(application.id())
- .currentVersion(initialVersion).wantedVersion(initialVersion)
- .currentOsVersion(Version.emptyVersion).wantedOsVersion(Version.emptyVersion)
- .build())
- .collect(Collectors.toList());
- nodeRepository().putNodes(zone, nodes);
+ for (int i = 1; i <= 3; i++) {
+ Node node = new Node.Builder()
+ .hostname(HostName.from("node-" + i + "-" + application.id().application()
+ .value() + "-" + zone.value()))
+ .state(Node.State.active)
+ .type(application.nodeType())
+ .owner(application.id())
+ .currentVersion(initialVersion).wantedVersion(initialVersion)
+ .currentOsVersion(Version.emptyVersion).wantedOsVersion(Version.emptyVersion)
+ .build();
+ nodeRepository().putNode(zone, node);
+ }
convergeServices(application.id(), zone);
}
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/NodeRepositoryMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/NodeRepositoryMock.java
index afb56f10c38..4079591730d 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/NodeRepositoryMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/NodeRepositoryMock.java
@@ -59,9 +59,14 @@ public class NodeRepositoryMock implements NodeRepository {
/** Add or update given nodes in zone */
public void putNodes(ZoneId zone, List<Node> nodes) {
- nodeRepository.putIfAbsent(zone, new HashMap<>());
- nodeRepository.get(zone).putAll(nodes.stream().collect(Collectors.toMap(Node::hostname,
- Function.identity())));
+ Map<HostName, Node> zoneNodes = nodeRepository.computeIfAbsent(zone, (k) -> new HashMap<>());
+ for (var node : nodes) {
+ zoneNodes.put(node.hostname(), node);
+ }
+ }
+
+ public void putNode(ZoneId zone, Node node) {
+ nodeRepository.computeIfAbsent(zone, (k) -> new HashMap<>()).put(node.hostname(), node);
}
public void putApplication(ZoneId zone, Application application) {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneApiMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneApiMock.java
index 7fdbab49ba4..10fee56621c 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneApiMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneApiMock.java
@@ -78,8 +78,7 @@ public class ZoneApiMock implements ZoneApi {
public static class Builder {
- private final SystemName systemName = SystemName.defaultSystem();
-
+ private SystemName systemName = SystemName.defaultSystem();
private ZoneId id = ZoneId.defaultId();
private ZoneId virtualId ;
private CloudName cloudName = CloudName.defaultName();
@@ -90,6 +89,11 @@ public class ZoneApiMock implements ZoneApi {
return this;
}
+ public Builder withSystem(SystemName systemName) {
+ this.systemName = systemName;
+ return this;
+ }
+
public Builder withId(String id) {
return with(ZoneId.from(id));
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/CloudTrialExpirerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/CloudTrialExpirerTest.java
new file mode 100644
index 00000000000..f3c4f9f7438
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/CloudTrialExpirerTest.java
@@ -0,0 +1,93 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.maintenance;
+
+import com.yahoo.config.provision.SystemName;
+import com.yahoo.config.provision.TenantName;
+import com.yahoo.vespa.flags.InMemoryFlagSource;
+import com.yahoo.vespa.flags.PermanentFlags;
+import com.yahoo.vespa.hosted.controller.ControllerTester;
+import com.yahoo.vespa.hosted.controller.api.integration.billing.PlanId;
+import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
+import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
+import com.yahoo.vespa.hosted.controller.integration.ZoneApiMock;
+import com.yahoo.vespa.hosted.controller.tenant.LastLoginInfo;
+import com.yahoo.vespa.hosted.controller.tenant.Tenant;
+import org.junit.Test;
+
+import java.time.Duration;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * @author ogronnesby
+ */
+public class CloudTrialExpirerTest {
+ private final ControllerTester tester = new ControllerTester(SystemName.Public);
+ private final DeploymentTester deploymentTester = new DeploymentTester(tester);
+ private final CloudTrialExpirer expirer = new CloudTrialExpirer(tester.controller(), Duration.ofMinutes(5));
+
+ @Test
+ public void expire_inactive_tenant() {
+ registerTenant("trial-tenant", "trial", Duration.ofDays(14).plusMillis(1));
+ expirer.maintain();
+ assertPlan("trial-tenant", "none");
+ }
+
+ @Test
+ public void keep_inactive_nontrial_tenants() {
+ registerTenant("not-a-trial-tenant", "pay-as-you-go", Duration.ofDays(30));
+ expirer.maintain();
+ assertPlan("not-a-trial-tenant", "pay-as-you-go");
+ }
+
+ @Test
+ public void keep_active_trial_tenants() {
+ registerTenant("active-trial-tenant", "trial", Duration.ofHours(14).minusMillis(1));
+ expirer.maintain();
+ assertPlan("active-trial-tenant", "trial");
+ }
+
+ @Test
+ public void keep_inactive_exempt_tenants() {
+ registerTenant("exempt-trial-tenant", "trial", Duration.ofDays(40));
+ ((InMemoryFlagSource) tester.controller().flagSource()).withListFlag(PermanentFlags.EXTENDED_TRIAL_TENANTS.id(), List.of("exempt-trial-tenant"), String.class);
+ expirer.maintain();
+ assertPlan("exempt-trial-tenant", "trial");
+ }
+
+ @Test
+ public void keep_inactive_trial_tenants_with_deployments() {
+ registerTenant("with-deployments", "trial", Duration.ofDays(30));
+ registerDeployment("with-deployments", "my-app", "default", "aws-us-east-1c");
+ expirer.maintain();
+ assertPlan("with-deployments", "trial");
+ }
+
+ private void registerTenant(String tenantName, String plan, Duration timeSinceLastLogin) {
+ var name = TenantName.from(tenantName);
+ tester.createTenant(tenantName, Tenant.Type.cloud);
+ tester.serviceRegistry().billingController().setPlan(name, PlanId.from(plan), false);
+ tester.controller().tenants().updateLastLogin(name, List.of(LastLoginInfo.UserLevel.user), tester.controller().clock().instant().minus(timeSinceLastLogin));
+ }
+
+ private void registerDeployment(String tenantName, String appName, String instanceName, String regionName) {
+ var zone = ZoneApiMock.newBuilder()
+ .withSystem(tester.zoneRegistry().system())
+ .withId("prod." + regionName)
+ .build();
+ tester.zoneRegistry().setZones(zone);
+ var app = tester.createApplication(tenantName, appName, instanceName);
+ var ctx = deploymentTester.newDeploymentContext(tenantName, appName, instanceName);
+ var pkg = new ApplicationPackageBuilder()
+ .instances("default")
+ .region(regionName)
+ .trustDefaultCertificate()
+ .build();
+ ctx.submit(pkg).deploy();
+ }
+
+ private void assertPlan(String tenant, String planId) {
+ assertEquals(planId, tester.serviceRegistry().billingController().getPlan(TenantName.from(tenant)).value());
+ }
+}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintainerTest.java
index 7dc5cb34818..4bdb657d3af 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintainerTest.java
@@ -38,16 +38,13 @@ public class ControllerMaintainerTest {
public void records_metric() {
TestControllerMaintainer maintainer = new TestControllerMaintainer(tester.controller(), SystemName.main, new AtomicInteger());
maintainer.run();
- assertEquals(0L, consecutiveFailuresMetric());
assertEquals(1.0, successFactorMetric(), 0.0000001);
maintainer.success = false;
maintainer.run();
maintainer.run();
- assertEquals(2L, consecutiveFailuresMetric());
assertEquals(0.0, successFactorMetric(), 0.0000001);
maintainer.success = true;
maintainer.run();
- assertEquals(0, consecutiveFailuresMetric());
assertEquals(1.0, successFactorMetric(), 0.0000001);
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java
index 59fb5b596f1..c45aaa563e1 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java
@@ -118,8 +118,8 @@ public class DeploymentMetricsMaintainerTest {
@Test
public void cluster_metric_aggregation_test() {
List<ClusterMetrics> clusterMetrics = List.of(
- new ClusterMetrics("niceCluster", "container", Map.of("queriesPerSecond", 23.0, "queryLatency", 1337.0)),
- new ClusterMetrics("alsoNiceCluster", "container", Map.of("queriesPerSecond", 11.0, "queryLatency", 12.0)));
+ new ClusterMetrics("niceCluster", "container", Map.of("queriesPerSecond", 23.0, "queryLatency", 1337.0), Map.of()),
+ new ClusterMetrics("alsoNiceCluster", "container", Map.of("queriesPerSecond", 11.0, "queryLatency", 12.0), Map.of()));
DeploymentMetrics deploymentMetrics = DeploymentMetricsMaintainer.updateDeploymentMetrics(DeploymentMetrics.none, clusterMetrics);
@@ -131,7 +131,7 @@ public class DeploymentMetricsMaintainerTest {
}
private void setMetrics(ApplicationId application, Map<String, Double> metrics) {
- var clusterMetrics = new ClusterMetrics("default", "container", metrics);
+ var clusterMetrics = new ClusterMetrics("default", "container", metrics, Map.of());
tester.controllerTester().serviceRegistry().configServerMock().setMetrics(new DeploymentId(application, ZoneId.from("dev", "us-east-1")), clusterMetrics);
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RetriggerMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RetriggerMaintainerTest.java
new file mode 100644
index 00000000000..df93efab893
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RetriggerMaintainerTest.java
@@ -0,0 +1,70 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.hosted.controller.maintenance;
+
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.zone.ZoneId;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
+import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
+import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
+import com.yahoo.vespa.hosted.controller.deployment.RetriggerEntry;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.time.Duration;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * @author mortent
+ */
+public class RetriggerMaintainerTest {
+
+ private final DeploymentTester tester = new DeploymentTester();
+
+ @Test
+ public void processes_queue() throws IOException {
+ RetriggerMaintainer maintainer = new RetriggerMaintainer(tester.controller(), Duration.ofDays(1));
+ ApplicationId applicationId = ApplicationId.from("tenant", "app", "default");
+ var devApp = tester.newDeploymentContext(applicationId);
+ ApplicationPackage appPackage = new ApplicationPackageBuilder()
+ .region("us-west-1")
+ .build();
+
+ // Deploy app
+ devApp.runJob(JobType.devUsEast1, appPackage);
+ devApp.completeRollout();
+
+ // Trigger a run (to simulate a running job)
+ tester.deploymentTrigger().reTrigger(applicationId, JobType.devUsEast1);
+
+ // Add a job to the queue
+ tester.deploymentTrigger().reTriggerOrAddToQueue(devApp.deploymentIdIn(ZoneId.from("dev", "us-east-1")));
+
+ // Should be 1 entry in the queue:
+ List<RetriggerEntry> retriggerEntries = tester.controller().curator().readRetriggerEntries();
+ assertEquals(1, retriggerEntries.size());
+
+ // Adding to queue triggers abort
+ devApp.jobAborted(JobType.devUsEast1);
+ assertEquals(0, tester.jobs().active(applicationId).size());
+
+ // The maintainer runs and triggers dev us-east-1, but keeps the entry in the queue until the required run has completed
+ maintainer.maintain();
+ retriggerEntries = tester.controller().curator().readRetriggerEntries();
+ assertEquals(1, retriggerEntries.size());
+ assertEquals(1, tester.jobs().active(applicationId).size());
+
+ // Run outstanding jobs
+ devApp.runJob(JobType.devUsEast1);
+ assertEquals(0, tester.jobs().active(applicationId).size());
+
+ // Run maintainer again, should find that the job has already run successfully and will remove the entry.
+ maintainer.maintain();
+ retriggerEntries = tester.controller().curator().readRetriggerEntries();
+ assertEquals(0, retriggerEntries.size());
+ assertEquals(0, tester.jobs().active(applicationId).size());
+ }
+}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java
index 7b4882de3ff..29d77c38b1a 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java
@@ -16,7 +16,6 @@ import java.time.Duration;
import java.util.Map;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
/**
* Tests the traffic fraction updater. This also tests its dependency on DeploymentMetricsMaintainer.
@@ -82,7 +81,7 @@ public class TrafficShareUpdaterTest {
}
private void setQpsMetric(double qps, ApplicationId application, ZoneId zone, DeploymentTester tester) {
- var clusterMetrics = new ClusterMetrics("default", "container", Map.of(ClusterMetrics.QUERIES_PER_SECOND, qps));
+ var clusterMetrics = new ClusterMetrics("default", "container", Map.of(ClusterMetrics.QUERIES_PER_SECOND, qps), Map.of());
tester.controllerTester().serviceRegistry().configServerMock().setMetrics(new DeploymentId(application, zone), clusterMetrics);
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
index 484b471cbaa..326f4bf311e 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
@@ -164,7 +164,6 @@ public class UpgraderTest {
tester.triggerJobs();
assertEquals("Upgrade with error should retry", 1, tester.jobs().active().size());
-
// --- Failing application is repaired by changing the application, causing confidence to move above 'high' threshold
// Deploy application change
default0.submit(applicationPackage("default"));
@@ -1114,11 +1113,32 @@ public class UpgraderTest {
assertEquals("Upgrade orders are distinct", versions.size(), upgradeOrders.size());
}
+ private static final ApplicationPackage canaryApplicationPackage =
+ new ApplicationPackageBuilder().upgradePolicy("canary")
+ .region("us-west-1")
+ .region("us-east-3")
+ .build();
+
+ private static final ApplicationPackage defaultApplicationPackage =
+ new ApplicationPackageBuilder().upgradePolicy("default")
+ .region("us-west-1")
+ .region("us-east-3")
+ .build();
+
+ private static final ApplicationPackage conservativeApplicationPackage =
+ new ApplicationPackageBuilder().upgradePolicy("conservative")
+ .region("us-west-1")
+ .region("us-east-3")
+ .build();
+
+ /** Returns a prebuilt (empty) application package for the given upgrade policy, for efficiency */
private ApplicationPackage applicationPackage(String upgradePolicy) {
- return new ApplicationPackageBuilder().upgradePolicy(upgradePolicy)
- .region("us-west-1")
- .region("us-east-3")
- .build();
+ switch (upgradePolicy) {
+ case "canary" : return canaryApplicationPackage;
+ case "default" : return defaultApplicationPackage;
+ case "conservative" : return conservativeApplicationPackage;
+ default : throw new IllegalArgumentException("No upgrade policy '" + upgradePolicy + "'");
+ }
}
private DeploymentContext createAndDeploy(String applicationName, String upgradePolicy) {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDbTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDbTest.java
index 5bd7d1db769..454a4f81524 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDbTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDbTest.java
@@ -22,7 +22,9 @@ import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.Set;
import java.util.stream.Collectors;
+import java.util.stream.Stream;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@@ -105,57 +107,94 @@ public class NotificationsDbTest {
List<Notification> expected = new ArrayList<>(notifications);
// No metrics, no new notification
- notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of());
+ notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of());
assertEquals(expected, curatorDb.readNotifications(tenant));
// Metrics that contain none of the feed block metrics does not create new notification
- notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of(clusterMetrics("cluster1", null, null, null, null)));
+ notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", null, null, null, null, Map.of())));
assertEquals(expected, curatorDb.readNotifications(tenant));
// Metrics that only contain util or limit (should not be possible) should not cause any issues
- notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.95, null, null, 0.5)));
+ notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.95, null, null, 0.5, Map.of())));
assertEquals(expected, curatorDb.readNotifications(tenant));
// One resource is at warning
- notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.85, 0.9, 0.3, 0.5)));
+ notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.85, 0.9, 0.3, 0.5, Map.of())));
expected.add(notification(12345, Type.feedBlock, Level.warning, sourceCluster1, "disk (usage: 85.0%, feed block limit: 90.0%)"));
assertEquals(expected, curatorDb.readNotifications(tenant));
// Both resources over the limit
- notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.95, 0.9, 0.3, 0.5)));
+ notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.95, 0.9, 0.3, 0.5, Map.of())));
expected.set(6, notification(12345, Type.feedBlock, Level.error, sourceCluster1, "disk (usage: 95.0%, feed block limit: 90.0%)"));
assertEquals(expected, curatorDb.readNotifications(tenant));
// One resource at warning, one at error: Only show error message
- notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.95, 0.9, 0.7, 0.5)));
+ notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.95, 0.9, 0.7, 0.5, Map.of())));
expected.set(6, notification(12345, Type.feedBlock, Level.error, sourceCluster1,
"memory (usage: 70.0%, feed block limit: 50.0%)", "disk (usage: 95.0%, feed block limit: 90.0%)"));
assertEquals(expected, curatorDb.readNotifications(tenant));
}
@Test
- public void feed_blocked_multiple_cluster_test() {
+ public void deployment_metrics_multiple_cluster_test() {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenant.value(), "app1", "instance1"), ZoneId.from("prod", "us-south-3"));
NotificationSource sourceCluster1 = NotificationSource.from(deploymentId, ClusterSpec.Id.from("cluster1"));
NotificationSource sourceCluster2 = NotificationSource.from(deploymentId, ClusterSpec.Id.from("cluster2"));
NotificationSource sourceCluster3 = NotificationSource.from(deploymentId, ClusterSpec.Id.from("cluster3"));
List<Notification> expected = new ArrayList<>(notifications);
- // Cluster1 and cluster2 are having issues
- notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of(
- clusterMetrics("cluster1", 0.85, 0.9, 0.3, 0.5), clusterMetrics("cluster2", 0.6, 0.8, 0.9, 0.75), clusterMetrics("cluster3", 0.1, 0.8, 0.2, 0.9)));
+ // Cluster1 and cluster2 are having feed block issues, cluster 3 is reindexing
+ notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(
+ clusterMetrics("cluster1", 0.85, 0.9, 0.3, 0.5, Map.of()), clusterMetrics("cluster2", 0.6, 0.8, 0.9, 0.75, Map.of()), clusterMetrics("cluster3", 0.1, 0.8, 0.2, 0.9, Map.of("announcements", 0.75, "build", 0.5))));
expected.add(notification(12345, Type.feedBlock, Level.warning, sourceCluster1, "disk (usage: 85.0%, feed block limit: 90.0%)"));
expected.add(notification(12345, Type.feedBlock, Level.error, sourceCluster2, "memory (usage: 90.0%, feed block limit: 75.0%)"));
+ expected.add(notification(12345, Type.reindex, Level.info, sourceCluster3, "document type 'announcements' (75.0% done)", "document type 'build' (50.0% done)"));
assertEquals(expected, curatorDb.readNotifications(tenant));
- // Cluster1 improves, while cluster3 starts having issues
- notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of(
- clusterMetrics("cluster1", 0.15, 0.9, 0.3, 0.5), clusterMetrics("cluster2", 0.6, 0.8, 0.9, 0.75), clusterMetrics("cluster3", 0.75, 0.8, 0.2, 0.9)));
+ // Cluster1 improves, while cluster3 starts having feed block issues and finishes reindexing 'build' documents
+ notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(
+ clusterMetrics("cluster1", 0.15, 0.9, 0.3, 0.5, Map.of()), clusterMetrics("cluster2", 0.6, 0.8, 0.9, 0.75, Map.of()), clusterMetrics("cluster3", 0.75, 0.8, 0.2, 0.9, Map.of("announcements", 0.9))));
expected.set(6, notification(12345, Type.feedBlock, Level.error, sourceCluster2, "memory (usage: 90.0%, feed block limit: 75.0%)"));
expected.set(7, notification(12345, Type.feedBlock, Level.warning, sourceCluster3, "disk (usage: 75.0%, feed block limit: 80.0%)"));
+ expected.set(8, notification(12345, Type.reindex, Level.info, sourceCluster3, "document type 'announcements' (90.0% done)"));
assertEquals(expected, curatorDb.readNotifications(tenant));
}
+ @Test
+ public void removes_invalid_deployment_notifications() {
+ curatorDb.deleteNotifications(tenant); // Remove notifications set in init()
+
+ ZoneId z1 = ZoneId.from("prod", "us-west-1");
+ ZoneId z2 = ZoneId.from("prod", "eu-south-2");
+ DeploymentId d1 = new DeploymentId(ApplicationId.from("t1", "a1", "i1"), z1);
+ DeploymentId d2 = new DeploymentId(ApplicationId.from("t1", "a1", "i1"), z2);
+ DeploymentId d3 = new DeploymentId(ApplicationId.from("t1", "a1", "i2"), z1);
+ DeploymentId d4 = new DeploymentId(ApplicationId.from("t1", "a2", "i1"), z2);
+ DeploymentId d5 = new DeploymentId(ApplicationId.from("t2", "a1", "i1"), z2);
+
+ List<Notification> notifications = Stream.of(d1, d2, d3, d4, d5)
+ .flatMap(deployment -> Stream.of(Type.deployment, Type.feedBlock)
+ .map(type -> new Notification(Instant.EPOCH, type, Level.warning, NotificationSource.from(deployment), List.of("msg"))))
+ .collect(Collectors.toUnmodifiableList());
+ notifications.stream().collect(Collectors.groupingBy(notification -> notification.source().tenant(), Collectors.toList()))
+ .forEach(curatorDb::writeNotifications);
+
+ // All except d3 plus a deployment that has no notifications
+ Set<DeploymentId> allDeployments = Set.of(d1, d2, d4, d5, new DeploymentId(ApplicationId.from("t3", "a1", "i1"), z1));
+ notificationsDb.removeNotificationsForRemovedInstances(allDeployments);
+
+ List<Notification> expectedNotifications = new ArrayList<>(notifications);
+ // Only the deployment notification for d3 should be cleared (the other types already correctly clear themselves)
+ expectedNotifications.remove(4);
+
+ List<Notification> actualNotifications = curatorDb.listNotifications().stream()
+ .flatMap(tenant -> curatorDb.readNotifications(tenant).stream())
+ .collect(Collectors.toUnmodifiableList());
+
+ assertEquals(expectedNotifications.stream().map(Notification::toString).collect(Collectors.joining("\n")),
+ actualNotifications.stream().map(Notification::toString).collect(Collectors.joining("\n")));
+ }
+
@Before
public void init() {
curatorDb.writeNotifications(tenant, notifications);
@@ -169,12 +208,14 @@ public class NotificationsDbTest {
return new Notification(Instant.ofEpochSecond(secondsSinceEpoch), type, level, source, List.of(messages));
}
- private static ClusterMetrics clusterMetrics(String clusterId, Double diskUtil, Double diskLimit, Double memoryUtil, Double memoryLimit) {
+ private static ClusterMetrics clusterMetrics(String clusterId,
+ Double diskUtil, Double diskLimit, Double memoryUtil, Double memoryLimit,
+ Map<String, Double> reindexingProgress) {
Map<String, Double> metrics = new HashMap<>();
if (diskUtil != null) metrics.put(ClusterMetrics.DISK_UTIL, diskUtil);
if (diskLimit != null) metrics.put(ClusterMetrics.DISK_FEED_BLOCK_LIMIT, diskLimit);
if (memoryUtil != null) metrics.put(ClusterMetrics.MEMORY_UTIL, memoryUtil);
if (memoryLimit != null) metrics.put(ClusterMetrics.MEMORY_FEED_BLOCK_LIMIT, memoryLimit);
- return new ClusterMetrics(clusterId, "content", metrics);
+ return new ClusterMetrics(clusterId, "content", metrics, reindexingProgress);
}
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
index f4b8a643e28..01d39c0ea1c 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
@@ -1554,6 +1554,9 @@ public class ApplicationApiTest extends ControllerContainerTest {
List<SupportAccessGrant> activeGrants = tester.controller().supportAccess().activeGrantsFor(new DeploymentId(ApplicationId.fromSerializedForm("tenant1:application1:instance1"), zone));
assertEquals(1, activeGrants.size());
+ // Adding grant should trigger job
+ app.assertRunning(JobType.productionUsWest1);
+
// DELETE removes access
String disallowedResponse = grantResponse
.replaceAll("ALLOWED\".*?}", "NOT_ALLOWED\"}")
@@ -1563,6 +1566,9 @@ public class ApplicationApiTest extends ControllerContainerTest {
disallowedResponse, 200
);
+ // Revoking access should trigger job
+ app.assertRunning(JobType.productionUsWest1);
+
// Should be no available grant
activeGrants = tester.controller().supportAccess().activeGrantsFor(new DeploymentId(ApplicationId.fromSerializedForm("tenant1:application1:instance1"), zone));
assertEquals(0, activeGrants.size());
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandlerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandlerTest.java
index d87da62b8f2..80cee3af58b 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandlerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandlerTest.java
@@ -27,7 +27,6 @@ import java.time.Instant;
import java.time.ZonedDateTime;
import java.util.ArrayList;
import java.util.List;
-import java.util.UUID;
import static org.junit.Assert.assertEquals;
@@ -52,6 +51,7 @@ public class ChangeManagementApiHandlerTest extends ControllerContainerTest {
@Test
public void test_api() {
assertFile(new Request("http://localhost:8080/changemanagement/v1/assessment", "{\"zone\":\"prod.us-east-3\", \"hosts\": [\"host1\"]}", Request.Method.POST), "initial.json");
+ assertFile(new Request("http://localhost:8080/changemanagement/v1/assessment", "{\"zone\":\"prod.us-east-3\", \"switches\": [\"switch1\"]}", Request.Method.POST), "initial.json");
assertFile(new Request("http://localhost:8080/changemanagement/v1/vcmr"), "vcmrs.json");
}
@@ -98,6 +98,7 @@ public class ChangeManagementApiHandlerTest extends ControllerContainerTest {
private Node createNode() {
return new Node.Builder()
.hostname(HostName.from("host1"))
+ .switchHostname("switch1")
.build();
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/responses/maintenance.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/responses/maintenance.json
index 3cf79977fb8..e906df94023 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/responses/maintenance.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/responses/maintenance.json
@@ -19,6 +19,9 @@
"name": "CloudEventReporter"
},
{
+ "name": "CloudTrialExpirer"
+ },
+ {
"name": "ContactInformationMaintainer"
},
{
@@ -76,6 +79,9 @@
"name": "ResourceTagMaintainer"
},
{
+ "name":"RetriggerMaintainer"
+ },
+ {
"name": "SystemRoutingPolicyMaintainer"
},
{
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
index 047a4461f7c..79b564eee52 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
@@ -66,9 +66,9 @@ public class RoutingPoliciesTest {
private static final ZoneId zone3 = ZoneId.from("prod", "aws-us-east-1a");
private static final ZoneId zone4 = ZoneId.from("prod", "aws-us-east-1b");
- private final ApplicationPackage applicationPackage = applicationPackageBuilder().region(zone1.region())
- .region(zone2.region())
- .build();
+ private static final ApplicationPackage applicationPackage = applicationPackageBuilder().region(zone1.region())
+ .region(zone2.region())
+ .build();
@Test
public void global_routing_policies() {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java
index 77ce86f1664..4dd283cf5d7 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java
@@ -127,11 +127,7 @@ public class VersionStatusTest {
@Test
public void testVersionStatusAfterApplicationUpdates() {
DeploymentTester tester = new DeploymentTester();
- ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
- .upgradePolicy("default")
- .region("us-west-1")
- .region("us-east-3")
- .build();
+ ApplicationPackage applicationPackage = applicationPackage("default");
Version version1 = new Version("6.2");
Version version2 = new Version("6.3");
@@ -216,10 +212,9 @@ public class VersionStatusTest {
Version version0 = new Version("6.2");
tester.controllerTester().upgradeSystem(version0);
tester.upgrader().maintain();
- var builder = new ApplicationPackageBuilder().region("us-west-1").region("us-east-3");
// Setup applications - all running on version0
- ApplicationPackage canaryPolicy = builder.upgradePolicy("canary").build();
+ ApplicationPackage canaryPolicy = applicationPackage("canary");
var canary0 = tester.newDeploymentContext("tenant1", "canary0", "default")
.submit(canaryPolicy)
.deploy();
@@ -230,7 +225,7 @@ public class VersionStatusTest {
.submit(canaryPolicy)
.deploy();
- ApplicationPackage defaultPolicy = builder.upgradePolicy("default").build();
+ ApplicationPackage defaultPolicy = applicationPackage("default");
var default0 = tester.newDeploymentContext("tenant1", "default0", "default")
.submit(defaultPolicy)
.deploy();
@@ -262,7 +257,7 @@ public class VersionStatusTest {
.submit(defaultPolicy)
.deploy();
- ApplicationPackage conservativePolicy = builder.upgradePolicy("conservative").build();
+ ApplicationPackage conservativePolicy = applicationPackage("conservative");
var conservative0 = tester.newDeploymentContext("tenant1", "conservative0", "default")
.submit(conservativePolicy)
.deploy();
@@ -388,10 +383,10 @@ public class VersionStatusTest {
Version version0 = new Version("6.2");
tester.controllerTester().upgradeSystem(version0);
tester.upgrader().maintain();
- var appPackage = new ApplicationPackageBuilder().region("us-west-1").region("us-east-3").upgradePolicy("canary");
+ var appPackage = applicationPackage("canary");
var canary0 = tester.newDeploymentContext("tenant1", "canary0", "default")
- .submit(appPackage.build())
+ .submit(appPackage)
.deploy();
assertEquals("All applications running on this version: High",
@@ -537,13 +532,13 @@ public class VersionStatusTest {
Version version0 = Version.fromString("7.1");
tester.controllerTester().upgradeSystem(version0);
var canary0 = tester.newDeploymentContext("tenant1", "canary0", "default")
- .submit(new ApplicationPackageBuilder().upgradePolicy("canary").region("us-west-1").build())
+ .submit(applicationPackage("canary"))
.deploy();
var canary1 = tester.newDeploymentContext("tenant1", "canary1", "default")
- .submit(new ApplicationPackageBuilder().upgradePolicy("canary").region("us-west-1").build())
+ .submit(applicationPackage("canary"))
.deploy();
var default0 = tester.newDeploymentContext("tenant1", "default0", "default")
- .submit(new ApplicationPackageBuilder().upgradePolicy("default").region("us-west-1").build())
+ .submit(applicationPackage("default"))
.deploy();
tester.controllerTester().computeVersionStatus();
assertSame(Confidence.high, tester.controller().readVersionStatus().version(version0).confidence());
@@ -609,12 +604,11 @@ public class VersionStatusTest {
public void testStatusIncludesIncompleteUpgrades() {
var tester = new DeploymentTester().atMondayMorning();
var version0 = Version.fromString("7.1");
- var applicationPackage = new ApplicationPackageBuilder().region("us-west-1").build();
// Application deploys on initial version
tester.controllerTester().upgradeSystem(version0);
var context = tester.newDeploymentContext("tenant1", "default0", "default");
- context.submit(applicationPackage).deploy();
+ context.submit(applicationPackage("default")).deploy();
// System is upgraded and application starts upgrading to next version
var version1 = Version.fromString("7.2");
@@ -688,4 +682,32 @@ public class VersionStatusTest {
.orElseThrow(() -> new IllegalArgumentException("Expected to find version: " + version));
}
+ private static final ApplicationPackage canaryApplicationPackage =
+ new ApplicationPackageBuilder().upgradePolicy("canary")
+ .region("us-west-1")
+ .region("us-east-3")
+ .build();
+
+ private static final ApplicationPackage defaultApplicationPackage =
+ new ApplicationPackageBuilder().upgradePolicy("default")
+ .region("us-west-1")
+ .region("us-east-3")
+ .build();
+
+ private static final ApplicationPackage conservativeApplicationPackage =
+ new ApplicationPackageBuilder().upgradePolicy("conservative")
+ .region("us-west-1")
+ .region("us-east-3")
+ .build();
+
+ /** Returns a prebuilt application package with the given upgrade policy, for efficiency */
+ private ApplicationPackage applicationPackage(String upgradePolicy) {
+ switch (upgradePolicy) {
+ case "canary" : return canaryApplicationPackage;
+ case "default" : return defaultApplicationPackage;
+ case "conservative" : return conservativeApplicationPackage;
+ default : throw new IllegalArgumentException("No upgrade policy '" + upgradePolicy + "'");
+ }
+ }
+
}
diff --git a/default_build_settings.cmake b/default_build_settings.cmake
index 3fd93bd0c25..a61410ebf31 100644
--- a/default_build_settings.cmake
+++ b/default_build_settings.cmake
@@ -31,11 +31,7 @@ endfunction()
function(setup_vespa_default_build_settings_centos_8)
message("-- Setting up default build settings for centos 8")
set(DEFAULT_EXTRA_INCLUDE_DIRECTORY "${VESPA_DEPS}/include" "/usr/include/openblas" PARENT_SCOPE)
- if (VESPA_OS_DISTRO_NAME STREQUAL "CentOS Stream")
- set(DEFAULT_VESPA_LLVM_VERSION "11" PARENT_SCOPE)
- else()
- set(DEFAULT_VESPA_LLVM_VERSION "10" PARENT_SCOPE)
- endif()
+ set(DEFAULT_VESPA_LLVM_VERSION "11" PARENT_SCOPE)
endfunction()
function(setup_vespa_default_build_settings_darwin)
diff --git a/dist/vespa.spec b/dist/vespa.spec
index a7569980c0c..13ea9a733e1 100644
--- a/dist/vespa.spec
+++ b/dist/vespa.spec
@@ -16,6 +16,7 @@
%define _create_vespa_user 1
%define _create_vespa_service 1
%define _defattr_is_vespa_vespa 0
+%define _command_cmake cmake3
Name: vespa
Version: _VESPA_VERSION_
@@ -42,11 +43,11 @@ BuildRequires: maven
%define _java_home /usr/lib/jvm/java-11-amazon-corretto.%{?_arch}
BuildRequires: python3-pytest
%else
-BuildRequires: devtoolset-9-gcc-c++
-BuildRequires: devtoolset-9-libatomic-devel
-BuildRequires: devtoolset-9-binutils
+BuildRequires: devtoolset-10-gcc-c++
+BuildRequires: devtoolset-10-libatomic-devel
+BuildRequires: devtoolset-10-binutils
BuildRequires: rh-maven35
-%define _devtoolset_enable /opt/rh/devtoolset-9/enable
+%define _devtoolset_enable /opt/rh/devtoolset-10/enable
%define _rhmaven35_enable /opt/rh/rh-maven35/enable
BuildRequires: python36-pytest
%endif
@@ -54,19 +55,9 @@ BuildRequires: vespa-pybind11-devel
BuildRequires: python3-devel
%endif
%if 0%{?el8}
-%if 0%{?centos}
-%global _centos_stream %(grep -qs '^NAME="CentOS Stream"' /etc/os-release && echo 1 || echo 0)
-%endif
-%if 0%{?_centos_stream}
BuildRequires: gcc-toolset-10-gcc-c++
BuildRequires: gcc-toolset-10-binutils
%define _devtoolset_enable /opt/rh/gcc-toolset-10/enable
-BuildRequires: vespa-boost-devel >= 1.75.0-1
-%else
-BuildRequires: gcc-toolset-9-gcc-c++
-BuildRequires: gcc-toolset-9-binutils
-%define _devtoolset_enable /opt/rh/gcc-toolset-9/enable
-%endif
BuildRequires: maven
BuildRequires: pybind11-devel
BuildRequires: python3-pytest
@@ -82,7 +73,7 @@ BuildRequires: python3-devel
%if 0%{?el7}
BuildRequires: cmake3
BuildRequires: llvm7.0-devel
-BuildRequires: vespa-boost-devel >= 1.59.0-6
+BuildRequires: vespa-boost-devel >= 1.76.0-1
BuildRequires: vespa-gtest >= 1.8.1-1
BuildRequires: vespa-icu-devel >= 65.1.0-1
BuildRequires: vespa-lz4-devel >= 1.9.2-2
@@ -101,12 +92,15 @@ BuildRequires: vespa-libzstd-devel >= 1.4.5-2
%endif
%if 0%{?el8}
BuildRequires: cmake >= 3.11.4-3
-%if 0%{?_centos_stream}
-BuildRequires: llvm-devel >= 11.0.0
+%if 0%{?centos}
+# Current cmake on CentOS 8 is broken and requires libarchive to be installed manually
+BuildRequires: libarchive
+%define _command_cmake cmake
+BuildRequires: (llvm-devel >= 11.0.0 and llvm-devel < 12)
%else
-BuildRequires: llvm-devel >= 10.0.1
+BuildRequires: (llvm-devel >= 10.0.1 and llvm-devel < 11)
%endif
-BuildRequires: boost-devel >= 1.66
+BuildRequires: vespa-boost-devel >= 1.76.0-1
BuildRequires: openssl-devel
BuildRequires: vespa-gtest >= 1.8.1-1
BuildRequires: vespa-lz4-devel >= 1.9.2-2
@@ -152,7 +146,7 @@ BuildRequires: gmock-devel
%endif
%if 0%{?el7} && 0%{?amzn2}
BuildRequires: vespa-xxhash-devel = 0.8.0
-BuildRequires: vespa-openblas-devel = 0.3.12
+BuildRequires: vespa-openblas-devel = 0.3.15
BuildRequires: vespa-re2-devel = 20190801
%else
BuildRequires: xxhash-devel >= 0.8.0
@@ -225,7 +219,7 @@ Requires: vespa-valgrind >= 3.17.0-1
%endif
%endif
%if 0%{?el8}
-%if 0%{?_centos_stream}
+%if 0%{?centos}
%define _vespa_llvm_version 11
%else
%define _vespa_llvm_version 10
@@ -315,7 +309,7 @@ Requires: vespa-libzstd >= 1.4.5-2
Requires: openblas
%else
%if 0%{?amzn2}
-Requires: vespa-openblas
+Requires: vespa-openblas = 0.3.15
%else
Requires: openblas-serial
%endif
@@ -353,10 +347,10 @@ Requires: libicu
Requires: openssl-libs
%endif
%if 0%{?el8}
-%if 0%{?_centos_stream}
-Requires: llvm-libs >= 11.0.0
+%if 0%{?centos}
+Requires: (llvm-libs >= 11.0.0 and llvm-libs < 12)
%else
-Requires: llvm-libs >= 10.0.1
+Requires: (llvm-libs >= 10.0.1 and llvm-libs < 11)
%endif
Requires: vespa-protobuf = 3.7.0-5.el8
%endif
@@ -488,7 +482,7 @@ mvn --batch-mode -e -N io.takari:maven:wrapper -Dmaven=3.6.3
%endif
%{?_use_mvn_wrapper:env VESPA_MAVEN_COMMAND=$(pwd)/mvnw }sh bootstrap.sh java
%{?_use_mvn_wrapper:./mvnw}%{!?_use_mvn_wrapper:mvn} --batch-mode -nsu -T 1C install -Dmaven.test.skip=true -Dmaven.javadoc.skip=true
-cmake3 -DCMAKE_INSTALL_PREFIX=%{_prefix} \
+%{_command_cmake} -DCMAKE_INSTALL_PREFIX=%{_prefix} \
-DJAVA_HOME=$JAVA_HOME \
-DCMAKE_PREFIX_PATH=%{_vespa_deps_prefix} \
-DEXTRA_LINK_DIRECTORY="%{_extra_link_directory}" \
diff --git a/document/src/main/java/com/yahoo/document/StructDataType.java b/document/src/main/java/com/yahoo/document/StructDataType.java
index 73fe580308e..8a153856eff 100644
--- a/document/src/main/java/com/yahoo/document/StructDataType.java
+++ b/document/src/main/java/com/yahoo/document/StructDataType.java
@@ -22,7 +22,7 @@ public class StructDataType extends BaseStructDataType {
super(name);
}
- public StructDataType(int id,String name) {
+ public StructDataType(int id, String name) {
super(id, name);
}
diff --git a/document/src/main/java/com/yahoo/document/StructuredDataType.java b/document/src/main/java/com/yahoo/document/StructuredDataType.java
index e4bb94a5465..8a5f344e79e 100644
--- a/document/src/main/java/com/yahoo/document/StructuredDataType.java
+++ b/document/src/main/java/com/yahoo/document/StructuredDataType.java
@@ -10,8 +10,6 @@ import java.util.Collection;
import java.util.List;
/**
- * TODO: What is this and why
- *
* @author Håkon Humberset
*/
public abstract class StructuredDataType extends DataType {
diff --git a/document/src/main/java/com/yahoo/document/TemporaryStructuredDataType.java b/document/src/main/java/com/yahoo/document/TemporaryStructuredDataType.java
index 0449612da6f..f4139a597d2 100644
--- a/document/src/main/java/com/yahoo/document/TemporaryStructuredDataType.java
+++ b/document/src/main/java/com/yahoo/document/TemporaryStructuredDataType.java
@@ -2,7 +2,8 @@
package com.yahoo.document;
/**
- * Internal class, DO NOT USE!!&nbsp;Only public because it must be used from com.yahoo.searchdefinition.parser.
+ * Internal class, DO NOT USE!!
+ * Only public because it must be used from com.yahoo.searchdefinition.parser.
*
* @author Einar M R Rosenvinge
*/
diff --git a/eval/src/vespa/eval/eval/array_array_map.h b/eval/src/vespa/eval/eval/array_array_map.h
index 89fa0c77819..f49e3e4edbb 100644
--- a/eval/src/vespa/eval/eval/array_array_map.h
+++ b/eval/src/vespa/eval/eval/array_array_map.h
@@ -116,7 +116,7 @@ private:
_keys.push_back(k);
}
}
- _values.resize(_values.size() + _values_per_entry, V{});
+ _values.resize(_values.size() + _values_per_entry);
auto [pos, was_inserted] = _map.insert(MyKey{{tag_id},hash});
assert(was_inserted);
return Tag{tag_id};
diff --git a/filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileDownloaderTest.java b/filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileDownloaderTest.java
index d7700467494..c4848140b2d 100644
--- a/filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileDownloaderTest.java
+++ b/filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileDownloaderTest.java
@@ -103,7 +103,7 @@ public class FileDownloaderTest {
{
// fileReference does not exist on disk, needs to be downloaded
- FileReference fileReference = new FileReference("fileReference");
+ FileReference fileReference = new FileReference("baz");
File fileReferenceFullPath = fileReferenceFullPath(downloadDir, fileReference);
assertFalse(fileReferenceFullPath.getAbsolutePath(), fileDownloader.getFile(fileReference).isPresent());
@@ -202,7 +202,7 @@ public class FileDownloaderTest {
// Delay response so that we can make a second request while downloading the file from the first request
connection.setResponseHandler(new MockConnection.WaitResponseHandler(Duration.ofSeconds(1)));
- FileReference fileReference = new FileReference("fileReference");
+ FileReference fileReference = new FileReference("fileReference123");
File fileReferenceFullPath = fileReferenceFullPath(downloadDir, fileReference);
FileReferenceDownload fileReferenceDownload = new FileReferenceDownload(fileReference);
@@ -237,25 +237,25 @@ public class FileDownloaderTest {
MockConnection connectionPool = new MockConnection();
connectionPool.setResponseHandler(new MockConnection.WaitResponseHandler(timeout.plus(Duration.ofMillis(1000))));
FileDownloader fileDownloader = new FileDownloader(connectionPool, downloadDir, downloads, timeout, sleepBetweenRetries);
- FileReference foo = new FileReference("foo");
+ FileReference xyzzy = new FileReference("xyzzy");
// Should download since we do not have the file on disk
- fileDownloader.downloadIfNeeded(new FileReferenceDownload(foo));
- assertTrue(fileDownloader.isDownloading(foo));
- assertFalse(fileDownloader.getFile(foo).isPresent());
+ fileDownloader.downloadIfNeeded(new FileReferenceDownload(xyzzy));
+ assertTrue(fileDownloader.isDownloading(xyzzy));
+ assertFalse(fileDownloader.getFile(xyzzy).isPresent());
// Receive files to simulate download
- receiveFile();
+ receiveFile(fileDownloader, xyzzy, "xyzzy.jar", FileReferenceData.Type.file, "content");
// Should not download, since file has already been downloaded
- fileDownloader.downloadIfNeeded(new FileReferenceDownload(foo));
+ fileDownloader.downloadIfNeeded(new FileReferenceDownload(xyzzy));
// and file should be available
- assertTrue(fileDownloader.getFile(foo).isPresent());
+ assertTrue(fileDownloader.getFile(xyzzy).isPresent());
}
@Test
public void receiveFile() throws IOException {
- FileReference foo = new FileReference("foo");
+ FileReference foobar = new FileReference("foobar");
String filename = "foo.jar";
- receiveFile(fileDownloader, foo, filename, FileReferenceData.Type.file, "content");
- File downloadedFile = new File(fileReferenceFullPath(downloadDir, foo), filename);
+ receiveFile(fileDownloader, foobar, filename, FileReferenceData.Type.file, "content");
+ File downloadedFile = new File(fileReferenceFullPath(downloadDir, foobar), filename);
assertEquals("content", IOUtils.readFile(downloadedFile));
}
diff --git a/flags/pom.xml b/flags/pom.xml
index 4f1bdcb61e3..3774ab3bf5f 100644
--- a/flags/pom.xml
+++ b/flags/pom.xml
@@ -93,6 +93,11 @@
<artifactId>junit-jupiter</artifactId>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>org.junit.vintage</groupId>
+ <artifactId>junit-vintage-engine</artifactId>
+ <scope>test</scope>
+ </dependency>
</dependencies>
<build>
<plugins>
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/JsonNodeRawFlag.java b/flags/src/main/java/com/yahoo/vespa/flags/JsonNodeRawFlag.java
index 92b7b3bc04d..ec49c1b0eff 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/JsonNodeRawFlag.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/JsonNodeRawFlag.java
@@ -6,6 +6,7 @@ import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.Collection;
+import java.util.concurrent.atomic.AtomicReference;
import static com.yahoo.yolean.Exceptions.uncheck;
@@ -15,7 +16,8 @@ import static com.yahoo.yolean.Exceptions.uncheck;
* @author hakonhall
*/
public class JsonNodeRawFlag implements RawFlag {
- private static final ObjectMapper mapper = new ObjectMapper();
+
+ private static final AtomicReference<ObjectMapper> mapper = new AtomicReference<>();
private final JsonNode jsonNode;
@@ -24,7 +26,7 @@ public class JsonNodeRawFlag implements RawFlag {
}
public static JsonNodeRawFlag fromJson(String json) {
- return new JsonNodeRawFlag(uncheck(() -> mapper.readTree(json)));
+ return new JsonNodeRawFlag(uncheck(() -> objectMapper().readTree(json)));
}
public static JsonNodeRawFlag fromJsonNode(JsonNode jsonNode) {
@@ -32,20 +34,20 @@ public class JsonNodeRawFlag implements RawFlag {
}
public static <T> JsonNodeRawFlag fromJacksonClass(T value) {
- return new JsonNodeRawFlag(uncheck(() -> mapper.valueToTree(value)));
+ return new JsonNodeRawFlag(uncheck(() -> objectMapper().valueToTree(value)));
}
public <T> T toJacksonClass(Class<T> jacksonClass) {
- return uncheck(() -> mapper.treeToValue(jsonNode, jacksonClass));
+ return uncheck(() -> objectMapper().treeToValue(jsonNode, jacksonClass));
}
public <T> T toJacksonClass(JavaType jacksonClass) {
- return uncheck(() -> mapper.readValue(jsonNode.toString(), jacksonClass));
+ return uncheck(() -> objectMapper().readValue(jsonNode.toString(), jacksonClass));
}
@SuppressWarnings("rawtypes")
public static JavaType constructCollectionType(Class<? extends Collection> collectionClass, Class<?> elementClass) {
- return mapper.getTypeFactory().constructCollectionType(collectionClass, elementClass);
+ return objectMapper().getTypeFactory().constructCollectionType(collectionClass, elementClass);
}
@Override
@@ -57,4 +59,14 @@ public class JsonNodeRawFlag implements RawFlag {
public String asJson() {
return jsonNode.toString();
}
+
+ /** Initialize object mapper lazily */
+ private static ObjectMapper objectMapper() {
+ // ObjectMapper is a heavy-weight object so we construct it only when we need it
+ return mapper.updateAndGet((objectMapper) -> {
+ if (objectMapper != null) return objectMapper;
+ return new ObjectMapper();
+ });
+ }
+
}
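The JsonNodeRawFlag change above swaps an eagerly constructed ObjectMapper for one built lazily through an AtomicReference. A minimal standalone sketch of the same pattern, assuming only the JDK and jackson-databind on the classpath (the holder class name is illustrative, not part of the patch):

    import com.fasterxml.jackson.databind.ObjectMapper;
    import java.util.concurrent.atomic.AtomicReference;

    class LazyObjectMapperHolder {

        // The heavy-weight ObjectMapper is constructed only on first use.
        private static final AtomicReference<ObjectMapper> mapper = new AtomicReference<>();

        static ObjectMapper objectMapper() {
            // updateAndGet keeps initialization thread-safe without explicit locking.
            return mapper.updateAndGet(existing -> existing != null ? existing : new ObjectMapper());
        }
    }

Under contention updateAndGet may apply the function more than once, so a spare ObjectMapper can be created and discarded; that is harmless, which is why no explicit locking is needed.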
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java b/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
index 52b09a281d5..1b7f0c034a3 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
@@ -163,6 +163,13 @@ public class PermanentFlags {
"Takes effect immediately, but any current excess rebuilds will not be cancelled"
);
+ public static final UnboundListFlag<String> EXTENDED_TRIAL_TENANTS = defineListFlag(
+ "extended-trial-tenants", List.of(), String.class,
+ "Tenants that will not be expired from their trial plan",
+ "Takes effect immediately, used by the CloudTrialExpirer maintainer",
+ TENANT_ID
+ );
+
private PermanentFlags() {}
private static UnboundBooleanFlag defineFeatureFlag(
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java
index 4c384b09fad..ce8fed0aa70 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java
@@ -29,7 +29,10 @@ public class CoreCollector {
private static final Pattern CORE_GENERATOR_PATH_PATTERN = Pattern.compile("^Core was generated by `(?<path>.*?)'.$");
private static final Pattern EXECFN_PATH_PATTERN = Pattern.compile("^.* execfn: '(?<path>.*?)'");
private static final Pattern FROM_PATH_PATTERN = Pattern.compile("^.* from '(?<path>.*?)'");
- static final String GDB_PATH = "/opt/rh/devtoolset-9/root/bin/gdb";
+ static final String GDB_PATH_RHEL7_DT9 = "/opt/rh/devtoolset-9/root/bin/gdb";
+ static final String GDB_PATH_RHEL7_DT10 = "/opt/rh/devtoolset-10/root/bin/gdb";
+ static final String GDB_PATH_RHEL8 = "/opt/rh/gcc-toolset-10/root/bin/gdb";
+
static final Map<String, Object> JAVA_HEAP_DUMP_METADATA =
Map.of("bin_path", "java", "backtrace", List.of("Heap dump, no backtrace available"));
@@ -39,8 +42,23 @@ public class CoreCollector {
this.docker = docker;
}
+ String getGdbPath(NodeAgentContext context) {
+ // TODO: Remove when we do not have any devtoolset-9 installs left
+ String[] command_rhel7_dt9 = {"stat", GDB_PATH_RHEL7_DT9};
+ if (docker.executeCommandInContainerAsRoot(context, command_rhel7_dt9).getExitStatus() == 0) {
+ return GDB_PATH_RHEL7_DT9;
+ }
+
+ String[] command_rhel7_dt10 = {"stat", GDB_PATH_RHEL7_DT10};
+ if (docker.executeCommandInContainerAsRoot(context, command_rhel7_dt10).getExitStatus() == 0) {
+ return GDB_PATH_RHEL7_DT10;
+ }
+
+ return GDB_PATH_RHEL8;
+ }
+
Path readBinPathFallback(NodeAgentContext context, Path coredumpPath) {
- String command = GDB_PATH + " -n -batch -core " + coredumpPath + " | grep \'^Core was generated by\'";
+ String command = getGdbPath(context) + " -n -batch -core " + coredumpPath + " | grep \'^Core was generated by\'";
String[] wrappedCommand = {"/bin/sh", "-c", command};
ProcessResult result = docker.executeCommandInContainerAsRoot(context, wrappedCommand);
@@ -79,7 +97,7 @@ public class CoreCollector {
List<String> readBacktrace(NodeAgentContext context, Path coredumpPath, Path binPath, boolean allThreads) {
String threads = allThreads ? "thread apply all bt" : "bt";
- String[] command = {GDB_PATH, "-n", "-ex", threads, "-batch", binPath.toString(), coredumpPath.toString()};
+ String[] command = {getGdbPath(context), "-n", "-ex", threads, "-batch", binPath.toString(), coredumpPath.toString()};
ProcessResult result = docker.executeCommandInContainerAsRoot(context, command);
if (result.getExitStatus() != 0)
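The new getGdbPath above effectively probes a fixed list of candidate gdb locations inside the container and falls back to the RHEL 8 toolset path. A reduced sketch of that selection, with a pluggable existence check standing in for the `stat` command executed as root in the container (class and method names are illustrative):

    import java.util.List;
    import java.util.function.Predicate;

    class GdbPathResolver {

        // Candidate paths, probed in order; the final constant is the unconditional fallback.
        private static final List<String> CANDIDATES = List.of(
                "/opt/rh/devtoolset-9/root/bin/gdb",   // RHEL 7 with devtoolset-9 (legacy)
                "/opt/rh/devtoolset-10/root/bin/gdb"); // RHEL 7 with devtoolset-10
        private static final String RHEL8_FALLBACK = "/opt/rh/gcc-toolset-10/root/bin/gdb";

        static String resolve(Predicate<String> existsInContainer) {
            return CANDIDATES.stream()
                             .filter(existsInContainer)
                             .findFirst()
                             .orElse(RHEL8_FALLBACK);
        }
    }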
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollectorTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollectorTest.java
index 2827e99c697..d61ab9e53b8 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollectorTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollectorTest.java
@@ -12,7 +12,9 @@ import java.nio.file.Paths;
import java.util.List;
import java.util.Map;
-import static com.yahoo.vespa.hosted.node.admin.maintenance.coredump.CoreCollector.GDB_PATH;
+import static com.yahoo.vespa.hosted.node.admin.maintenance.coredump.CoreCollector.GDB_PATH_RHEL7_DT9;
+import static com.yahoo.vespa.hosted.node.admin.maintenance.coredump.CoreCollector.GDB_PATH_RHEL7_DT10;
+import static com.yahoo.vespa.hosted.node.admin.maintenance.coredump.CoreCollector.GDB_PATH_RHEL8;
import static com.yahoo.vespa.hosted.node.admin.maintenance.coredump.CoreCollector.JAVA_HEAP_DUMP_METADATA;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
@@ -60,9 +62,10 @@ public class CoreCollectorTest {
"execfn: '/usr/bin/program', platform: 'x86_64");
assertEquals(TEST_BIN_PATH, coreCollector.readBinPath(context, TEST_CORE_PATH));
+ mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT9}, "The stat output");
Path fallbackResponse = Paths.get("/response/from/fallback");
- mockExec(new String[]{"/bin/sh", "-c", GDB_PATH + " -n -batch -core /tmp/core.1234 | grep '^Core was generated by'"},
+ mockExec(new String[]{"/bin/sh", "-c", GDB_PATH_RHEL7_DT9 + " -n -batch -core /tmp/core.1234 | grep '^Core was generated by'"},
"Core was generated by `/response/from/fallback'.");
mockExec(cmd,
"/tmp/core.1234: ELF 64-bit LSB core file x86-64, version 1 (SYSV), SVR4-style");
@@ -74,8 +77,11 @@ public class CoreCollectorTest {
@Test
public void extractsBinaryPathUsingGdbTest() {
+ mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT9}, "", "stat: No such file or directory");
+ mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT10}, "The stat output");
+
final String[] cmd = new String[]{"/bin/sh", "-c",
- GDB_PATH + " -n -batch -core /tmp/core.1234 | grep '^Core was generated by'"};
+ GDB_PATH_RHEL7_DT10 + " -n -batch -core /tmp/core.1234 | grep '^Core was generated by'"};
mockExec(cmd, "Core was generated by `/usr/bin/program-from-gdb --identity foo/search/cluster.content_'.");
assertEquals(Paths.get("/usr/bin/program-from-gdb"), coreCollector.readBinPathFallback(context, TEST_CORE_PATH));
@@ -86,30 +92,34 @@ public class CoreCollectorTest {
fail("Expected not to be able to get bin path");
} catch (RuntimeException e) {
assertEquals("Failed to extract binary path from GDB, result: ProcessResult { exitStatus=1 output= errors=Error 123 }, command: " +
- "[/bin/sh, -c, /opt/rh/devtoolset-9/root/bin/gdb -n -batch -core /tmp/core.1234 | grep '^Core was generated by']", e.getMessage());
+ "[/bin/sh, -c, /opt/rh/devtoolset-10/root/bin/gdb -n -batch -core /tmp/core.1234 | grep '^Core was generated by']", e.getMessage());
}
}
@Test
public void extractsBacktraceUsingGdb() {
- mockExec(new String[]{GDB_PATH, "-n", "-ex", "bt", "-batch", "/usr/bin/program", "/tmp/core.1234"},
+ mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT9}, "The stat output");
+
+ mockExec(new String[]{GDB_PATH_RHEL7_DT9, "-n", "-ex", "bt", "-batch", "/usr/bin/program", "/tmp/core.1234"},
String.join("\n", GDB_BACKTRACE));
assertEquals(GDB_BACKTRACE, coreCollector.readBacktrace(context, TEST_CORE_PATH, TEST_BIN_PATH, false));
- mockExec(new String[]{GDB_PATH, "-n", "-ex", "bt", "-batch", "/usr/bin/program", "/tmp/core.1234"},
+ mockExec(new String[]{GDB_PATH_RHEL7_DT9, "-n", "-ex", "bt", "-batch", "/usr/bin/program", "/tmp/core.1234"},
"", "Failure");
try {
coreCollector.readBacktrace(context, TEST_CORE_PATH, TEST_BIN_PATH, false);
fail("Expected not to be able to read backtrace");
} catch (RuntimeException e) {
assertEquals("Failed to read backtrace ProcessResult { exitStatus=1 output= errors=Failure }, Command: " +
- "[/opt/rh/devtoolset-9/root/bin/gdb, -n, -ex, bt, -batch, /usr/bin/program, /tmp/core.1234]", e.getMessage());
+ "[" + GDB_PATH_RHEL7_DT9 + ", -n, -ex, bt, -batch, /usr/bin/program, /tmp/core.1234]", e.getMessage());
}
}
@Test
public void extractsBacktraceFromAllThreadsUsingGdb() {
- mockExec(new String[]{GDB_PATH, "-n", "-ex", "thread apply all bt", "-batch",
+ mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT9}, "The stat output");
+
+ mockExec(new String[]{GDB_PATH_RHEL7_DT9, "-n", "-ex", "thread apply all bt", "-batch",
"/usr/bin/program", "/tmp/core.1234"},
String.join("\n", GDB_BACKTRACE));
assertEquals(GDB_BACKTRACE, coreCollector.readBacktrace(context, TEST_CORE_PATH, TEST_BIN_PATH, true));
@@ -120,9 +130,11 @@ public class CoreCollectorTest {
mockExec(new String[]{"file", TEST_CORE_PATH.toString()},
"/tmp/core.1234: ELF 64-bit LSB core file x86-64, version 1 (SYSV), SVR4-style, from " +
"'/usr/bin/program'");
- mockExec(new String[]{GDB_PATH, "-n", "-ex", "bt", "-batch", "/usr/bin/program", "/tmp/core.1234"},
+ mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT9}, "", "stat: No such file or directory");
+ mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT10}, "", "stat: No such file or directory");
+ mockExec(new String[]{GDB_PATH_RHEL8, "-n", "-ex", "bt", "-batch", "/usr/bin/program", "/tmp/core.1234"},
String.join("\n", GDB_BACKTRACE));
- mockExec(new String[]{GDB_PATH, "-n", "-ex", "thread apply all bt", "-batch",
+ mockExec(new String[]{GDB_PATH_RHEL8, "-n", "-ex", "thread apply all bt", "-batch",
"/usr/bin/program", "/tmp/core.1234"},
String.join("\n", GDB_BACKTRACE));
@@ -138,7 +150,8 @@ public class CoreCollectorTest {
mockExec(new String[]{"file", TEST_CORE_PATH.toString()},
"/tmp/core.1234: ELF 64-bit LSB core file x86-64, version 1 (SYSV), SVR4-style, from " +
"'/usr/bin/program'");
- mockExec(new String[]{GDB_PATH + " -n -ex bt -batch /usr/bin/program /tmp/core.1234"},
+ mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT9}, "The stat output");
+ mockExec(new String[]{GDB_PATH_RHEL7_DT9 + " -n -ex bt -batch /usr/bin/program /tmp/core.1234"},
"", "Failure");
Map<String, Object> expectedData = Map.of("bin_path", TEST_BIN_PATH.toString());
@@ -149,7 +162,11 @@ public class CoreCollectorTest {
public void reportsJstackInsteadOfGdbForJdkCores() {
mockExec(new String[]{"file", TEST_CORE_PATH.toString()},
"dump.core.5954: ELF 64-bit LSB core file x86-64, version 1 (SYSV), too many program header sections (33172)");
- mockExec(new String[]{"/bin/sh", "-c", GDB_PATH + " -n -batch -core /tmp/core.1234 | grep '^Core was generated by'"},
+
+ mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT9}, "", "stat: No such file or directory");
+ mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT10}, "", "stat: No such file or directory");
+
+ mockExec(new String[]{"/bin/sh", "-c", GDB_PATH_RHEL8 + " -n -batch -core /tmp/core.1234 | grep '^Core was generated by'"},
"Core was generated by `" + JDK_PATH + " -Dconfig.id=default/container.11 -XX:+Pre'.");
String jstack = "jstack11";
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java
index 3b16ecbcaa9..892372f27e7 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java
@@ -102,11 +102,13 @@ class MaintenanceDeployment implements Closeable {
}
private Optional<Mutex> tryLock(ApplicationId application, NodeRepository nodeRepository) {
+ Duration timeout = Duration.ofSeconds(3);
try {
// Use a short lock to avoid interfering with change deployments
- return Optional.of(nodeRepository.nodes().lock(application, Duration.ofSeconds(1)));
+ return Optional.of(nodeRepository.nodes().lock(application, timeout));
}
catch (ApplicationLockException e) {
+ log.log(Level.WARNING, () -> "Could not lock " + application + " for maintenance deployment within " + timeout);
return Optional.empty();
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintainer.java
index fe5cd419b31..0fade7b32f8 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintainer.java
@@ -57,8 +57,7 @@ public abstract class NodeRepositoryMaintainer extends Maintainer {
}
@Override
- protected void recordCompletion(String job, Long consecutiveFailures, double successFactor) {
- metric.set("maintenance.consecutiveFailures", consecutiveFailures, metric.createContext(Map.of("job", job)));
+ public void completed(String job, double successFactor) {
metric.set("maintenance.successFactor", successFactor, metric.createContext(Map.of("job", job)));
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
index cdb5202603a..79d6fbfbdcd 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
@@ -67,7 +67,6 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
maintainers.add(new ScalingSuggestionsMaintainer(nodeRepository, defaults.scalingSuggestionsInterval, metric));
maintainers.add(new SwitchRebalancer(nodeRepository, defaults.switchRebalancerInterval, metric, deployer));
maintainers.add(new HostEncrypter(nodeRepository, defaults.hostEncrypterInterval, metric));
- maintainers.add(new ParkedExpirer(nodeRepository, defaults.parkedExpirerInterval, metric));
provisionServiceProvider.getLoadBalancerService(nodeRepository)
.map(lbService -> new LoadBalancerExpirer(nodeRepository, defaults.loadBalancerExpirerInterval, lbService, metric))
@@ -120,7 +119,6 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
private final Duration scalingSuggestionsInterval;
private final Duration switchRebalancerInterval;
private final Duration hostEncrypterInterval;
- private final Duration parkedExpirerInterval;
private final NodeFailer.ThrottlePolicy throttlePolicy;
@@ -129,7 +127,7 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
dynamicProvisionerInterval = Duration.ofMinutes(5);
failedExpirerInterval = Duration.ofMinutes(10);
failGrace = Duration.ofMinutes(30);
- infrastructureProvisionInterval = Duration.ofMinutes(1);
+ infrastructureProvisionInterval = Duration.ofMinutes(3);
loadBalancerExpirerInterval = Duration.ofMinutes(5);
metricsInterval = Duration.ofMinutes(1);
nodeFailerInterval = Duration.ofMinutes(15);
@@ -151,11 +149,10 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
throttlePolicy = NodeFailer.ThrottlePolicy.hosted;
inactiveConfigServerExpiry = Duration.ofMinutes(5);
inactiveControllerExpiry = Duration.ofMinutes(5);
- parkedExpirerInterval = Duration.ofMinutes(30);
if (zone.environment().isProduction() && ! zone.system().isCd()) {
inactiveExpiry = Duration.ofHours(4); // enough time for the application owner to discover and redeploy
- retiredInterval = Duration.ofMinutes(30);
+ retiredInterval = Duration.ofMinutes(15);
dirtyExpiry = Duration.ofHours(2); // enough time to clean the node
retiredExpiry = Duration.ofDays(4); // give up migrating data after 4 days
} else {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ParkedExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ParkedExpirer.java
deleted file mode 100644
index ec7826658e3..00000000000
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ParkedExpirer.java
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.provision.maintenance;
-
-import com.yahoo.config.provision.NodeType;
-import com.yahoo.jdisc.Metric;
-import com.yahoo.vespa.hosted.provision.Node;
-import com.yahoo.vespa.hosted.provision.NodeList;
-import com.yahoo.vespa.hosted.provision.NodeRepository;
-import com.yahoo.vespa.hosted.provision.node.Agent;
-import com.yahoo.vespa.hosted.provision.node.History;
-
-import java.time.Duration;
-import java.time.Instant;
-import java.util.Comparator;
-import java.util.logging.Logger;
-
-/**
- *
- * Expires parked nodes in dynamically provisioned zones.
- * If number of parked hosts exceed MAX_ALLOWED_PARKED_HOSTS, recycle in a queue order
- *
- * @author olaa
- */
-public class ParkedExpirer extends NodeRepositoryMaintainer {
-
- private static final int MAX_ALLOWED_PARKED_HOSTS = 20;
- private static final Logger log = Logger.getLogger(ParkedExpirer.class.getName());
-
- private final NodeRepository nodeRepository;
-
- ParkedExpirer(NodeRepository nodeRepository, Duration interval, Metric metric) {
- super(nodeRepository, interval, metric);
- this.nodeRepository = nodeRepository;
- }
-
- @Override
- protected double maintain() {
- if (!nodeRepository.zone().getCloud().dynamicProvisioning())
- return 1.0;
-
- NodeList parkedHosts = nodeRepository.nodes()
- .list(Node.State.parked)
- .nodeType(NodeType.host)
- .not().deprovisioning();
- int hostsToExpire = Math.max(0, parkedHosts.size() - MAX_ALLOWED_PARKED_HOSTS);
- parkedHosts.sortedBy(Comparator.comparing(this::parkedAt))
- .first(hostsToExpire)
- .forEach(host -> {
- log.info("Allowed number of parked nodes exceeded. Recycling " + host.hostname());
- nodeRepository.nodes().deallocate(host, Agent.ParkedExpirer, "Expired by ParkedExpirer");
- });
-
- return 1.0;
- }
-
- private Instant parkedAt(Node node) {
- return node.history().event(History.Event.Type.parked)
- .map(History.Event::at)
- .orElse(Instant.EPOCH); // Should not happen
- }
-
-}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirer.java
index 856d534bbd2..76c8210338e 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirer.java
@@ -4,15 +4,18 @@ package com.yahoo.vespa.hosted.provision.maintenance;
import com.yahoo.config.provision.NodeType;
import com.yahoo.jdisc.Metric;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.History;
import java.time.Duration;
+import java.time.Instant;
import java.util.List;
/**
* This moves nodes of type {@link NodeType#host} from provisioned to parked if they have been in provisioned too long.
+ * Parked hosts are deprovisioned as well, if too many hosts are being expired.
*
* Only {@link NodeType#host} is moved because any number of nodes of that type can exist. Other node types such as
* {@link NodeType#confighost} have a fixed number and thus cannot be replaced while the fixed number of nodes exist in
@@ -22,17 +25,40 @@ import java.util.List;
*/
public class ProvisionedExpirer extends Expirer {
+ private final NodeRepository nodeRepository;
+ private static final int MAXIMUM_ALLOWED_EXPIRED_HOSTS = 20;
+
ProvisionedExpirer(NodeRepository nodeRepository, Duration dirtyTimeout, Metric metric) {
super(Node.State.provisioned, History.Event.Type.provisioned, nodeRepository, dirtyTimeout, metric);
+ this.nodeRepository = nodeRepository;
}
@Override
protected void expire(List<Node> expired) {
+ int previouslyExpired = numberOfPreviouslyExpired();
for (Node expiredNode : expired) {
- if (expiredNode.type() == NodeType.host) {
- nodeRepository().nodes().parkRecursively(expiredNode.hostname(), Agent.ProvisionedExpirer, "Node is stuck in provisioned");
+ if (expiredNode.type() != NodeType.host)
+ continue;
+ nodeRepository().nodes().parkRecursively(expiredNode.hostname(), Agent.ProvisionedExpirer, "Node is stuck in provisioned");
+ if (MAXIMUM_ALLOWED_EXPIRED_HOSTS < ++previouslyExpired) {
+ nodeRepository.nodes().deprovision(expiredNode.hostname(), Agent.ProvisionedExpirer, nodeRepository.clock().instant());
}
}
}
+ private int numberOfPreviouslyExpired() {
+ return nodeRepository.nodes()
+ .list(Node.State.parked)
+ .nodeType(NodeType.host)
+ .matching(this::parkedByProvisionedExpirer)
+ .not().deprovisioning()
+ .size();
+ }
+
+ private boolean parkedByProvisionedExpirer(Node node) {
+ return node.history().event(History.Event.Type.parked)
+ .map(History.Event::agent)
+ .map(Agent.ProvisionedExpirer::equals)
+ .orElse(false);
+ }
}
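The expire loop above parks every expired host and, once the count of hosts already parked by this expirer passes MAXIMUM_ALLOWED_EXPIRED_HOSTS, deprovisions the surplus. A simplified sketch of that thresholding, detached from the node-repository types (the callback parameters are illustrative):

    import java.util.List;
    import java.util.function.Consumer;

    class ExpiryThrottle {

        static final int MAXIMUM_ALLOWED_EXPIRED_HOSTS = 20;

        // Parks every expired host; hosts beyond the allowed total are deprovisioned as well.
        static void expire(List<String> expiredHosts, int previouslyExpired,
                           Consumer<String> park, Consumer<String> deprovision) {
            int expiredSoFar = previouslyExpired;
            for (String hostname : expiredHosts) {
                park.accept(hostname);
                if (MAXIMUM_ALLOWED_EXPIRED_HOSTS < ++expiredSoFar) {
                    deprovision.accept(hostname);
                }
            }
        }
    }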
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainer.java
index 888f06a5004..b606e40ef42 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainer.java
@@ -40,19 +40,15 @@ public class ScalingSuggestionsMaintainer extends NodeRepositoryMaintainer {
if ( ! nodeRepository().zone().environment().isProduction()) return 1.0;
int attempts = 0;
- int successes = 0;
+ int failures = 0;
for (var application : activeNodesByApplication().entrySet()) {
- attempts++;
- successes += suggest(application.getKey(), application.getValue());
+ for (var cluster : nodesByCluster(application.getValue()).entrySet()) {
+ attempts++;
+ if ( ! suggest(application.getKey(), cluster.getKey(), cluster.getValue()))
+ failures++;
+ }
}
- return attempts == 0 ? 1.0 : ((double)successes / attempts);
- }
-
- private int suggest(ApplicationId application, NodeList applicationNodes) {
- int successes = 0;
- for (var cluster : nodesByCluster(applicationNodes).entrySet())
- successes += suggest(application, cluster.getKey(), cluster.getValue()) ? 1 : 0;
- return successes;
+ return asSuccessFactor(attempts, failures);
}
private Applications applications() {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
index ec1bfba6996..4d67c83a179 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
@@ -516,6 +516,8 @@ public class Nodes {
public void forget(Node node) {
if (node.state() != Node.State.deprovisioned)
throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
+ if (node.status().wantToRebuild())
+ throw new IllegalArgumentException(node + " is rebuilding and cannot be forgotten");
NestedTransaction transaction = new NestedTransaction();
db.removeNodes(List.of(node), transaction);
transaction.commit();
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ParkedExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ParkedExpirerTest.java
deleted file mode 100644
index bc60801c1d6..00000000000
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ParkedExpirerTest.java
+++ /dev/null
@@ -1,71 +0,0 @@
-package com.yahoo.vespa.hosted.provision.maintenance;
-
-import com.yahoo.config.provision.Cloud;
-import com.yahoo.config.provision.Environment;
-import com.yahoo.config.provision.Flavor;
-import com.yahoo.config.provision.NodeResources;
-import com.yahoo.config.provision.NodeType;
-import com.yahoo.config.provision.RegionName;
-import com.yahoo.config.provision.SystemName;
-import com.yahoo.config.provision.Zone;
-import com.yahoo.vespa.hosted.provision.Node;
-import com.yahoo.vespa.hosted.provision.node.Agent;
-import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
-import com.yahoo.vespa.hosted.provision.testutils.MockHostProvisioner;
-import org.junit.Test;
-
-import java.time.Duration;
-import java.util.List;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
-
-import static org.junit.Assert.*;
-
-/**
- * @author olaa
- */
-public class ParkedExpirerTest {
-
- private ProvisioningTester tester;
-
- @Test
- public void noop_if_not_dynamic_provisioning() {
- tester = getTester(false);
- populateNodeRepo();
-
- var expirer = new ParkedExpirer(tester.nodeRepository(), Duration.ofMinutes(4), new TestMetric());
- expirer.maintain();
-
- assertEquals(0, tester.nodeRepository().nodes().list(Node.State.dirty).size());
- assertEquals(25, tester.nodeRepository().nodes().list(Node.State.parked).size());
- }
-
- @Test
- public void recycles_correct_subset_of_parked_hosts() {
- tester = getTester(true);
- populateNodeRepo();
-
- var expirer = new ParkedExpirer(tester.nodeRepository(), Duration.ofMinutes(4), new TestMetric());
- expirer.maintain();
-
- assertEquals(4, tester.nodeRepository().nodes().list(Node.State.dirty).size());
- assertEquals(21, tester.nodeRepository().nodes().list(Node.State.parked).size());
-
- }
-
- private ProvisioningTester getTester(boolean dynamicProvisioning) {
- var zone = new Zone(Cloud.builder().dynamicProvisioning(dynamicProvisioning).build(), SystemName.main, Environment.prod, RegionName.from("us-east"));
- return new ProvisioningTester.Builder().zone(zone)
- .hostProvisioner(dynamicProvisioning ? new MockHostProvisioner(List.of()) : null)
- .build();
- }
-
- private void populateNodeRepo() {
- var nodes = IntStream.range(0, 25)
- .mapToObj(i -> Node.create("id-" + i, "host-" + i, new Flavor(NodeResources.unspecified()), Node.State.parked, NodeType.host).build())
- .collect(Collectors.toList());
- tester.nodeRepository().database().addNodesInState(nodes, Node.State.parked, Agent.system);
- tester.nodeRepository().nodes().deprovision(nodes.get(0).hostname(), Agent.system, tester.clock().instant()); // Deprovisioning host is not recycled
- }
-
-}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirerTest.java
new file mode 100644
index 00000000000..786faae24b4
--- /dev/null
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirerTest.java
@@ -0,0 +1,50 @@
+package com.yahoo.vespa.hosted.provision.maintenance;
+
+import com.yahoo.config.provision.Cloud;
+import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.Flavor;
+import com.yahoo.config.provision.NodeResources;
+import com.yahoo.config.provision.NodeType;
+import com.yahoo.config.provision.RegionName;
+import com.yahoo.config.provision.SystemName;
+import com.yahoo.config.provision.Zone;
+import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.node.Agent;
+import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
+import com.yahoo.vespa.hosted.provision.testutils.MockHostProvisioner;
+import org.junit.Test;
+
+import java.time.Duration;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import static org.junit.Assert.*;
+
+/**
+ * @author olaa
+ */
+public class ProvisionedExpirerTest {
+
+ private ProvisioningTester tester;
+
+ @Test
+ public void deprovisions_hosts_if_excessive_expiry() {
+ tester = new ProvisioningTester.Builder().build();
+ populateNodeRepo();
+
+ tester.clock().advance(Duration.ofMinutes(5));
+ new ProvisionedExpirer(tester.nodeRepository(), Duration.ofMinutes(4), new TestMetric()).maintain();
+
+ assertEquals(5, tester.nodeRepository().nodes().list().deprovisioning().size());
+ assertEquals(20, tester.nodeRepository().nodes().list().not().deprovisioning().size());
+ }
+
+ private void populateNodeRepo() {
+ var nodes = IntStream.range(0, 25)
+ .mapToObj(i -> Node.create("id-" + i, "host-" + i, new Flavor(NodeResources.unspecified()), Node.State.provisioned, NodeType.host).build())
+ .collect(Collectors.toList());
+ tester.nodeRepository().database().addNodesInState(nodes, Node.State.provisioned, Agent.system);
+ }
+
+}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java
index a28c11d009f..dd16d4674ad 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java
@@ -243,6 +243,13 @@ public class NodesV2ApiTest {
new byte[0], Request.Method.DELETE),
"{\"message\":\"Removed dockerhost1.yahoo.com\"}");
// ... and then forget it completely
+ tester.assertResponse(new Request("http://localhost:8080/nodes/v2/node/dockerhost1.yahoo.com",
+ new byte[0], Request.Method.DELETE),
+ 400,
+ "{\"error-code\":\"BAD_REQUEST\",\"message\":\"deprovisioned host dockerhost1.yahoo.com is rebuilding and cannot be forgotten\"}");
+ assertResponse(new Request("http://localhost:8080/nodes/v2/node/dockerhost1.yahoo.com",
+ Utf8.toBytes("{\"wantToRebuild\": false}"), Request.Method.PATCH),
+ "{\"message\":\"Updated dockerhost1.yahoo.com\"}");
assertResponse(new Request("http://localhost:8080/nodes/v2/node/dockerhost1.yahoo.com",
new byte[0], Request.Method.DELETE),
"{\"message\":\"Permanently removed dockerhost1.yahoo.com\"}");
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/maintenance.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/maintenance.json
index 26d711945c6..72224ef3cba 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/maintenance.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/maintenance.json
@@ -43,9 +43,6 @@
"name": "OsUpgradeActivator"
},
{
- "name": "ParkedExpirer"
- },
- {
"name": "PeriodicApplicationMaintainer"
},
{
diff --git a/screwdriver/build-vespa.sh b/screwdriver/build-vespa.sh
index 4480b33e6f9..91375728ca9 100755
--- a/screwdriver/build-vespa.sh
+++ b/screwdriver/build-vespa.sh
@@ -6,7 +6,7 @@ set -e
readonly SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd )"
readonly NUM_THREADS=$(( $(nproc) + 2 ))
-source /etc/profile.d/enable-devtoolset-9.sh
+source /etc/profile.d/enable-devtoolset-10.sh
source /etc/profile.d/enable-rh-maven35.sh
export MALLOC_ARENA_MAX=1
@@ -52,9 +52,9 @@ esac
if [[ $SHOULD_BUILD == systemtest ]]; then
yum -y --setopt=skip_missing_names_on_install=False install \
zstd \
- devtoolset-9-gcc-c++ \
- devtoolset-9-libatomic-devel \
- devtoolset-9-binutils \
+ devtoolset-10-gcc-c++ \
+ devtoolset-10-libatomic-devel \
+ devtoolset-10-binutils \
libxml2-devel \
rh-ruby27-rubygems-devel \
rh-ruby27-ruby-devel \
diff --git a/searchcore/src/tests/proton/documentdb/documentdb_test.cpp b/searchcore/src/tests/proton/documentdb/documentdb_test.cpp
index 463a7b164e1..3013e8f38d1 100644
--- a/searchcore/src/tests/proton/documentdb/documentdb_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/documentdb_test.cpp
@@ -19,6 +19,8 @@
#include <vespa/searchcore/proton/server/document_db_explorer.h>
#include <vespa/searchcore/proton/server/documentdb.h>
#include <vespa/searchcore/proton/server/documentdbconfigmanager.h>
+#include <vespa/searchcore/proton/server/feedhandler.h>
+#include <vespa/searchcore/proton/server/fileconfigmanager.h>
#include <vespa/searchcore/proton/server/memoryconfigstore.h>
#include <vespa/persistence/dummyimpl/dummy_bucket_executor.h>
#include <vespa/searchcorespi/index/indexflushtarget.h>
@@ -28,7 +30,10 @@
#include <vespa/vespalib/data/slime/slime.h>
#include <vespa/vespalib/util/size_literals.h>
#include <vespa/config-bucketspaces.h>
+#include <vespa/vespalib/io/fileutil.h>
+#include <vespa/vespalib/stllike/asciistream.h>
#include <vespa/vespalib/testkit/test_kit.h>
+#include <iostream>
using namespace cloud::config::filedistribution;
using namespace proton;
@@ -39,6 +44,7 @@ using document::DocumentType;
using document::DocumentTypeRepo;
using document::DocumenttypesConfig;
using document::test::makeBucketSpace;
+using search::SerialNum;
using search::TuneFileDocumentDB;
using search::index::DummyFileHeaderContext;
using search::index::Schema;
@@ -51,6 +57,24 @@ using vespalib::Slime;
namespace {
+void
+cleanup_dirs(bool file_config)
+{
+ vespalib::rmdir("typea", true);
+ vespalib::rmdir("tmp", true);
+ if (file_config) {
+ vespalib::rmdir("config", true);
+ }
+}
+
+vespalib::string
+config_subdir(SerialNum serialNum)
+{
+ vespalib::asciistream os;
+ os << "config/config-" << serialNum;
+ return os.str();
+}
+
struct MyDBOwner : public DummyDBOwner
{
std::shared_ptr<DocumentDBReferenceRegistry> _registry;
@@ -67,7 +91,30 @@ MyDBOwner::MyDBOwner()
{}
MyDBOwner::~MyDBOwner() = default;
-struct Fixture {
+struct FixtureBase {
+ bool _cleanup;
+ bool _file_config;
+ FixtureBase(bool file_config);
+ ~FixtureBase();
+ void disable_cleanup() { _cleanup = false; }
+};
+
+FixtureBase::FixtureBase(bool file_config)
+ : _cleanup(true),
+ _file_config(file_config)
+{
+ vespalib::mkdir("typea");
+}
+
+
+FixtureBase::~FixtureBase()
+{
+ if (_cleanup) {
+ cleanup_dirs(_file_config);
+ }
+}
+
+struct Fixture : public FixtureBase {
DummyWireService _dummy;
MyDBOwner _myDBOwner;
vespalib::ThreadStackExecutor _summaryExecutor;
@@ -79,12 +126,20 @@ struct Fixture {
matching::QueryLimiter _queryLimiter;
vespalib::Clock _clock;
+ std::unique_ptr<ConfigStore> make_config_store();
Fixture();
+ Fixture(bool file_config);
~Fixture();
};
Fixture::Fixture()
- : _dummy(),
+ : Fixture(false)
+{
+}
+
+Fixture::Fixture(bool file_config)
+ : FixtureBase(file_config),
+ _dummy(),
_myDBOwner(),
_summaryExecutor(8, 128_Ki),
_hwInfo(),
@@ -111,13 +166,25 @@ Fixture::Fixture()
_db = DocumentDB::create(".", mgr.getConfig(), "tcp/localhost:9014", _queryLimiter, _clock, DocTypeName("typea"),
makeBucketSpace(),
*b->getProtonConfigSP(), _myDBOwner, _summaryExecutor, _summaryExecutor, _bucketExecutor, _tls, _dummy,
- _fileHeaderContext, std::make_unique<MemoryConfigStore>(),
+ _fileHeaderContext, make_config_store(),
std::make_shared<vespalib::ThreadStackExecutor>(16, 128_Ki), _hwInfo);
_db->start();
_db->waitForOnlineState();
}
-Fixture::~Fixture() = default;
+Fixture::~Fixture()
+{
+}
+
+std::unique_ptr<ConfigStore>
+Fixture::make_config_store()
+{
+ if (_file_config) {
+ return std::make_unique<FileConfigManager>("config", "", "typea");
+ } else {
+ return std::make_unique<MemoryConfigStore>();
+ }
+}
const IFlushTarget *
extractRealFlushTarget(const IFlushTarget *target)
@@ -249,11 +316,56 @@ TEST_F("require that document db registers reference", Fixture)
EXPECT_EQUAL(search::attribute::BasicType::INT32, attrReadGuard->attribute()->getBasicType());
}
+TEST("require that normal restart works")
+{
+ {
+ Fixture f(true);
+ f.disable_cleanup();
+ }
+ {
+ Fixture f(true);
+ }
+}
+
+TEST("require that resume after interrupted save config works")
+{
+ SerialNum serialNum = 0;
+ {
+ Fixture f(true);
+ f.disable_cleanup();
+ serialNum = f._db->getFeedHandler().getSerialNum();
+ }
+ {
+ /*
+ * Simulate an interrupted save config by copying the best config to a
+ * serial number past the end of the transaction log
+ */
+ std::cout << "Replay end serial num is " << serialNum << std::endl;
+ search::IndexMetaInfo info("config");
+ ASSERT_TRUE(info.load());
+ auto best_config_snapshot = info.getBestSnapshot();
+ ASSERT_TRUE(best_config_snapshot.valid);
+ std::cout << "Best config serial is " << best_config_snapshot.syncToken << std::endl;
+ auto old_config_subdir = config_subdir(best_config_snapshot.syncToken);
+ auto new_config_subdir = config_subdir(serialNum + 1);
+ vespalib::mkdir(new_config_subdir);
+ auto config_files = vespalib::listDirectory(old_config_subdir);
+ for (auto &config_file : config_files) {
+ vespalib::copy(old_config_subdir + "/" + config_file, new_config_subdir + "/" + config_file, false, false);
+ }
+ info.addSnapshot({true, serialNum + 1, new_config_subdir.substr(new_config_subdir.rfind('/') + 1)});
+ info.save();
+ }
+ {
+ Fixture f(true);
+ }
+}
+
} // namespace
TEST_MAIN() {
+ cleanup_dirs(true);
DummyFileHeaderContext::setCreator("documentdb_test");
- FastOS_File::MakeDirectory("typea");
TEST_RUN_ALL();
- FastOS_FileInterface::EmptyAndRemoveDirectory("typea");
+ cleanup_dirs(true);
}
diff --git a/searchcore/src/tests/proton/server/memory_flush_config_updater/memory_flush_config_updater_test.cpp b/searchcore/src/tests/proton/server/memory_flush_config_updater/memory_flush_config_updater_test.cpp
index cff44631c6c..f918ebe9179 100644
--- a/searchcore/src/tests/proton/server/memory_flush_config_updater/memory_flush_config_updater_test.cpp
+++ b/searchcore/src/tests/proton/server/memory_flush_config_updater/memory_flush_config_updater_test.cpp
@@ -159,7 +159,7 @@ TEST_F("require that more disk bloat is allowed while node state is retired", Fi
f.notifyDiskMemUsage(ResourceUsageState(0.7, 0.3), belowLimit());
TEST_DO(f.assertStrategyDiskConfig(0.2, 0.2));
f.setNodeRetired(true);
- TEST_DO(f.assertStrategyDiskConfig((0.8 - 0.3 / 0.7) * 0.8, 1.0));
+ TEST_DO(f.assertStrategyDiskConfig((0.8 - ((0.3/0.7)*(1 - 0.2))) / 0.8, 1.0));
f.notifyDiskMemUsage(belowLimit(), belowLimit());
TEST_DO(f.assertStrategyDiskConfig(0.2, 0.2));
}
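The new expected value follows from the reworked relaxed-disk-mode computation in MemoryFlushConfigUpdater (changed later in this diff). Using the numbers encoded in the expression above — disk usage 0.3 against a limit of 0.7, disk bloat factor 0.2, low-watermark factor 0.8 — the target global bloat works out to roughly 0.57:

\[
u = \tfrac{0.3}{0.7} \approx 0.429, \qquad
s = u \cdot (1 - 0.2) \approx 0.343, \qquad
\frac{0.8 - s}{0.8} \approx 0.571
\]

where u is the disk utilization, s is the share of the disk occupied by live data assuming bloat is maxed out, and the last term is the bloat that can still be allowed while staying below the low watermark.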
diff --git a/searchcore/src/vespa/searchcore/grouping/groupingcontext.cpp b/searchcore/src/vespa/searchcore/grouping/groupingcontext.cpp
index 55e9ce16f70..01dd069b03c 100644
--- a/searchcore/src/vespa/searchcore/grouping/groupingcontext.cpp
+++ b/searchcore/src/vespa/searchcore/grouping/groupingcontext.cpp
@@ -4,17 +4,15 @@
#include <vespa/searchlib/aggregation/predicates.h>
#include <vespa/searchlib/aggregation/modifiers.h>
-namespace search {
+namespace search::grouping {
using aggregation::CountFS4Hits;
using aggregation::FS4HitSetDistributionKey;
-namespace grouping {
-
void
GroupingContext::deserialize(const char *groupSpec, uint32_t groupSpecLen)
{
- if ((groupSpec != NULL) && (groupSpecLen > 4)) {
+ if ((groupSpec != nullptr) && (groupSpecLen > 4)) {
vespalib::nbostream is(groupSpec, groupSpecLen);
vespalib::NBOSerializer nis(is);
uint32_t numGroupings = 0;
@@ -102,6 +100,4 @@ GroupingContext::needRanking() const
return true;
}
-
-} // namespace search::grouping
-} // namespace search
+}
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp b/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp
index aa633536419..e53e817af8d 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp
@@ -632,8 +632,9 @@ DocumentDB::saveInitialConfig(const DocumentDBConfig &configSnapshot)
// Only called from ctor
lock_guard guard(_configMutex);
- if (_config_store->getBestSerialNum() != 0)
+ if (_config_store->getBestSerialNum() != 0) {
return; // Initial config already present
+ }
SerialNum confSerial = _feedHandler->inc_replay_end_serial_num();
_feedHandler->setSerialNum(confSerial);
@@ -658,16 +659,17 @@ void
DocumentDB::resumeSaveConfig()
{
SerialNum bestSerial = _config_store->getBestSerialNum();
- if (bestSerial == 0)
- return;
- if (bestSerial != _feedHandler->get_replay_end_serial_num() + 1)
+ assert(bestSerial != 0);
+ if (bestSerial != _feedHandler->get_replay_end_serial_num() + 1) {
return;
+ }
+ LOG(warning, "DocumentDB(%s): resumeSaveConfig() resuming save config for serial %" PRIu64,
+ _docTypeName.toString().c_str(), bestSerial);
// proton was interrupted when saving later config.
SerialNum confSerial = _feedHandler->inc_replay_end_serial_num();
- _feedHandler->setSerialNum(confSerial);
+ assert(confSerial == bestSerial);
// resume operation, i.e. save config entry in transaction log
NewConfigOperation op(confSerial, *_config_store);
- op.setSerialNum(_feedHandler->inc_replay_end_serial_num());
(void) _feedHandler->storeOperationSync(op);
sync(op.getSerialNum());
}
diff --git a/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.cpp b/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.cpp
index 4b862b40896..04aea64fbd4 100644
--- a/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.cpp
@@ -4,8 +4,6 @@
#include "bootstrapconfig.h"
#include <vespa/searchcore/proton/common/hw_info_sampler.h>
#include <vespa/config/print/fileconfigwriter.h>
-#include <vespa/config/print/fileconfigsnapshotreader.h>
-#include <vespa/config/print/fileconfigsnapshotwriter.h>
#include <vespa/config-bucketspaces.h>
#include <vespa/document/repo/document_type_repo_factory.h>
#include <vespa/searchcommon/common/schemaconfigurer.h>
@@ -42,7 +40,8 @@ using vespa::config::search::summary::JuniperrcConfig;
using vespa::config::content::core::BucketspacesConfig;
using vespalib::nbostream;
-typedef IndexMetaInfo::SnapshotList SnapshotList;
+using SnapshotList = IndexMetaInfo::SnapshotList;
+using Snapshot = IndexMetaInfo::Snapshot;
using namespace std::chrono_literals;
namespace proton {
@@ -74,9 +73,7 @@ fsyncFile(const vespalib::string &fileName)
template <class Config>
void
-saveHelper(const vespalib::string &snapDir,
- const vespalib::string &name,
- const Config &config)
+saveHelper(const vespalib::string &snapDir, const vespalib::string &name, const Config &config)
{
vespalib::string fileName(snapDir + "/" + name + ".cfg");
config::FileConfigWriter writer(fileName);
@@ -105,8 +102,7 @@ public:
ConfigFile();
~ConfigFile();
- ConfigFile(const vespalib::string &name,
- const vespalib::string &fullName);
+ ConfigFile(const vespalib::string &name, const vespalib::string &fullName);
nbostream &serialize(nbostream &stream) const;
nbostream &deserialize(nbostream &stream);
@@ -122,8 +118,7 @@ ConfigFile::ConfigFile()
ConfigFile::~ConfigFile() = default;
-ConfigFile::ConfigFile(const vespalib::string &name,
- const vespalib::string &fullName)
+ConfigFile::ConfigFile(const vespalib::string &name, const vespalib::string &fullName)
: _name(name),
_modTime(0),
_content()
@@ -142,7 +137,7 @@ ConfigFile::ConfigFile(const vespalib::string &name,
nbostream &
ConfigFile::serialize(nbostream &stream) const
{
- assert(strchr(_name.c_str(), '/') == NULL);
+ assert(strchr(_name.c_str(), '/') == nullptr);
stream << _name;
stream << static_cast<int64_t>(_modTime);;
uint32_t sz = _content.size();
@@ -155,7 +150,7 @@ nbostream &
ConfigFile::deserialize(nbostream &stream)
{
stream >> _name;
- assert(strchr(_name.c_str(), '/') == NULL);
+ assert(strchr(_name.c_str(), '/') == nullptr);
int64_t modTime;
stream >> modTime;
_modTime = modTime;
@@ -255,8 +250,7 @@ FileConfigManager::getOldestSerialNum() const
}
void
-FileConfigManager::saveConfig(const DocumentDBConfig &snapshot,
- SerialNum serialNum)
+FileConfigManager::saveConfig(const DocumentDBConfig &snapshot, SerialNum serialNum)
{
if (getBestSerialNum() >= serialNum) {
LOG(warning, "Config for serial >= %" PRIu64 " already saved",
@@ -318,8 +312,7 @@ void addEmptyFile(vespalib::string snapDir, vespalib::string fileName)
}
void
-FileConfigManager::loadConfig(const DocumentDBConfig &currentSnapshot,
- search::SerialNum serialNum,
+FileConfigManager::loadConfig(const DocumentDBConfig &currentSnapshot, search::SerialNum serialNum,
DocumentDBConfig::SP &loadedSnapshot)
{
vespalib::string snapDirBaseName(makeSnapDirBaseName(serialNum));
@@ -333,13 +326,14 @@ FileConfigManager::loadConfig(const DocumentDBConfig &currentSnapshot,
DocumentDBConfigHelper dbc(spec, _docTypeName);
- typedef DocumenttypesConfig DTC;
- typedef DocumentDBConfig::DocumenttypesConfigSP DTCSP;
- DTCSP docTypesCfg(config::ConfigGetter<DTC>::getConfig("", spec).release());
+ using DTC = DocumenttypesConfig;
+ using DTCSP = DocumentDBConfig::DocumenttypesConfigSP;
+ DTCSP docTypesCfg = config::ConfigGetter<DTC>::getConfig("", spec);
std::shared_ptr<const DocumentTypeRepo> repo;
if (currentSnapshot.getDocumenttypesConfigSP() &&
currentSnapshot.getDocumentTypeRepoSP() &&
- currentSnapshot.getDocumenttypesConfig() == *docTypesCfg) {
+ (currentSnapshot.getDocumenttypesConfig() == *docTypesCfg))
+ {
docTypesCfg = currentSnapshot.getDocumenttypesConfigSP();
repo = currentSnapshot.getDocumentTypeRepoSP();
} else {
@@ -462,8 +456,7 @@ FileConfigManager::serializeConfig(SerialNum serialNum, nbostream &stream)
uint32_t numConfigs = configs.size();
stream << numConfigs;
for (const auto &config : configs) {
- ConfigFile file(config,
- snapDir + "/" + config);
+ ConfigFile file(config, snapDir + "/" + config);
stream << file;
}
}
diff --git a/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.h b/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.h
index 1c477ffd3c8..d58d7920c67 100644
--- a/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.h
+++ b/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.h
@@ -10,17 +10,12 @@
namespace proton {
class FileConfigManager : public ConfigStore {
-public:
- typedef std::unique_ptr<FileConfigManager> UP;
- typedef std::shared_ptr<FileConfigManager> SP;
- typedef search::IndexMetaInfo::Snapshot Snapshot;
-
private:
- vespalib::string _baseDir;
- vespalib::string _configId;
- vespalib::string _docTypeName;
+ vespalib::string _baseDir;
+ vespalib::string _configId;
+ vespalib::string _docTypeName;
search::IndexMetaInfo _info;
- ProtonConfigSP _protonConfig;
+ ProtonConfigSP _protonConfig;
public:
/**
@@ -33,14 +28,12 @@ public:
const vespalib::string &configId,
const vespalib::string &docTypeName);
- virtual
- ~FileConfigManager();
+ ~FileConfigManager() override;
- virtual SerialNum getBestSerialNum() const override;
- virtual SerialNum getOldestSerialNum() const override;
+ SerialNum getBestSerialNum() const override;
+ SerialNum getOldestSerialNum() const override;
- virtual void saveConfig(const DocumentDBConfig &snapshot,
- SerialNum serialNum) override;
+ void saveConfig(const DocumentDBConfig &snapshot, SerialNum serialNum) override;
/**
* Load a config snapshot from disk corresponding to the given
@@ -53,23 +46,21 @@ public:
* @param loadedSnapshot the shared pointer in which to store the
* resulting config snapshot.
*/
- virtual void loadConfig(const DocumentDBConfig &currentSnapshot,
- SerialNum serialNum,
- DocumentDBConfig::SP &loadedSnapshot) override;
+ void loadConfig(const DocumentDBConfig &currentSnapshot, SerialNum serialNum,
+ DocumentDBConfig::SP &loadedSnapshot) override;
- virtual void removeInvalid() override;
- virtual void prune(SerialNum serialNum) override;
- virtual bool hasValidSerial(SerialNum serialNum) const override;
+ void removeInvalid() override;
+ void prune(SerialNum serialNum) override;
+ bool hasValidSerial(SerialNum serialNum) const override;
- virtual SerialNum getPrevValidSerial(SerialNum serialNum) const override;
+ SerialNum getPrevValidSerial(SerialNum serialNum) const override;
/**
* Serialize config files.
*
* Used for serializing config into transaction log.
*/
- virtual void
- serializeConfig(SerialNum serialNum, vespalib::nbostream &stream) override;
+ void serializeConfig(SerialNum serialNum, vespalib::nbostream &stream) override;
/**
@@ -80,10 +71,9 @@ public:
* takes precedence over the serialized config files in the
* transaction log.
*/
- virtual void
- deserializeConfig(SerialNum serialNum, vespalib::nbostream &stream) override;
+ void deserializeConfig(SerialNum serialNum, vespalib::nbostream &stream) override;
- virtual void setProtonConfig(const ProtonConfigSP &protonConfig) override;
+ void setProtonConfig(const ProtonConfigSP &protonConfig) override;
};
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/server/memory_flush_config_updater.cpp b/searchcore/src/vespa/searchcore/proton/server/memory_flush_config_updater.cpp
index 88e2096aa63..cf51c7be518 100644
--- a/searchcore/src/vespa/searchcore/proton/server/memory_flush_config_updater.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/memory_flush_config_updater.cpp
@@ -13,7 +13,8 @@ namespace {
bool
shouldUseConservativeMode(const ResourceUsageState &resourceState,
bool currentlyUseConservativeMode,
- double lowWatermarkFactor) {
+ double lowWatermarkFactor)
+{
return resourceState.aboveLimit() ||
(currentlyUseConservativeMode && resourceState.aboveLimit(lowWatermarkFactor));
}
@@ -21,8 +22,7 @@ shouldUseConservativeMode(const ResourceUsageState &resourceState,
}
void
-MemoryFlushConfigUpdater::considerUseConservativeDiskMode(const LockGuard &guard,
- MemoryFlush::Config &newConfig)
+MemoryFlushConfigUpdater::considerUseConservativeDiskMode(const LockGuard &guard, MemoryFlush::Config &newConfig)
{
if (shouldUseConservativeMode(_currState.diskState(), _useConservativeDiskMode,
_currConfig.conservative.lowwatermarkfactor))
@@ -38,8 +38,7 @@ MemoryFlushConfigUpdater::considerUseConservativeDiskMode(const LockGuard &guard
}
void
-MemoryFlushConfigUpdater::considerUseConservativeMemoryMode(const LockGuard &,
- MemoryFlush::Config &newConfig)
+MemoryFlushConfigUpdater::considerUseConservativeMemoryMode(const LockGuard &, MemoryFlush::Config &newConfig)
{
if (shouldUseConservativeMode(_currState.memoryState(), _useConservativeMemoryMode,
_currConfig.conservative.lowwatermarkfactor))
@@ -59,18 +58,29 @@ MemoryFlushConfigUpdater::considerUseRelaxedDiskMode(const LockGuard &, MemoryFl
double bloatMargin = _currConfig.conservative.lowwatermarkfactor - utilization;
if (bloatMargin > 0.0) {
// Node retired and disk utilization is below the low watermark factor.
+ // Compute how much of the disk is occupied by live data, given that bloat is maxed,
+ // which is normally the case in a system that has been running for a while.
+ double spaceUtilization = utilization * (1 - _currConfig.diskbloatfactor);
+ // Then compute how much bloat can be allowed given the current space usage while still staying below the low watermark
+ double targetBloat = (_currConfig.conservative.lowwatermarkfactor - spaceUtilization) / _currConfig.conservative.lowwatermarkfactor;
newConfig.diskBloatFactor = 1.0;
- newConfig.globalDiskBloatFactor = std::max(bloatMargin * 0.8, _currConfig.diskbloatfactor);
+ newConfig.globalDiskBloatFactor = std::max(targetBloat, _currConfig.diskbloatfactor);
}
}
void
-MemoryFlushConfigUpdater::updateFlushStrategy(const LockGuard &guard)
+MemoryFlushConfigUpdater::updateFlushStrategy(const LockGuard &guard, const char * why)
{
MemoryFlush::Config newConfig = convertConfig(_currConfig, _memory);
considerUseConservativeDiskMode(guard, newConfig);
considerUseConservativeMemoryMode(guard, newConfig);
_flushStrategy->setConfig(newConfig);
+ LOG(info, "Due to %s (conservative-disk=%d, conservative-memory=%d, retired=%d) flush config updated to "
+ "global-disk-bloat(%1.2f), max-tls-size(%" PRIu64 "), "
+ "max-global-memory(%" PRIu64 "), max-memory-gain(%" PRIu64 ")",
+ why, _useConservativeDiskMode, _useConservativeMemoryMode, _nodeRetired,
+ newConfig.globalDiskBloatFactor, newConfig.maxGlobalTlsSize,
+ newConfig.maxGlobalMemory, newConfig.maxMemoryGain);
}
MemoryFlushConfigUpdater::MemoryFlushConfigUpdater(const MemoryFlush::SP &flushStrategy,
@@ -92,7 +102,7 @@ MemoryFlushConfigUpdater::setConfig(const ProtonConfig::Flush::Memory &newConfig
{
LockGuard guard(_mutex);
_currConfig = newConfig;
- updateFlushStrategy(guard);
+ updateFlushStrategy(guard, "new config");
}
void
@@ -100,7 +110,7 @@ MemoryFlushConfigUpdater::notifyDiskMemUsage(DiskMemUsageState newState)
{
LockGuard guard(_mutex);
_currState = newState;
- updateFlushStrategy(guard);
+ updateFlushStrategy(guard, "disk-mem-usage update");
}
void
@@ -108,7 +118,7 @@ MemoryFlushConfigUpdater::setNodeRetired(bool nodeRetired)
{
LockGuard guard(_mutex);
_nodeRetired = nodeRetired;
- updateFlushStrategy(guard);
+ updateFlushStrategy(guard, nodeRetired ? "node retired" : "node unretired");
}
namespace {
@@ -122,8 +132,7 @@ getHardMemoryLimit(const HwInfo::Memory &memory)
}
MemoryFlush::Config
-MemoryFlushConfigUpdater::convertConfig(const ProtonConfig::Flush::Memory &config,
- const HwInfo::Memory &memory)
+MemoryFlushConfigUpdater::convertConfig(const ProtonConfig::Flush::Memory &config, const HwInfo::Memory &memory)
{
const size_t hardMemoryLimit = getHardMemoryLimit(memory);
size_t totalMaxMemory = config.maxmemory;
diff --git a/searchcore/src/vespa/searchcore/proton/server/memory_flush_config_updater.h b/searchcore/src/vespa/searchcore/proton/server/memory_flush_config_updater.h
index 28ee330689d..c19074c288f 100644
--- a/searchcore/src/vespa/searchcore/proton/server/memory_flush_config_updater.h
+++ b/searchcore/src/vespa/searchcore/proton/server/memory_flush_config_updater.h
@@ -21,23 +21,20 @@ private:
using LockGuard = std::lock_guard<Mutex>;
using ProtonConfig = vespa::config::search::core::ProtonConfig;
- Mutex _mutex;
- MemoryFlush::SP _flushStrategy;
+ Mutex _mutex;
+ MemoryFlush::SP _flushStrategy;
ProtonConfig::Flush::Memory _currConfig;
- HwInfo::Memory _memory;
- DiskMemUsageState _currState;
- bool _useConservativeDiskMode;
- bool _useConservativeMemoryMode;
- bool _nodeRetired;
+ HwInfo::Memory _memory;
+ DiskMemUsageState _currState;
+ bool _useConservativeDiskMode;
+ bool _useConservativeMemoryMode;
+ bool _nodeRetired;
- void considerUseConservativeDiskMode(const LockGuard &guard,
- MemoryFlush::Config &newConfig);
- void considerUseConservativeMemoryMode(const LockGuard &guard,
- MemoryFlush::Config &newConfig);
- void considerUseRelaxedDiskMode(const LockGuard &guard,
- MemoryFlush::Config &newConfig);
- void updateFlushStrategy(const LockGuard &guard);
+ void considerUseConservativeDiskMode(const LockGuard &guard, MemoryFlush::Config &newConfig);
+ void considerUseConservativeMemoryMode(const LockGuard &guard, MemoryFlush::Config &newConfig);
+ void considerUseRelaxedDiskMode(const LockGuard &guard, MemoryFlush::Config &newConfig);
+ void updateFlushStrategy(const LockGuard &guard, const char * why);
public:
using UP = std::unique_ptr<MemoryFlushConfigUpdater>;
@@ -47,7 +44,7 @@ public:
const HwInfo::Memory &memory);
void setConfig(const ProtonConfig::Flush::Memory &newConfig);
void setNodeRetired(bool nodeRetired);
- virtual void notifyDiskMemUsage(DiskMemUsageState newState) override;
+ void notifyDiskMemUsage(DiskMemUsageState newState) override;
static MemoryFlush::Config convertConfig(const ProtonConfig::Flush::Memory &config,
const HwInfo::Memory &memory);
diff --git a/searchlib/src/tests/features/tensor_from_labels/tensor_from_labels_test.cpp b/searchlib/src/tests/features/tensor_from_labels/tensor_from_labels_test.cpp
index 2e83d2acbf2..8d202100699 100644
--- a/searchlib/src/tests/features/tensor_from_labels/tensor_from_labels_test.cpp
+++ b/searchlib/src/tests/features/tensor_from_labels/tensor_from_labels_test.cpp
@@ -95,6 +95,7 @@ struct ExecFixture
attrs.push_back(AttributeFactory::createAttribute("astr", AVC(AVBT::STRING, AVCT::ARRAY)));
attrs.push_back(AttributeFactory::createAttribute("aint", AVC(AVBT::INT32, AVCT::ARRAY)));
attrs.push_back(AttributeFactory::createAttribute("wsstr", AVC(AVBT::STRING, AVCT::WSET)));
+ attrs.push_back(AttributeFactory::createAttribute("sint", AVC(AVBT::INT32, AVCT::SINGLE)));
for (const auto &attr : attrs) {
attr->addReservedDoc();
@@ -112,6 +113,9 @@ struct ExecFixture
aint->append(1, 3, 0);
aint->append(1, 5, 0);
aint->append(1, 7, 0);
+
+ IntegerAttribute *sint = static_cast<IntegerAttribute *>(attrs[3].get());
+ sint->update(1, 5);
for (const auto &attr : attrs) {
attr->commit();
@@ -167,6 +171,20 @@ TEST_F("require that array attribute can be converted to tensor (explicit dimens
.add({{"dim", "5"}}, 1)), f.execute());
}
+TEST_F("require that single-value integer attribute can be converted to tensor (default dimension)",
+ ExecFixture("tensorFromLabels(attribute(sint))"))
+{
+ EXPECT_EQUAL(*make_tensor(TensorSpec("tensor(sint{})")
+ .add({{"sint", "5"}}, 1)), f.execute());
+}
+
+TEST_F("require that single-value integer attribute can be converted to tensor (explicit dimension)",
+ ExecFixture("tensorFromLabels(attribute(sint),foobar)"))
+{
+ EXPECT_EQUAL(*make_tensor(TensorSpec("tensor(foobar{})")
+ .add({{"foobar", "5"}}, 1)), f.execute());
+}
+
TEST_F("require that empty tensor is created if attribute does not exists",
ExecFixture("tensorFromLabels(attribute(null))"))
{
diff --git a/searchlib/src/vespa/searchlib/aggregation/group.h b/searchlib/src/vespa/searchlib/aggregation/group.h
index 5b425de24e6..681cda43afa 100644
--- a/searchlib/src/vespa/searchlib/aggregation/group.h
+++ b/searchlib/src/vespa/searchlib/aggregation/group.h
@@ -232,7 +232,7 @@ public:
/**
* Recursively checks if any itself or any children needs a full resort.
- * Then all hits must be processed and should be doen before any hit sorting.
+ * Then all hits must be processed and should be done before any hit sorting.
*/
bool needResort() const { return _aggr.needResort(); }
diff --git a/searchlib/src/vespa/searchlib/common/indexmetainfo.cpp b/searchlib/src/vespa/searchlib/common/indexmetainfo.cpp
index 837c38eb340..25bc754a86f 100644
--- a/searchlib/src/vespa/searchlib/common/indexmetainfo.cpp
+++ b/searchlib/src/vespa/searchlib/common/indexmetainfo.cpp
@@ -5,7 +5,6 @@
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/util/guard.h>
#include <cassert>
-#include <algorithm>
#include <vespa/log/log.h>
LOG_SETUP(".indexmetainfo");
@@ -14,13 +13,13 @@ namespace {
class Parser {
private:
- vespalib::string _name;
+ vespalib::string _name;
vespalib::FilePointer _file;
uint32_t _line;
char _buf[2048];
bool _error;
- vespalib::string _lastKey;
- vespalib::string _lastValue;
+ vespalib::string _lastKey;
+ vespalib::string _lastValue;
uint32_t _lastIdx;
bool _matched;
@@ -44,8 +43,7 @@ public:
return false;
}
bool illegalLine() {
- LOG(warning, "%s:%d: illegal line: %s",
- _name.c_str(), _line, _buf);
+ LOG(warning, "%s:%d: illegal line: %s", _name.c_str(), _line, _buf);
_error = true;
return false;
}
@@ -57,8 +55,7 @@ public:
}
bool illegalValue() {
LOG(warning, "%s:%d: illegal value for '%s': %s",
- _name.c_str(), _line, _lastKey.c_str(),
- _lastValue.c_str());
+ _name.c_str(), _line, _lastKey.c_str(), _lastValue.c_str());
_error = true;
return false;
}
@@ -79,7 +76,7 @@ public:
if (!_file.valid()) {
return openFailed();
}
- if (fgets(_buf, sizeof(_buf), _file) == NULL) {
+ if (fgets(_buf, sizeof(_buf), _file) == nullptr) {
return false; // EOF
}
++_line;
@@ -88,7 +85,7 @@ public:
_buf[--len] = '\0';
}
char *split = strchr(_buf, '=');
- if (split == NULL || (split - _buf) == 0) {
+ if (split == nullptr || (split - _buf) == 0) {
return illegalLine();
}
_lastKey = vespalib::string(_buf, split - _buf);
@@ -119,9 +116,9 @@ public:
void parseInt64(const vespalib::string &k, uint64_t &v) {
if (!_matched && !_error && _lastKey == k) {
_matched = true;
- char *end = NULL;
+ char *end = nullptr;
uint64_t val = strtoull(_lastValue.c_str(), &end, 10);
- if (end == NULL || *end != '\0' ||
+ if (end == nullptr || *end != '\0' ||
val == static_cast<uint64_t>(-1)) {
illegalValue();
return;
@@ -141,10 +138,10 @@ public:
if (dot2 == vespalib::string::npos) {
return illegalArrayKey();
}
- char *end = NULL;
+ char *end = nullptr;
const char *pt = _lastKey.c_str() + name.length() + 1;
uint32_t val = strtoul(pt, &end, 10);
- if (end == NULL || end == pt || *end != '.'
+ if (end == nullptr || end == pt || *end != '.'
|| val > size || size > val + 1)
{
return illegalArrayKey();
@@ -200,7 +197,7 @@ IndexMetaInfo::IndexMetaInfo(const vespalib::string &path)
{
}
-IndexMetaInfo::~IndexMetaInfo() {}
+IndexMetaInfo::~IndexMetaInfo() = default;
IndexMetaInfo::Snapshot
IndexMetaInfo::getBestSnapshot() const
@@ -209,11 +206,7 @@ IndexMetaInfo::getBestSnapshot() const
while (idx >= 0 && !_snapshots[idx].valid) {
--idx;
}
- if (idx >= 0) {
- return _snapshots[idx];
- } else {
- return Snapshot();
- }
+ return (idx >= 0) ? _snapshots[idx] : Snapshot();
}
@@ -233,7 +226,7 @@ bool
IndexMetaInfo::addSnapshot(const Snapshot &snap)
{
if (snap.dirName.empty()
- || findSnapshot(snap.syncToken) != _snapshots.end())
+ || (findSnapshot(snap.syncToken) != _snapshots.end()))
{
return false;
}
@@ -324,32 +317,23 @@ IndexMetaInfo::save(const vespalib::string &baseName)
fprintf(f, "snapshot.%d.dirName=%s\n", i, snap.dirName.c_str());
}
if (ferror(f) != 0) {
- LOG(error,
- "Could not write to file %s",
- newName.c_str());
+ LOG(error, "Could not write to file %s", newName.c_str());
return false;
}
if (fflush(f) != 0) {
- LOG(error,
- "Could not flush file %s",
- newName.c_str());
+ LOG(error, "Could not flush file %s", newName.c_str());
return false;
}
if (fsync(fileno(f)) != 0) {
- LOG(error,
- "Could not fsync file %s",
- newName.c_str());
+ LOG(error, "Could not fsync file %s", newName.c_str());
return false;
}
if (fclose(f.release()) != 0) {
- LOG(error,
- "Could not close file %s",
- newName.c_str());
+ LOG(error, "Could not close file %s", newName.c_str());
return false;
}
if (rename(newName.c_str(), fileName.c_str()) != 0) {
- LOG(warning, "could not rename: %s->%s",
- newName.c_str(), fileName.c_str());
+ LOG(warning, "could not rename: %s->%s", newName.c_str(), fileName.c_str());
return false;
}
vespalib::File::sync(vespalib::dirname(fileName));
diff --git a/searchlib/src/vespa/searchlib/features/tensor_from_labels_feature.cpp b/searchlib/src/vespa/searchlib/features/tensor_from_labels_feature.cpp
index 76a6e908fcb..24e06cfe639 100644
--- a/searchlib/src/vespa/searchlib/features/tensor_from_labels_feature.cpp
+++ b/searchlib/src/vespa/searchlib/features/tensor_from_labels_feature.cpp
@@ -45,7 +45,7 @@ TensorFromLabelsBlueprint::setup(const search::fef::IIndexEnvironment &env,
_dimension = _sourceParam;
}
describeOutput("tensor",
- "The tensor created from the given array source (attribute field or query parameter)",
+ "The tensor created from the given source (attribute field or query parameter)",
FeatureType::object(ValueType::make_type(CellType::DOUBLE, {{_dimension}})));
return validSource;
}
@@ -63,10 +63,14 @@ createAttributeExecutor(const search::fef::IQueryEnvironment &env,
" Returning empty tensor.", attrName.c_str());
return ConstantTensorExecutor::createEmpty(ValueType::make_type(CellType::DOUBLE, {{dimension}}), stash);
}
- if (attribute->getCollectionType() != search::attribute::CollectionType::ARRAY ||
- attribute->isFloatingPointType()) {
- LOG(warning, "The attribute vector '%s' is NOT of type array of string or integer."
- " Returning empty tensor.", attrName.c_str());
+ if (attribute->isFloatingPointType()) {
+ LOG(warning, "The attribute vector '%s' must have basic type string or integer."
+ " Returning empty tensor.", attrName.c_str());
+ return ConstantTensorExecutor::createEmpty(ValueType::make_type(CellType::DOUBLE, {{dimension}}), stash);
+ }
+ if (attribute->getCollectionType() == search::attribute::CollectionType::WSET) {
+ LOG(warning, "The attribute vector '%s' is a weighted set - use tensorFromWeightedSet instead."
+ " Returning empty tensor.", attrName.c_str());
return ConstantTensorExecutor::createEmpty(ValueType::make_type(CellType::DOUBLE, {{dimension}}), stash);
}
// Note that for array attribute vectors the default weight is 1.0 for all values.
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/api/AthenzGroup.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/api/AthenzGroup.java
new file mode 100644
index 00000000000..2608af381a2
--- /dev/null
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/api/AthenzGroup.java
@@ -0,0 +1,41 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.athenz.api;
+
+import java.util.Objects;
+
+public class AthenzGroup {
+ private final AthenzDomain domain;
+ private final String groupName;
+
+ public AthenzGroup(AthenzDomain domain, String groupName) {
+ this.domain = domain;
+ this.groupName = groupName;
+ }
+
+ public AthenzGroup(String domain, String groupName) {
+ this.domain = new AthenzDomain(domain);
+ this.groupName = groupName;
+ }
+
+ public AthenzDomain domain() {
+ return domain;
+ }
+
+ public String groupName() {
+ return groupName;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ AthenzGroup that = (AthenzGroup) o;
+ return Objects.equals(domain, that.domain) && Objects.equals(groupName, that.groupName);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(domain, groupName);
+ }
+}
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/DefaultZmsClient.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/DefaultZmsClient.java
index f73ac9c3535..5817eb0c8d2 100644
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/DefaultZmsClient.java
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/DefaultZmsClient.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.athenz.client.zms;
import com.yahoo.vespa.athenz.api.AthenzDomain;
+import com.yahoo.vespa.athenz.api.AthenzGroup;
import com.yahoo.vespa.athenz.api.AthenzIdentity;
import com.yahoo.vespa.athenz.api.AthenzResourceName;
import com.yahoo.vespa.athenz.api.AthenzRole;
@@ -112,7 +113,7 @@ public class DefaultZmsClient extends ClientBase implements ZmsClient {
@Override
public void addRoleMember(AthenzRole role, AthenzIdentity member) {
URI uri = zmsUrl.resolve(String.format("domain/%s/role/%s/member/%s", role.domain().getName(), role.roleName(), member.getFullName()));
- MembershipEntity membership = new MembershipEntity(member.getFullName(), true, role.roleName(), null);
+ MembershipEntity membership = new MembershipEntity.RoleMembershipEntity(member.getFullName(), true, role.roleName(), null);
HttpUriRequest request = RequestBuilder.put(uri)
.setEntity(toJsonStringEntity(membership))
.build();
@@ -133,6 +134,18 @@ public class DefaultZmsClient extends ClientBase implements ZmsClient {
.setUri(uri)
.build();
return execute(request, response -> {
+ MembershipEntity membership = readEntity(response, MembershipEntity.GroupMembershipEntity.class);
+ return membership.isMember;
+ });
+ }
+
+ @Override
+ public boolean getGroupMembership(AthenzGroup group, AthenzIdentity identity) {
+ URI uri = zmsUrl.resolve(String.format("domain/%s/group/%s/member/%s", group.domain().getName(), group.groupName(), identity.getFullName()));
+ HttpUriRequest request = RequestBuilder.get()
+ .setUri(uri)
+ .build();
+ return execute(request, response -> {
MembershipEntity membership = readEntity(response, MembershipEntity.class);
return membership.isMember;
});
@@ -223,7 +236,7 @@ public class DefaultZmsClient extends ClientBase implements ZmsClient {
@Override
public void approvePendingRoleMembership(AthenzRole athenzRole, AthenzUser athenzUser, Instant expiry) {
URI uri = zmsUrl.resolve(String.format("domain/%s/role/%s/member/%s/decision", athenzRole.domain().getName(), athenzRole.roleName(), athenzUser.getFullName()));
- MembershipEntity membership = new MembershipEntity(athenzUser.getFullName(), true, athenzRole.roleName(), Long.toString(expiry.getEpochSecond()));
+ MembershipEntity membership = new MembershipEntity.RoleMembershipEntity(athenzUser.getFullName(), true, athenzRole.roleName(), Long.toString(expiry.getEpochSecond()));
HttpUriRequest request = RequestBuilder.put()
.setUri(uri)
.setEntity(toJsonStringEntity(membership))
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/ZmsClient.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/ZmsClient.java
index 15e8ba77850..245078e3679 100644
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/ZmsClient.java
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/ZmsClient.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.athenz.client.zms;
import com.yahoo.vespa.athenz.api.AthenzDomain;
+import com.yahoo.vespa.athenz.api.AthenzGroup;
import com.yahoo.vespa.athenz.api.AthenzIdentity;
import com.yahoo.vespa.athenz.api.AthenzResourceName;
import com.yahoo.vespa.athenz.api.AthenzRole;
@@ -36,6 +37,8 @@ public interface ZmsClient extends AutoCloseable {
boolean getMembership(AthenzRole role, AthenzIdentity identity);
+ boolean getGroupMembership(AthenzGroup group, AthenzIdentity identity);
+
List<AthenzDomain> getDomainList(String prefix);
boolean hasAccess(AthenzResourceName resource, String action, AthenzIdentity identity);
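A minimal sketch of how a caller might use the new group-membership check; the client instance, domain and group name below are placeholders, not values taken from this change:

import com.yahoo.vespa.athenz.api.AthenzGroup;
import com.yahoo.vespa.athenz.api.AthenzIdentity;
import com.yahoo.vespa.athenz.client.zms.ZmsClient;

class GroupMembershipCheck {

    // 'client' can be any ZmsClient implementation, e.g. DefaultZmsClient.
    static boolean isOperator(ZmsClient client, AthenzIdentity identity) {
        AthenzGroup operators = new AthenzGroup("vespa.infrastructure", "operators");
        return client.getGroupMembership(operators, identity);
    }
}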
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/bindings/MembershipEntity.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/bindings/MembershipEntity.java
index d0672473776..33acf0e1c90 100644
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/bindings/MembershipEntity.java
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/bindings/MembershipEntity.java
@@ -16,17 +16,14 @@ import com.fasterxml.jackson.annotation.JsonProperty;
public class MembershipEntity {
public final String memberName;
public final boolean isMember;
- public final String roleName;
public final String expiration;
@JsonCreator
public MembershipEntity(@JsonProperty("memberName") String memberName,
@JsonProperty("isMember") boolean isMember,
- @JsonProperty("roleName") String roleName,
@JsonProperty("expiration") String expiration) {
this.memberName = memberName;
this.isMember = isMember;
- this.roleName = roleName;
this.expiration = expiration;
}
@@ -40,13 +37,45 @@ public class MembershipEntity {
return isMember;
}
- @JsonGetter("roleName")
- public String roleName() {
- return roleName;
- }
-
@JsonGetter("expiration")
public String expiration() {
return expiration;
}
-}
+
+ public static class RoleMembershipEntity extends MembershipEntity {
+ public final String roleName;
+
+ @JsonCreator
+ public RoleMembershipEntity(@JsonProperty("memberName") String memberName,
+ @JsonProperty("isMember") boolean isMember,
+ @JsonProperty("roleName") String roleName,
+ @JsonProperty("expiration") String expiration) {
+ super(memberName, isMember, expiration);
+ this.roleName = roleName;
+ }
+
+ @JsonGetter("roleName")
+ public String roleName() {
+ return roleName;
+ }
+
+ }
+
+ public static class GroupMembershipEntity extends MembershipEntity {
+ public final String groupName;
+
+ @JsonCreator
+ public GroupMembershipEntity(@JsonProperty("memberName") String memberName,
+ @JsonProperty("isMember") boolean isMember,
+ @JsonProperty("groupName") String groupName,
+ @JsonProperty("expiration") String expiration) {
+ super(memberName, isMember, expiration);
+ this.groupName = groupName;
+ }
+
+ @JsonGetter("groupName")
+ public String groupName() {
+ return groupName;
+ }
+ }
+} \ No newline at end of file
diff --git a/vespa-feed-client/abi-spec.json b/vespa-feed-client/abi-spec.json
index db9c1ff1a02..ecac167cd8e 100644
--- a/vespa-feed-client/abi-spec.json
+++ b/vespa-feed-client/abi-spec.json
@@ -158,8 +158,12 @@
],
"methods": [
"public void <init>(java.lang.String)",
+ "public void <init>(ai.vespa.feed.client.DocumentId, java.lang.String)",
"public void <init>(java.lang.String, java.lang.Throwable)",
- "public void <init>(java.lang.Throwable)"
+ "public void <init>(java.lang.Throwable)",
+ "public void <init>(ai.vespa.feed.client.DocumentId, java.lang.Throwable)",
+ "public void <init>(ai.vespa.feed.client.DocumentId, java.lang.String, java.lang.Throwable)",
+ "public java.util.Optional documentId()"
],
"fields": []
},
@@ -202,8 +206,8 @@
"abstract"
],
"methods": [
- "public void onNextResult(ai.vespa.feed.client.Result, java.lang.Throwable)",
- "public void onError(java.lang.Throwable)",
+ "public void onNextResult(ai.vespa.feed.client.Result, ai.vespa.feed.client.FeedException)",
+ "public void onError(ai.vespa.feed.client.FeedException)",
"public void onComplete()"
],
"fields": []
@@ -225,18 +229,6 @@
],
"fields": []
},
- "ai.vespa.feed.client.JsonParseException": {
- "superClass": "ai.vespa.feed.client.FeedException",
- "interfaces": [],
- "attributes": [
- "public"
- ],
- "methods": [
- "public void <init>(java.lang.String)",
- "public void <init>(java.lang.String, java.lang.Throwable)"
- ],
- "fields": []
- },
"ai.vespa.feed.client.OperationParameters": {
"superClass": "java.lang.Object",
"interfaces": [],
@@ -261,6 +253,18 @@
],
"fields": []
},
+ "ai.vespa.feed.client.OperationParseException": {
+ "superClass": "ai.vespa.feed.client.FeedException",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(java.lang.String)",
+ "public void <init>(java.lang.String, java.lang.Throwable)"
+ ],
+ "fields": []
+ },
"ai.vespa.feed.client.OperationStats": {
"superClass": "java.lang.Object",
"interfaces": [],
@@ -315,5 +319,17 @@
"public java.util.Optional traceMessage()"
],
"fields": []
+ },
+ "ai.vespa.feed.client.ResultParseException": {
+ "superClass": "ai.vespa.feed.client.FeedException",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(ai.vespa.feed.client.DocumentId, java.lang.String)",
+ "public void <init>(ai.vespa.feed.client.DocumentId, java.lang.Throwable)"
+ ],
+ "fields": []
}
} \ No newline at end of file
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java
index 250809a48b9..952edfb5464 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java
@@ -12,13 +12,21 @@ import java.util.concurrent.CompletableFuture;
*/
public interface FeedClient extends Closeable {
- /** Send a document put with the given parameters, returning a future with the result of the operation. */
+ /**
+ * Send a document put with the given parameters, returning a future with the result of the operation.
+ * Exceptional completion will be an instance of {@link FeedException} or one of its sub-classes.
+ */
CompletableFuture<Result> put(DocumentId documentId, String documentJson, OperationParameters params);
- /** Send a document update with the given parameters, returning a future with the result of the operation. */
+ /**
+ * Send a document update with the given parameters, returning a future with the result of the operation.
+ * Exceptional completion will be an instance of {@link FeedException} or one of its sub-classes.
+ */
CompletableFuture<Result> update(DocumentId documentId, String updateJson, OperationParameters params);
- /** Send a document remove with the given parameters, returning a future with the result of the operation. */
+ /** Send a document remove with the given parameters, returning a future with the result of the operation.
+ * Exceptional completion will be an instance of {@link FeedException} or one of its sub-classes.
+ */
CompletableFuture<Result> remove(DocumentId documentId, OperationParameters params);
/** Returns a snapshot of the stats for this feed client, such as requests made, and responses by status. */
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java
index e1c6c733e9c..54e11d3a185 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java
@@ -1,6 +1,8 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.feed.client;
+import java.util.Optional;
+
/**
* Signals that an error occurred during feeding
*
@@ -8,10 +10,38 @@ package ai.vespa.feed.client;
*/
public class FeedException extends RuntimeException {
- public FeedException(String message) { super(message); }
+ private final DocumentId documentId;
+
+ public FeedException(String message) {
+ super(message);
+ this.documentId = null;
+ }
+
+ public FeedException(DocumentId documentId, String message) {
+ super(message);
+ this.documentId = documentId;
+ }
+
+ public FeedException(String message, Throwable cause) {
+ super(message, cause);
+ this.documentId = null;
+ }
+
+ public FeedException(Throwable cause) {
+ super(cause);
+ this.documentId = null;
+ }
+
+ public FeedException(DocumentId documentId, Throwable cause) {
+ super(cause);
+ this.documentId = documentId;
+ }
- public FeedException(String message, Throwable cause) { super(message, cause); }
+ public FeedException(DocumentId documentId, String message, Throwable cause) {
+ super(message, cause);
+ this.documentId = documentId;
+ }
- public FeedException(Throwable cause) { super(cause); }
+ public Optional<DocumentId> documentId() { return Optional.ofNullable(documentId); }
}
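A short sketch of how feeding code might use the richer exception; the helper and its arguments are illustrative, and only the documentId() accessor and constructors above are taken from this change:

import ai.vespa.feed.client.DocumentId;
import ai.vespa.feed.client.FeedClient;
import ai.vespa.feed.client.FeedException;
import ai.vespa.feed.client.OperationParameters;
import ai.vespa.feed.client.Result;

import java.util.concurrent.CompletableFuture;

class FeedErrorReporting {

    static CompletableFuture<Result> putAndReport(FeedClient client, DocumentId id, String json, OperationParameters params) {
        return client.put(id, json, params)
                     .whenComplete((result, error) -> {
                         if (error instanceof FeedException) {
                             FeedException e = (FeedException) error;
                             // documentId() is empty when the failure is not tied to a single document.
                             System.err.println("Feed failed for "
                                     + e.documentId().map(DocumentId::toString).orElse("<no document>")
                                     + ": " + e.getMessage());
                         }
                     });
    }
}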
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java
index b160cced4b9..2269c56cde4 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java
@@ -6,7 +6,6 @@ import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;
import java.io.IOException;
-import java.io.UncheckedIOException;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.util.HashMap;
@@ -102,7 +101,10 @@ class HttpFeedClient implements FeedClient {
try {
JsonParser parser = factory.createParser(response.body());
if (parser.nextToken() != JsonToken.START_OBJECT)
- throw new IllegalArgumentException("Expected '" + JsonToken.START_OBJECT + "', but found '" + parser.currentToken() + "' in: " + new String(response.body(), UTF_8));
+ throw new ResultParseException(
+ documentId,
+ "Expected '" + JsonToken.START_OBJECT + "', but found '" + parser.currentToken() + "' in: "
+ + new String(response.body(), UTF_8));
String name;
while ((name = parser.nextFieldName()) != null) {
@@ -114,15 +116,20 @@ class HttpFeedClient implements FeedClient {
}
if (parser.currentToken() != JsonToken.END_OBJECT)
- throw new IllegalArgumentException("Expected '" + JsonToken.END_OBJECT + "', but found '" + parser.currentToken() + "' in: " + new String(response.body(), UTF_8));
+ throw new ResultParseException(
+ documentId,
+ "Expected '" + JsonToken.END_OBJECT + "', but found '" + parser.currentToken() + "' in: "
+ + new String(response.body(), UTF_8));
}
catch (IOException e) {
- throw new UncheckedIOException(e);
+ throw new ResultParseException(documentId, e);
}
if (type == null) // Not a Vespa response, but a failure in the HTTP layer.
- throw new FeedException("Status " + response.code() + " executing '" + request +
- "': " + (message == null ? new String(response.body(), UTF_8) : message));
+ throw new ResultParseException(
+ documentId,
+ "Status " + response.code() + " executing '" + request + "': "
+ + (message == null ? new String(response.body(), UTF_8) : message));
return new Result(type, documentId, message, trace);
}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java
index 6b2aec5d8b3..98ff3a5d921 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java
@@ -228,6 +228,14 @@ class HttpRequestStrategy implements RequestStrategy {
releaseSlot();
});
+ result.handle((response, error) -> {
+ if (error != null) {
+ if (error instanceof FeedException) throw (FeedException)error;
+ throw new FeedException(documentId, error);
+ }
+ return response;
+ });
+
return result;
}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonFeeder.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonFeeder.java
index b3a7aca1808..0ba373eef18 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonFeeder.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonFeeder.java
@@ -10,7 +10,6 @@ import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import java.io.InterruptedIOException;
-import java.io.UncheckedIOException;
import java.time.Duration;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
@@ -58,13 +57,13 @@ public class JsonFeeder implements Closeable {
* @param result Non-null if operation completed successfully
* @param error Non-null if operation failed
*/
- default void onNextResult(Result result, Throwable error) { }
+ default void onNextResult(Result result, FeedException error) { }
/**
* Invoked if an unrecoverable error occurred during feed processing,
* after which no other {@link ResultCallback} methods are invoked.
*/
- default void onError(Throwable error) { }
+ default void onError(FeedException error) { }
/**
* Invoked when all feed operations are either completed successfully or failed.
@@ -81,6 +80,7 @@ public class JsonFeeder implements Closeable {
* "fields": { ... document fields ... }
* }
* </pre>
+ * Exceptional completion will be an instance of {@link FeedException} or one of its sub-classes.
*/
public CompletableFuture<Result> feedSingle(String json) {
CompletableFuture<Result> result = new CompletableFuture<>();
@@ -94,7 +94,7 @@ public class JsonFeeder implements Closeable {
}
}, resultExecutor);
} catch (Exception e) {
- resultExecutor.execute(() -> result.completeExceptionally(e));
+ resultExecutor.execute(() -> result.completeExceptionally(wrapException(e)));
}
return result;
}
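With the narrowed callback signatures, a ResultCallback implementation receives FeedException directly instead of a raw Throwable. A sketch, assuming the nested interface is accessible as JsonFeeder.ResultCallback; the counting logic is illustrative:

import ai.vespa.feed.client.FeedException;
import ai.vespa.feed.client.JsonFeeder;
import ai.vespa.feed.client.Result;

import java.util.concurrent.atomic.AtomicLong;

class CountingCallback implements JsonFeeder.ResultCallback {

    private final AtomicLong failures = new AtomicLong();

    @Override
    public void onNextResult(Result result, FeedException error) {
        if (error != null) {
            failures.incrementAndGet();
            error.documentId().ifPresent(id -> System.err.println("Operation failed for " + id));
        }
    }

    @Override
    public void onError(FeedException error) {
        System.err.println("Feed aborted: " + error.getMessage());
    }

    @Override
    public void onComplete() {
        System.err.println("Feed finished with " + failures.get() + " failed operations");
    }
}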
@@ -123,27 +123,32 @@ public class JsonFeeder implements Closeable {
* ]
* </pre>
* Note that {@code "id"} is an alias for the document put operation.
+ * Exceptional completion will be an instance of {@link FeedException} or one of its sub-classes.
*/
public CompletableFuture<Void> feedMany(InputStream jsonStream, ResultCallback resultCallback) {
return feedMany(jsonStream, 1 << 26, resultCallback);
}
+ /**
+ * Same as {@link #feedMany(InputStream, ResultCallback)}, but without a provided {@link ResultCallback} instance.
+ * @see JsonFeeder#feedMany(InputStream, ResultCallback) for details.
+ */
public CompletableFuture<Void> feedMany(InputStream jsonStream) {
return feedMany(jsonStream, new ResultCallback() { });
}
CompletableFuture<Void> feedMany(InputStream jsonStream, int size, ResultCallback resultCallback) {
- RingBufferStream buffer = new RingBufferStream(jsonStream, size);
CompletableFuture<Void> overallResult = new CompletableFuture<>();
CompletableFuture<Result> result;
AtomicInteger pending = new AtomicInteger(1); // The below dispatch loop itself is counted as a single pending operation
AtomicBoolean finalCallbackInvoked = new AtomicBoolean();
try {
+ RingBufferStream buffer = new RingBufferStream(jsonStream, size);
while ((result = buffer.next()) != null) {
pending.incrementAndGet();
result.whenCompleteAsync((r, t) -> {
if (!finalCallbackInvoked.get()) {
- resultCallback.onNextResult(r, t);
+ resultCallback.onNextResult(r, (FeedException) t);
}
if (pending.decrementAndGet() == 0 && finalCallbackInvoked.compareAndSet(false, true)) {
resultCallback.onComplete();
@@ -160,8 +165,9 @@ public class JsonFeeder implements Closeable {
} catch (Exception e) {
if (finalCallbackInvoked.compareAndSet(false, true)) {
resultExecutor.execute(() -> {
- resultCallback.onError(e);
- overallResult.completeExceptionally(e);
+ FeedException wrapped = wrapException(e);
+ resultCallback.onError(wrapped);
+ overallResult.completeExceptionally(wrapped);
});
}
}
@@ -182,6 +188,14 @@ public class JsonFeeder implements Closeable {
}
}
+ private FeedException wrapException(Exception e) {
+ if (e instanceof FeedException) return (FeedException) e;
+ if (e instanceof IOException) {
+ return new OperationParseException("Failed to parse document JSON: " + e.getMessage(), e);
+ }
+ return new FeedException(e);
+ }
+
private class RingBufferStream extends InputStream {
private final byte[] b = new byte[1];
@@ -189,22 +203,21 @@ public class JsonFeeder implements Closeable {
private final byte[] data;
private final int size;
private final Object lock = new Object();
- private Throwable thrown = null;
+ private IOException thrown = null;
private long tail = 0;
private long pos = 0;
private long head = 0;
private boolean done = false;
private final OperationParserAndExecutor parserAndExecutor;
- RingBufferStream(InputStream in, int size) {
+ RingBufferStream(InputStream in, int size) throws IOException {
this.in = in;
this.data = new byte[size];
this.size = size;
new Thread(this::fill, "feed-reader").start();
- try { this.parserAndExecutor = new RingBufferBackedOperationParserAndExecutor(factory.createParser(this)); }
- catch (IOException e) { throw new UncheckedIOException(e); }
+ this.parserAndExecutor = new RingBufferBackedOperationParserAndExecutor(factory.createParser(this));
}
@Override
@@ -220,7 +233,7 @@ public class JsonFeeder implements Closeable {
while ((ready = (int) (head - pos)) == 0 && ! done)
lock.wait();
}
- if (thrown != null) throw new RuntimeException("Error reading input", thrown);
+ if (thrown != null) throw thrown;
if (ready == 0) return -1;
ready = min(ready, len);
@@ -273,7 +286,7 @@ public class JsonFeeder implements Closeable {
while (true) {
int free;
synchronized (lock) {
- while ((free = (int) (tail + size - head)) <= 0 && ! done)
+ while ((free = (int) (tail + size - head)) <= 0 && !done)
lock.wait();
}
if (done) break;
@@ -288,18 +301,22 @@ public class JsonFeeder implements Closeable {
lock.notify();
}
}
- }
- catch (Throwable t) {
+ } catch (InterruptedException e) {
synchronized (lock) {
done = true;
- thrown = t;
+ thrown = new InterruptedIOException("Interrupted reading data: " + e.getMessage());
+ }
+ } catch (IOException e) {
+ synchronized (lock) {
+ done = true;
+ thrown = e;
}
}
}
private class RingBufferBackedOperationParserAndExecutor extends OperationParserAndExecutor {
- RingBufferBackedOperationParserAndExecutor(JsonParser parser) throws IOException { super(parser, true); }
+ RingBufferBackedOperationParserAndExecutor(JsonParser parser) { super(parser, true); }
@Override
String getDocumentJson(long start, long end) {
@@ -334,7 +351,7 @@ public class JsonFeeder implements Closeable {
private final boolean multipleOperations;
private boolean arrayPrefixParsed;
- protected OperationParserAndExecutor(JsonParser parser, boolean multipleOperations) throws IOException {
+ protected OperationParserAndExecutor(JsonParser parser, boolean multipleOperations) {
this.parser = parser;
this.multipleOperations = multipleOperations;
}
@@ -342,82 +359,78 @@ public class JsonFeeder implements Closeable {
abstract String getDocumentJson(long start, long end);
CompletableFuture<Result> next() throws IOException {
- try {
- if (multipleOperations && !arrayPrefixParsed){
- expect(START_ARRAY);
- arrayPrefixParsed = true;
- }
+ if (multipleOperations && !arrayPrefixParsed){
+ expect(START_ARRAY);
+ arrayPrefixParsed = true;
+ }
- JsonToken token = parser.nextToken();
- if (token == END_ARRAY && multipleOperations) return null;
- else if (token == null && !multipleOperations) return null;
- else if (token == START_OBJECT);
- else throw new JsonParseException("Unexpected token '" + parser.currentToken() + "' at offset " + parser.getTokenLocation().getByteOffset());
- long start = 0, end = -1;
- OperationType type = null;
- DocumentId id = null;
- OperationParameters parameters = protoParameters;
- loop: while (true) {
- switch (parser.nextToken()) {
- case FIELD_NAME:
- switch (parser.getText()) {
- case "id":
- case "put": type = PUT; id = readId(); break;
- case "update": type = UPDATE; id = readId(); break;
- case "remove": type = REMOVE; id = readId(); break;
- case "condition": parameters = parameters.testAndSetCondition(readString()); break;
- case "create": parameters = parameters.createIfNonExistent(readBoolean()); break;
- case "fields": {
- expect(START_OBJECT);
- start = parser.getTokenLocation().getByteOffset();
- int depth = 1;
- while (depth > 0) switch (parser.nextToken()) {
- case START_OBJECT: ++depth; break;
- case END_OBJECT: --depth; break;
- }
- end = parser.getTokenLocation().getByteOffset() + 1;
- break;
+ JsonToken token = parser.nextToken();
+ if (token == END_ARRAY && multipleOperations) return null;
+ else if (token == null && !multipleOperations) return null;
+ else if (token == START_OBJECT);
+ else throw new OperationParseException("Unexpected token '" + parser.currentToken() + "' at offset " + parser.getTokenLocation().getByteOffset());
+ long start = 0, end = -1;
+ OperationType type = null;
+ DocumentId id = null;
+ OperationParameters parameters = protoParameters;
+ loop: while (true) {
+ switch (parser.nextToken()) {
+ case FIELD_NAME:
+ switch (parser.getText()) {
+ case "id":
+ case "put": type = PUT; id = readId(); break;
+ case "update": type = UPDATE; id = readId(); break;
+ case "remove": type = REMOVE; id = readId(); break;
+ case "condition": parameters = parameters.testAndSetCondition(readString()); break;
+ case "create": parameters = parameters.createIfNonExistent(readBoolean()); break;
+ case "fields": {
+ expect(START_OBJECT);
+ start = parser.getTokenLocation().getByteOffset();
+ int depth = 1;
+ while (depth > 0) switch (parser.nextToken()) {
+ case START_OBJECT: ++depth; break;
+ case END_OBJECT: --depth; break;
}
- default: throw new JsonParseException("Unexpected field name '" + parser.getText() + "' at offset " +
- parser.getTokenLocation().getByteOffset());
+ end = parser.getTokenLocation().getByteOffset() + 1;
+ break;
}
- break;
+ default: throw new OperationParseException("Unexpected field name '" + parser.getText() + "' at offset " +
+ parser.getTokenLocation().getByteOffset());
+ }
+ break;
- case END_OBJECT:
- break loop;
+ case END_OBJECT:
+ break loop;
- default:
- throw new JsonParseException("Unexpected token '" + parser.currentToken() + "' at offset " +
- parser.getTokenLocation().getByteOffset());
- }
- }
- if (id == null)
- throw new JsonParseException("No document id for document at offset " + start);
-
- if (end < start)
- throw new JsonParseException("No 'fields' object for document at offset " + parser.getTokenLocation().getByteOffset());
- String payload = getDocumentJson(start, end);
- switch (type) {
- case PUT: return client.put (id, payload, parameters);
- case UPDATE: return client.update(id, payload, parameters);
- case REMOVE: return client.remove(id, parameters);
- default: throw new JsonParseException("Unexpected operation type '" + type + "'");
+ default:
+ throw new OperationParseException("Unexpected token '" + parser.currentToken() + "' at offset " +
+ parser.getTokenLocation().getByteOffset());
}
- } catch (com.fasterxml.jackson.core.JacksonException e) {
- throw new JsonParseException("Failed to parse JSON", e);
+ }
+ if (id == null)
+ throw new OperationParseException("No document id for document at offset " + start);
+
+ if (end < start)
+ throw new OperationParseException("No 'fields' object for document at offset " + parser.getTokenLocation().getByteOffset());
+ String payload = getDocumentJson(start, end);
+ switch (type) {
+ case PUT: return client.put (id, payload, parameters);
+ case UPDATE: return client.update(id, payload, parameters);
+ case REMOVE: return client.remove(id, parameters);
+ default: throw new OperationParseException("Unexpected operation type '" + type + "'");
}
}
private void expect(JsonToken token) throws IOException {
if (parser.nextToken() != token)
- throw new JsonParseException("Expected '" + token + "' at offset " + parser.getTokenLocation().getByteOffset() +
+ throw new OperationParseException("Expected '" + token + "' at offset " + parser.getTokenLocation().getByteOffset() +
", but found '" + parser.currentToken() + "' (" + parser.getText() + ")");
}
private String readString() throws IOException {
String value = parser.nextTextValue();
if (value == null)
- throw new JsonParseException("Expected '" + VALUE_STRING + "' at offset " + parser.getTokenLocation().getByteOffset() +
+ throw new OperationParseException("Expected '" + VALUE_STRING + "' at offset " + parser.getTokenLocation().getByteOffset() +
", but found '" + parser.currentToken() + "' (" + parser.getText() + ")");
return value;
@@ -426,7 +439,7 @@ public class JsonFeeder implements Closeable {
private boolean readBoolean() throws IOException {
Boolean value = parser.nextBooleanValue();
if (value == null)
- throw new JsonParseException("Expected '" + VALUE_FALSE + "' or '" + VALUE_TRUE + "' at offset " + parser.getTokenLocation().getByteOffset() +
+ throw new OperationParseException("Expected '" + VALUE_FALSE + "' or '" + VALUE_TRUE + "' at offset " + parser.getTokenLocation().getByteOffset() +
", but found '" + parser.currentToken() + "' (" + parser.getText() + ")");
return value;
@@ -439,7 +452,6 @@ public class JsonFeeder implements Closeable {
}
-
public static class Builder {
final FeedClient client;
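
The callback changes above narrow the public JsonFeeder surface: per-operation failures now arrive as a FeedException in onNextResult, and unrecoverable failures as a FeedException in onError. A minimal usage sketch of the revised API follows; the endpoint, file name and builder arguments are assumptions for illustration only and are not part of this change.

    // Sketch: consuming the FeedException-typed callbacks introduced in this diff (illustrative only).
    import ai.vespa.feed.client.FeedClient;
    import ai.vespa.feed.client.FeedClientBuilder;
    import ai.vespa.feed.client.FeedException;
    import ai.vespa.feed.client.JsonFeeder;
    import ai.vespa.feed.client.Result;

    import java.io.InputStream;
    import java.net.URI;
    import java.nio.file.Files;
    import java.nio.file.Paths;

    class FeedManyExample {
        public static void main(String[] args) throws Exception {
            FeedClient client = FeedClientBuilder.create(URI.create("https://localhost:4443/")).build(); // hypothetical endpoint
            try (JsonFeeder feeder = JsonFeeder.builder(client).build();
                 InputStream in = Files.newInputStream(Paths.get("documents.json"))) {                   // hypothetical file
                feeder.feedMany(in, new JsonFeeder.ResultCallback() {
                    @Override public void onNextResult(Result result, FeedException error) {
                        if (error != null)                                        // one operation failed; feeding continues
                            System.err.println("Operation failed: " + error.getMessage());
                    }
                    @Override public void onError(FeedException error) {          // unrecoverable; no further callbacks
                        System.err.println("Feed aborted: " + error.getMessage());
                    }
                    @Override public void onComplete() {
                        System.out.println("All operations completed or failed");
                    }
                }).join();
            }
        }
    }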
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonParseException.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonParseException.java
deleted file mode 100644
index 8edf74ec275..00000000000
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonParseException.java
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.feed.client;
-
-/**
- * Signals that supplied JSON is invalid
- *
- * @author bjorncs
- */
-public class JsonParseException extends FeedException {
-
- public JsonParseException(String message) { super(message); }
-
- public JsonParseException(String message, Throwable cause) { super(message, cause); }
-
-}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParseException.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParseException.java
new file mode 100644
index 00000000000..15ba024bb4e
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParseException.java
@@ -0,0 +1,15 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+/**
+ * Signals that the supplied JSON for a document/operation is invalid
+ *
+ * @author bjorncs
+ */
+public class OperationParseException extends FeedException {
+
+ public OperationParseException(String message) { super(message); }
+
+ public OperationParseException(String message, Throwable cause) { super(message, cause); }
+
+}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/ResultParseException.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/ResultParseException.java
new file mode 100644
index 00000000000..3fd5143e2f4
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/ResultParseException.java
@@ -0,0 +1,14 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+/**
+ * Signals that the client was unable to parse the result/response from the container
+ *
+ * @author bjorncs
+ */
+public class ResultParseException extends FeedException {
+
+ public ResultParseException(DocumentId documentId, String message) { super(documentId, message); }
+
+ public ResultParseException(DocumentId documentId, Throwable cause) { super(documentId, cause); }
+}
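
With JsonParseException replaced by OperationParseException, and ResultParseException added for responses the client cannot parse, callers can branch on the concrete FeedException subtype. The sketch below shows that pattern for feedSingle; it assumes a JsonFeeder instance and the imports from the previous sketch, plus DocumentId, OperationParseException and ResultParseException.

    // Sketch: branching on the new FeedException subtypes after feedSingle (illustrative only).
    static void feedOne(JsonFeeder feeder, String json) {
        feeder.feedSingle(json).whenComplete((result, error) -> {
            if (error == null) {
                System.out.println("Fed " + result.documentId());
            } else if (error instanceof OperationParseException) {   // the supplied operation JSON was invalid
                System.err.println("Bad input, skipping: " + error.getMessage());
            } else if (error instanceof ResultParseException) {      // the container response could not be parsed
                System.err.println("Unparseable response: " + error.getMessage());
            } else if (error instanceof FeedException) {             // any other feed failure
                FeedException fe = (FeedException) error;
                System.err.println("Failed for "
                        + fe.documentId().map(DocumentId::toString).orElse("<unknown>")
                        + ": " + fe.getMessage());
            }
        });
    }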
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonFeederTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonFeederTest.java
index 03194e23d47..3e0f886a40a 100644
--- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonFeederTest.java
+++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonFeederTest.java
@@ -46,7 +46,7 @@ class JsonFeederTest {
" }\n" +
" }\n" +
"]";
- AtomicReference<Throwable> exceptionThrow = new AtomicReference<>();
+ AtomicReference<FeedException> exceptionThrow = new AtomicReference<>();
Path tmpFile = Files.createTempFile(null, null);
Files.write(tmpFile, json.getBytes(UTF_8));
try (InputStream in = Files.newInputStream(tmpFile, StandardOpenOption.READ, StandardOpenOption.DELETE_ON_CLOSE)) {
@@ -58,10 +58,10 @@ class JsonFeederTest {
.feedMany(in, 1 << 7,
new JsonFeeder.ResultCallback() { // TODO: hangs when buffer is smaller than largest document
@Override
- public void onNextResult(Result result, Throwable error) { resultsReceived.incrementAndGet(); }
+ public void onNextResult(Result result, FeedException error) { resultsReceived.incrementAndGet(); }
@Override
- public void onError(Throwable error) { exceptionThrow.set(error); }
+ public void onError(FeedException error) { exceptionThrow.set(error); }
@Override
public void onComplete() { completedSuccessfully.set(true); }
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java
index 579adf9048f..1e616f2625a 100644
--- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java
+++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java
@@ -1,8 +1,10 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.feed.client.examples;
+import ai.vespa.feed.client.DocumentId;
import ai.vespa.feed.client.FeedClient;
import ai.vespa.feed.client.FeedClientBuilder;
+import ai.vespa.feed.client.FeedException;
import ai.vespa.feed.client.JsonFeeder;
import ai.vespa.feed.client.Result;
@@ -32,10 +34,11 @@ class JsonFileFeederExample implements Closeable {
         final long startTimeMillis = System.currentTimeMillis();
@Override
- public void onNextResult(Result result, Throwable error) {
+ public void onNextResult(Result result, FeedException error) {
resultsReceived.incrementAndGet();
if (error != null) {
- log.warning("Problems with feeding document");
+ log.warning("Problems with feeding document "
+ + error.documentId().map(DocumentId::toString).orElse("<unknown>"));
errorsReceived.incrementAndGet();
} else if (result.type() == Result.Type.failure) {
log.warning("Problems with docID " + result.documentId() + ":" + error);
@@ -44,8 +47,8 @@ class JsonFileFeederExample implements Closeable {
}
@Override
- public void onError(Throwable error) {
- log.severe("Feeding failed: " + error.getMessage());
+ public void onError(FeedException error) {
+            log.severe("Feeding failed: " + error.getMessage());
}
@Override
diff --git a/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaRecordWriter.java b/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaRecordWriter.java
index 15b3d2e9d7d..1c370b14b82 100644
--- a/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaRecordWriter.java
+++ b/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaRecordWriter.java
@@ -6,7 +6,7 @@ import ai.vespa.feed.client.DryrunResult;
import ai.vespa.feed.client.FeedClient;
import ai.vespa.feed.client.FeedClientBuilder;
import ai.vespa.feed.client.JsonFeeder;
-import ai.vespa.feed.client.JsonParseException;
+import ai.vespa.feed.client.OperationParseException;
import ai.vespa.feed.client.OperationParameters;
import ai.vespa.feed.client.OperationStats;
import ai.vespa.feed.client.Result;
@@ -23,6 +23,7 @@ import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ThreadLocalRandom;
+import java.util.logging.Level;
import java.util.logging.Logger;
import static java.util.stream.Collectors.toList;
@@ -54,13 +55,13 @@ public class VespaRecordWriter extends RecordWriter<Object, Object> {
feeder.feedSingle(json)
.whenComplete((result, error) -> {
if (error != null) {
- if (error instanceof JsonParseException) {
+ if (error instanceof OperationParseException) {
counters.incrementDocumentsSkipped(1);
} else {
String msg = "Failed to feed single document: " + error;
System.out.println(msg);
System.err.println(msg);
- log.warning(msg);
+ log.log(Level.WARNING, msg, error);
counters.incrementDocumentsFailed(1);
}
} else {
diff --git a/vespajlib/src/main/java/com/yahoo/concurrent/maintenance/JobMetrics.java b/vespajlib/src/main/java/com/yahoo/concurrent/maintenance/JobMetrics.java
index 73a5dc77743..da5a596edea 100644
--- a/vespajlib/src/main/java/com/yahoo/concurrent/maintenance/JobMetrics.java
+++ b/vespajlib/src/main/java/com/yahoo/concurrent/maintenance/JobMetrics.java
@@ -10,29 +10,10 @@ import java.util.concurrent.ConcurrentHashMap;
*/
public abstract class JobMetrics {
- private final ConcurrentHashMap<String, Long> incompleteRuns = new ConcurrentHashMap<>();
-
- /** Record starting of a run of a job */
- public void starting(String job) {
- incompleteRuns.merge(job, 1L, Long::sum);
- }
-
- /** Record completion of given job */
- public void recordCompletionOf(String job) {
- incompleteRuns.put(job, 0L);
- }
-
/**
* Records completion of a run of a job.
- * This is guaranteed to always be called once whenever starting has been called.
+ * This is guaranteed to always be called once after each maintainer run.
*/
- public void completed(String job, double successFactor) {
- Long incompleteRuns = this.incompleteRuns.get(job);
- if (incompleteRuns != null) {
- recordCompletion(job, incompleteRuns, successFactor);
- }
- }
-
- protected abstract void recordCompletion(String job, Long incompleteRuns, double successFactor);
+ public abstract void completed(String job, double successFactor);
}
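
JobMetrics is reduced to a single completed(job, successFactor) hook, invoked once after each maintainer run. A minimal implementation under the new contract could look like the sketch below; the class name and the idea of exposing the value as a per-job gauge are assumptions for illustration.

    // Sketch: a JobMetrics implementation under the simplified single-hook contract (illustrative only).
    import java.util.concurrent.ConcurrentHashMap;

    public class LastRunJobMetrics extends JobMetrics {

        private final ConcurrentHashMap<String, Double> successFactorByJob = new ConcurrentHashMap<>();

        @Override
        public void completed(String job, double successFactor) {
            successFactorByJob.put(job, successFactor);   // could be exported as a gauge per job
        }

        /** Latest success factor reported for the given job, or 1.0 if it has not run yet. */
        public double successFactorOf(String job) {
            return successFactorByJob.getOrDefault(job, 1.0);
        }
    }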
diff --git a/vespajlib/src/main/java/com/yahoo/concurrent/maintenance/Maintainer.java b/vespajlib/src/main/java/com/yahoo/concurrent/maintenance/Maintainer.java
index 2a9e6dda6b6..3a5c7e3421d 100644
--- a/vespajlib/src/main/java/com/yahoo/concurrent/maintenance/Maintainer.java
+++ b/vespajlib/src/main/java/com/yahoo/concurrent/maintenance/Maintainer.java
@@ -104,21 +104,19 @@ public abstract class Maintainer implements Runnable {
public final void lockAndMaintain(boolean force) {
if (!force && !jobControl.isActive(name())) return;
log.log(Level.FINE, () -> "Running " + this.getClass().getSimpleName());
- jobMetrics.starting(name());
+
double successFactor = 0;
try (var lock = jobControl.lockJob(name())) {
successFactor = maintain();
- if (successFactor > 0.0)
- jobMetrics.recordCompletionOf(name());
- } catch (UncheckedTimeoutException e) {
- if (ignoreCollision) {
- jobMetrics.recordCompletionOf(name());
- } else {
+ }
+ catch (UncheckedTimeoutException e) {
+ if ( ! ignoreCollision)
log.log(Level.WARNING, this + " collided with another run. Will retry in " + interval);
- }
- } catch (Throwable e) {
+ }
+ catch (Throwable e) {
log.log(Level.WARNING, this + " failed. Will retry in " + interval, e);
- } finally {
+ }
+ finally {
jobMetrics.completed(name(), successFactor);
}
log.log(Level.FINE, () -> "Finished " + this.getClass().getSimpleName());
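
Since lockAndMaintain() now forwards the return value of maintain() straight to JobMetrics.completed(), a maintainer can report partial success as a fraction rather than a boolean. The sketch below shows that shape; the Item type and the items()/process() helpers are hypothetical.

    // Sketch: maintain() reporting a fractional success factor (Item, items() and process() are hypothetical).
    @Override
    protected double maintain() {
        List<Item> items = items();
        if (items.isEmpty()) return 1.0;                     // nothing to do counts as full success
        long succeeded = items.stream().filter(this::process).count();
        return (double) succeeded / items.size();            // 0.0 = total failure, 1.0 = full success
    }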
diff --git a/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/JobControlTest.java b/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/JobControlTest.java
index 01560c050ff..5700be36413 100644
--- a/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/JobControlTest.java
+++ b/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/JobControlTest.java
@@ -84,7 +84,7 @@ public class JobControlTest {
private static class NoopJobMetrics extends JobMetrics {
@Override
- protected void recordCompletion(String job, Long incompleteRuns, double successFactor) { }
+ public void completed(String job, double successFactor) { }
}
diff --git a/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/MaintainerTest.java b/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/MaintainerTest.java
index d2db380f4a1..7c196dc6627 100644
--- a/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/MaintainerTest.java
+++ b/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/MaintainerTest.java
@@ -7,7 +7,6 @@ import org.junit.Test;
import java.time.Duration;
import java.time.Instant;
import java.util.List;
-import java.util.concurrent.atomic.AtomicLong;
import static org.junit.Assert.assertEquals;
@@ -16,6 +15,8 @@ import static org.junit.Assert.assertEquals;
*/
public class MaintainerTest {
+ private static final double delta = 0.000001;
+
private final JobControl jobControl = new JobControl(new JobControlStateMock());
@Test
@@ -45,40 +46,34 @@ public class MaintainerTest {
TestJobMetrics jobMetrics = new TestJobMetrics();
TestMaintainer maintainer = new TestMaintainer(null, jobControl, jobMetrics);
- // Maintainer fails twice in a row
- maintainer.successOnNextRun(false).run();
- assertEquals(1, jobMetrics.consecutiveFailures.get());
- maintainer.successOnNextRun(false).run();
- assertEquals(2, jobMetrics.consecutiveFailures.get());
-
- // Maintainer runs successfully
- maintainer.successOnNextRun(true).run();
- assertEquals(0, jobMetrics.consecutiveFailures.get());
-
- // Maintainer runs successfully again
- maintainer.run();
- assertEquals(0, jobMetrics.consecutiveFailures.get());
+ maintainer.successOnNextRun(1.0).run();
+ assertEquals(1, jobMetrics.successFactor, delta);
+ maintainer.successOnNextRun(0.0).run();
+ assertEquals(0, jobMetrics.successFactor, delta);
+ maintainer.successOnNextRun(0.1).run();
+ assertEquals(0.1, jobMetrics.successFactor, delta);
// Maintainer throws
maintainer.throwOnNextRun(new RuntimeException()).run();
- assertEquals(1, jobMetrics.consecutiveFailures.get());
+ assertEquals(0, jobMetrics.successFactor, delta);
// Maintainer recovers
maintainer.throwOnNextRun(null).run();
- assertEquals(0, jobMetrics.consecutiveFailures.get());
+ maintainer.successOnNextRun(1.0).run();
+ assertEquals(1, jobMetrics.successFactor, delta);
// Lock exception is treated as a failure
maintainer.throwOnNextRun(new UncheckedTimeoutException()).run();
- assertEquals(1, jobMetrics.consecutiveFailures.get());
+ assertEquals(0, jobMetrics.successFactor, delta);
}
private static class TestJobMetrics extends JobMetrics {
- AtomicLong consecutiveFailures = new AtomicLong();
+ double successFactor = 0.0;
@Override
- protected void recordCompletion(String job, Long incompleteRuns, double successFactor) {
- consecutiveFailures.set(incompleteRuns);
+ public void completed(String job, double successFactor) {
+ this.successFactor = successFactor;
}
}
diff --git a/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/TestMaintainer.java b/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/TestMaintainer.java
index 7424b17cab2..a109064e101 100644
--- a/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/TestMaintainer.java
+++ b/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/TestMaintainer.java
@@ -11,7 +11,7 @@ import java.util.List;
class TestMaintainer extends Maintainer {
private int totalRuns = 0;
- private boolean success = true;
+ private double success = 1.0;
private RuntimeException exceptionToThrow = null;
public TestMaintainer(String name, JobControl jobControl, JobMetrics jobMetrics) {
@@ -22,7 +22,7 @@ class TestMaintainer extends Maintainer {
return totalRuns;
}
- public TestMaintainer successOnNextRun(boolean success) {
+ public TestMaintainer successOnNextRun(double success) {
this.success = success;
return this;
}
@@ -36,7 +36,7 @@ class TestMaintainer extends Maintainer {
protected double maintain() {
if (exceptionToThrow != null) throw exceptionToThrow;
totalRuns++;
- return success ? 1.0 : 0.0;
+ return success;
}
}