diff options
139 files changed, 2041 insertions, 1055 deletions
diff --git a/config-model/src/main/java/com/yahoo/documentmodel/NewDocumentType.java b/config-model/src/main/java/com/yahoo/documentmodel/NewDocumentType.java index 38d831a0b28..da338ad3107 100644 --- a/config-model/src/main/java/com/yahoo/documentmodel/NewDocumentType.java +++ b/config-model/src/main/java/com/yahoo/documentmodel/NewDocumentType.java @@ -3,7 +3,6 @@ package com.yahoo.documentmodel; import com.yahoo.document.DataType; import com.yahoo.document.Document; -import com.yahoo.document.DocumentId; import com.yahoo.document.Field; import com.yahoo.document.StructDataType; import com.yahoo.document.StructuredDataType; @@ -32,34 +31,6 @@ import static java.util.Collections.emptySet; */ public final class NewDocumentType extends StructuredDataType implements DataTypeCollection { - public static final class Name { - - private final String name; - private final int id; - - public Name(String name) { - this(name.hashCode(), name); - } - - public Name(int id, String name) { - this.id = id; - this.name = name; - } - - public String toString() { return name; } - - public final String getName() { return name; } - - public final int getId() { return id; } - - public int hashCode() { return name.hashCode(); } - - public boolean equals(Object other) { - if ( ! 
(other instanceof Name)) return false; - return name.equals(((Name)other).getName()); - } - } - private final Name name; private final DataTypeRepo dataTypes = new DataTypeRepo(); private final Map<Integer, NewDocumentType> inherits = new LinkedHashMap<>(); @@ -139,7 +110,7 @@ public final class NewDocumentType extends StructuredDataType implements DataTyp } @Override - public Class getValueClass() { + public Class<Document> getValueClass() { return Document.class; } @@ -148,7 +119,8 @@ public final class NewDocumentType extends StructuredDataType implements DataTyp if (!(value instanceof Document)) { return false; } - /** Temporary disabled due to clash with document and covariant return type + /* + Temporary disabled due to clash with document and covariant return type Document doc = (Document) value; if (((NewDocumentType) doc.getDataType()).inherits(this)) { //the value is of this type; or the supertype of the value is of this type, etc.... @@ -162,28 +134,31 @@ public final class NewDocumentType extends StructuredDataType implements DataTyp for (Field f : getFields()) { Field inhF = inherited.getField(f.getName()); if (inhF != null && !inhF.equals(f)) { - throw new IllegalArgumentException("Inherited document '" + inherited.toString() + "' already contains field '" + - inhF.getName() + "'. Can not override with '" + f.getName() + "'."); + throw new IllegalArgumentException("Inherited document '" + inherited + "' already contains field '" + + inhF.getName() + "'. Can not override with '" + f.getName() + "'."); } } for (Field f : inherited.getAllFields()) { for (NewDocumentType side : inherits.values()) { Field sideF = side.getField(f.getName()); if (sideF != null && !sideF.equals(f)) { - throw new IllegalArgumentException("Inherited document '" + side.toString() + "' already contains field '" + - sideF.getName() + "'. 
Document '" + inherited.toString() + "' also defines field '" + f.getName() + - "'.Multiple inheritance must be disjunctive."); + throw new IllegalArgumentException("Inherited document '" + side + "' already contains field '" + + sideF.getName() + "'. Document '" + inherited + + "' also defines field '" + f.getName() + + "'.Multiple inheritance must be disjunctive."); } } } return true; } + public void inherit(NewDocumentType inherited) { if ( ! inherits.containsKey(inherited.getId())) { verifyInheritance(inherited); inherits.put(inherited.getId(), inherited); } } + public boolean inherits(NewDocumentType superType) { if (getId() == superType.getId()) return true; for (NewDocumentType type : inherits.values()) { @@ -243,7 +218,7 @@ public final class NewDocumentType extends StructuredDataType implements DataTyp @Override public Document createFieldValue() { - return new Document(null, (DocumentId)null); + throw new RuntimeException("Cannot create an instance of " + this); } @Override @@ -375,4 +350,36 @@ public final class NewDocumentType extends StructuredDataType implements DataTyp return importedFieldNames; } + public static final class Name { + + private final String name; + private final int id; + + public Name(String name) { + this(name.hashCode(), name); + } + + public Name(int id, String name) { + this.id = id; + this.name = name; + } + + @Override + public String toString() { return name; } + + public final String getName() { return name; } + + public final int getId() { return id; } + + @Override + public int hashCode() { return name.hashCode(); } + + @Override + public boolean equals(Object other) { + if ( ! 
(other instanceof Name)) return false; + return name.equals(((Name)other).getName()); + } + + } + } diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java b/config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java index fed35382b21..9b752c4179f 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java @@ -209,17 +209,13 @@ public class DocumentModelBuilder { private static DataType resolveTemporariesRecurse(DataType type, DataTypeCollection repo, Collection<NewDocumentType> docs) { if (type instanceof TemporaryStructuredDataType) { - NewDocumentType docType = getDocumentType(docs, type.getId()); - if (docType != null) { - type = docType; - return type; - } - DataType real = repo.getDataType(type.getId()); - if (real == null) { - throw new NullPointerException("Can not find type '" + type.toString() + "', impossible."); - } - type = real; - } else if (type instanceof StructDataType) { + DataType struct = repo.getDataType(type.getId()); + if (struct != null) + type = struct; + else + type = getDocumentType(docs, type.getId()); + } + else if (type instanceof StructDataType) { StructDataType dt = (StructDataType) type; for (com.yahoo.document.Field field : dt.getFields()) { if (field.getDataType() != type) { @@ -227,14 +223,17 @@ public class DocumentModelBuilder { field.setDataType(resolveTemporariesRecurse(field.getDataType(), repo, docs)); } } - } else if (type instanceof MapDataType) { + } + else if (type instanceof MapDataType) { MapDataType t = (MapDataType) type; t.setKeyType(resolveTemporariesRecurse(t.getKeyType(), repo, docs)); t.setValueType(resolveTemporariesRecurse(t.getValueType(), repo, docs)); - } else if (type instanceof CollectionDataType) { + } + else if (type instanceof CollectionDataType) { CollectionDataType t = (CollectionDataType) type; 
t.setNestedType(resolveTemporariesRecurse(t.getNestedType(), repo, docs)); - } else if (type instanceof ReferenceDataType) { + } + else if (type instanceof ReferenceDataType) { ReferenceDataType t = (ReferenceDataType) type; if (t.getTargetType() instanceof TemporaryStructuredDataType) { DataType targetType = resolveTemporariesRecurse(t.getTargetType(), repo, docs); diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/Search.java b/config-model/src/main/java/com/yahoo/searchdefinition/Search.java index 4b7b1625a01..9ce1b8bb330 100644 --- a/config-model/src/main/java/com/yahoo/searchdefinition/Search.java +++ b/config-model/src/main/java/com/yahoo/searchdefinition/Search.java @@ -98,7 +98,7 @@ public class Search implements ImmutableSearch { private final DeployLogger deployLogger; private final ModelContext.Properties properties; - /** Testin only */ + /** Testing only */ public Search(String name) { this(name, null, new BaseDeployLogger(), new TestProperties()); } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/ConfigSentinel.java b/config-model/src/main/java/com/yahoo/vespa/model/ConfigSentinel.java index d05913143e4..6203f78fc0c 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/ConfigSentinel.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/ConfigSentinel.java @@ -86,8 +86,8 @@ public class ConfigSentinel extends AbstractService implements SentinelConfig.Pr private SentinelConfig.Connectivity.Builder getConnectivityConfig(boolean enable) { var builder = new SentinelConfig.Connectivity.Builder(); if (enable) { - builder.minOkPercent(40); - builder.maxBadCount(3); + builder.minOkPercent(50); + builder.maxBadCount(2); } else { builder.minOkPercent(0); builder.maxBadCount(Integer.MAX_VALUE); diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java index 51949e78838..efb47e97ccb 100644 --- 
a/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java @@ -316,12 +316,8 @@ public class ContentSearchCluster extends AbstractConfigProducer<SearchCluster> } public void handleRedundancy(Redundancy redundancy) { - if (hasIndexedCluster()) { - if (usesHierarchicDistribution()) { - indexedCluster.setMaxNodesDownPerFixedRow((redundancy.effectiveFinalRedundancy() / groupToSpecMap.size()) - 1); - } + if (hasIndexedCluster()) indexedCluster.setSearchableCopies(redundancy.readyCopies()); - } this.redundancy = redundancy; } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/DispatchTuning.java b/config-model/src/main/java/com/yahoo/vespa/model/content/DispatchTuning.java index 3b694f8986c..786d032578f 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/content/DispatchTuning.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/content/DispatchTuning.java @@ -15,7 +15,6 @@ public class DispatchTuning { private final Integer maxHitsPerPartition; private DispatchPolicy dispatchPolicy; - private final Double minGroupCoverage; private final Double minActiveDocsCoverage; public Double getTopkProbability() { @@ -27,7 +26,6 @@ public class DispatchTuning { private DispatchTuning(Builder builder) { maxHitsPerPartition = builder.maxHitsPerPartition; dispatchPolicy = builder.dispatchPolicy; - minGroupCoverage = builder.minGroupCoverage; minActiveDocsCoverage = builder.minActiveDocsCoverage; topkProbability = builder.topKProbability; } @@ -41,9 +39,6 @@ public class DispatchTuning { @SuppressWarnings("unused") public void setDispatchPolicy(DispatchPolicy dispatchPolicy) { this.dispatchPolicy = dispatchPolicy; } - /** Returns the percentage of nodes in a group which must be up for that group to receive queries */ - public Double getMinGroupCoverage() { return minGroupCoverage; } - /** Returns the percentage of documents which must be 
available in a group for that group to receive queries */ public Double getMinActiveDocsCoverage() { return minActiveDocsCoverage; } @@ -51,7 +46,6 @@ public class DispatchTuning { private Integer maxHitsPerPartition; private DispatchPolicy dispatchPolicy; - private Double minGroupCoverage; private Double minActiveDocsCoverage; private Double topKProbability; @@ -81,11 +75,6 @@ public class DispatchTuning { } } - public Builder setMinGroupCoverage(Double minGroupCoverage) { - this.minGroupCoverage = minGroupCoverage; - return this; - } - public Builder setMinActiveDocsCoverage(Double minCoverage) { this.minActiveDocsCoverage = minCoverage; return this; diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomTuningDispatchBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomTuningDispatchBuilder.java index 64911acae1f..f429e40baa9 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomTuningDispatchBuilder.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomTuningDispatchBuilder.java @@ -25,12 +25,14 @@ public class DomTuningDispatchBuilder { builder.setMaxHitsPerPartition(dispatchElement.childAsInteger("max-hits-per-partition")); builder.setTopKProbability(dispatchElement.childAsDouble("top-k-probability")); builder.setDispatchPolicy(dispatchElement.childAsString("dispatch-policy")); - builder.setMinGroupCoverage(dispatchElement.childAsDouble("min-group-coverage")); builder.setMinActiveDocsCoverage(dispatchElement.childAsDouble("min-active-docs-coverage")); + if (dispatchElement.child("min-group-coverage") != null) + logger.logApplicationPackage(Level.WARNING, "Attribute 'min-group-coverage' is deprecated and ignored: " + + "Use min-active-docs-coverage instead."); if (dispatchElement.child("use-local-node") != null) logger.logApplicationPackage(Level.WARNING, "Attribute 'use-local-node' is deprecated and ignored: " + - "The local node will automatically be 
preferred when appropriate."); + "The local node will automatically be preferred when appropriate."); return builder.build(); } diff --git a/config-model/src/main/java/com/yahoo/vespa/model/search/DispatchGroup.java b/config-model/src/main/java/com/yahoo/vespa/model/search/DispatchGroup.java index 384f77737c1..3e70bda216b 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/search/DispatchGroup.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/search/DispatchGroup.java @@ -54,19 +54,18 @@ public class DispatchGroup { public int getSearchableCopies() { return sc.getSearchableCopies(); } - public int getMaxNodesDownPerFixedRow() { - return sc.getMaxNodesDownPerFixedRow(); - } - static class Iterator implements java.util.Iterator<SearchInterface> { + private java.util.Iterator<Map<Integer, SearchInterface>> it1; private java.util.Iterator<SearchInterface> it2; + Iterator(Map<Integer, Map<Integer, SearchInterface> > s) { it1 = s.values().iterator(); if (it1.hasNext()) { it2 = it1.next().values().iterator(); } } + @Override public boolean hasNext() { if (it2 == null) { diff --git a/config-model/src/main/java/com/yahoo/vespa/model/search/IndexedSearchCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/search/IndexedSearchCluster.java index c77f44d649c..c99549e82e9 100644 --- a/config-model/src/main/java/com/yahoo/vespa/model/search/IndexedSearchCluster.java +++ b/config-model/src/main/java/com/yahoo/vespa/model/search/IndexedSearchCluster.java @@ -46,7 +46,6 @@ public class IndexedSearchCluster extends SearchCluster private String routingSelector = null; private final List<DocumentDatabase> documentDbs = new LinkedList<>(); private final UnionConfiguration unionCfg; - private int maxNodesDownPerFixedRow = 0; private int searchableCopies = 1; @@ -261,13 +260,6 @@ public class IndexedSearchCluster extends SearchCluster return false; } - int getMaxNodesDownPerFixedRow() { - return maxNodesDownPerFixedRow; - } - - public void 
setMaxNodesDownPerFixedRow(int value) { - maxNodesDownPerFixedRow = value; - } public int getSearchableCopies() { return searchableCopies; } @@ -305,8 +297,6 @@ public class IndexedSearchCluster extends SearchCluster } if (tuning.dispatch.getMinActiveDocsCoverage() != null) builder.minActivedocsPercentage(tuning.dispatch.getMinActiveDocsCoverage()); - if (tuning.dispatch.getMinGroupCoverage() != null) - builder.minGroupCoverage(tuning.dispatch.getMinGroupCoverage()); if (tuning.dispatch.getDispatchPolicy() != null) { switch (tuning.dispatch.getDispatchPolicy()) { case ADAPTIVE: @@ -320,7 +310,6 @@ public class IndexedSearchCluster extends SearchCluster if (tuning.dispatch.getMaxHitsPerPartition() != null) builder.maxHitsPerNode(tuning.dispatch.getMaxHitsPerPartition()); - builder.maxNodesDownPerGroup(rootDispatch.getMaxNodesDownPerFixedRow()); builder.searchableCopies(rootDispatch.getSearchableCopies()); if (searchCoverage != null) { if (searchCoverage.getMinimum() != null) diff --git a/config-model/src/main/resources/schema/content.rnc b/config-model/src/main/resources/schema/content.rnc index 7f52eae6da8..36db55c206c 100644 --- a/config-model/src/main/resources/schema/content.rnc +++ b/config-model/src/main/resources/schema/content.rnc @@ -83,7 +83,7 @@ ClusterControllerTuning = element cluster-controller { DispatchTuning = element dispatch { element max-hits-per-partition { xsd:nonNegativeInteger }? & element dispatch-policy { string "round-robin" | string "adaptive" | string "random" }? & - element min-group-coverage { xsd:double }? & + element min-group-coverage { xsd:double }? & # TODO: Ignored, remove on Vespa 8 element min-active-docs-coverage { xsd:double }? & element top-k-probability { xsd:double }? & element use-local-node { string "true" | string "false" }? 
diff --git a/config-model/src/test/derived/namecollision/collision.sd b/config-model/src/test/derived/namecollision/collision.sd new file mode 100644 index 00000000000..43dd4830204 --- /dev/null +++ b/config-model/src/test/derived/namecollision/collision.sd @@ -0,0 +1,8 @@ +# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +search collision { + + document collision { + + } + +} diff --git a/config-model/src/test/derived/namecollision/collisionstruct.sd b/config-model/src/test/derived/namecollision/collisionstruct.sd new file mode 100644 index 00000000000..c98efb0b582 --- /dev/null +++ b/config-model/src/test/derived/namecollision/collisionstruct.sd @@ -0,0 +1,15 @@ +# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +search collisionstruct { + + document collisionstruct { + + struct collision { + } + + field structarray type array<collision> { + indexing: summary + } + + } + +} diff --git a/config-model/src/test/derived/namecollision/documentmanager.cfg b/config-model/src/test/derived/namecollision/documentmanager.cfg new file mode 100644 index 00000000000..8d0d89dde35 --- /dev/null +++ b/config-model/src/test/derived/namecollision/documentmanager.cfg @@ -0,0 +1,55 @@ +enablecompression false +datatype[].id 1381038251 +datatype[].structtype[].name "position" +datatype[].structtype[].version 0 +datatype[].structtype[].compresstype NONE +datatype[].structtype[].compresslevel 0 +datatype[].structtype[].compressthreshold 95 +datatype[].structtype[].compressminsize 800 +datatype[].structtype[].field[].name "x" +datatype[].structtype[].field[].datatype 0 +datatype[].structtype[].field[].detailedtype "" +datatype[].structtype[].field[].name "y" +datatype[].structtype[].field[].datatype 0 +datatype[].structtype[].field[].detailedtype "" +datatype[].id -379118517 +datatype[].structtype[].name "collision.header" +datatype[].structtype[].version 0 
+datatype[].structtype[].compresstype NONE +datatype[].structtype[].compresslevel 0 +datatype[].structtype[].compressthreshold 95 +datatype[].structtype[].compressminsize 800 +datatype[].id 1557022836 +datatype[].documenttype[].name "collision" +datatype[].documenttype[].version 0 +datatype[].documenttype[].inherits[].name "document" +datatype[].documenttype[].inherits[].version 0 +datatype[].documenttype[].headerstruct -379118517 +datatype[].documenttype[].bodystruct 0 +datatype[].id 1557022836 +datatype[].structtype[].name "collision" +datatype[].structtype[].version 0 +datatype[].structtype[].compresstype NONE +datatype[].structtype[].compresslevel 0 +datatype[].structtype[].compressthreshold 95 +datatype[].structtype[].compressminsize 800 +datatype[].id -1730522993 +datatype[].arraytype[].datatype 1557022836 +datatype[].id -1270379114 +datatype[].structtype[].name "collisionstruct.header" +datatype[].structtype[].version 0 +datatype[].structtype[].compresstype NONE +datatype[].structtype[].compresslevel 0 +datatype[].structtype[].compressthreshold 95 +datatype[].structtype[].compressminsize 800 +datatype[].structtype[].field[].name "structarray" +datatype[].structtype[].field[].datatype -1730522993 +datatype[].structtype[].field[].detailedtype "" +datatype[].id -1723079287 +datatype[].documenttype[].name "collisionstruct" +datatype[].documenttype[].version 0 +datatype[].documenttype[].inherits[].name "document" +datatype[].documenttype[].inherits[].version 0 +datatype[].documenttype[].headerstruct -1270379114 +datatype[].documenttype[].bodystruct 0 +datatype[].documenttype[].fieldsets{[]}.fields[] "structarray" diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/derived/NameCollisionTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/derived/NameCollisionTestCase.java new file mode 100644 index 00000000000..fda9e6327ce --- /dev/null +++ b/config-model/src/test/java/com/yahoo/searchdefinition/derived/NameCollisionTestCase.java @@ 
-0,0 +1,20 @@ +// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. + +package com.yahoo.searchdefinition.derived; + +import org.junit.Test; + +/** + * Verifies that a struct in a document type is preferred over another document type + * of the same name. + * + * @author bratseth + */ +public class NameCollisionTestCase extends AbstractExportingTestCase { + + @Test + public void testNameCollision() throws Exception { + assertCorrectDeriving("namecollision", "collisionstruct", new TestableDeployLogger()); + } + +} diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/DispatchTuningTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/DispatchTuningTest.java index 8a46aaaa230..27a01750210 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/content/DispatchTuningTest.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/content/DispatchTuningTest.java @@ -17,12 +17,10 @@ public class DispatchTuningTest { DispatchTuning dispatch = new DispatchTuning.Builder() .setMaxHitsPerPartition(69) .setDispatchPolicy("round-robin") - .setMinGroupCoverage(7.5) .setMinActiveDocsCoverage(12.5) .setTopKProbability(18.3) .build(); assertEquals(69, dispatch.getMaxHitsPerPartition().intValue()); - assertEquals(7.5, dispatch.getMinGroupCoverage().doubleValue(), 0.0); assertEquals(12.5, dispatch.getMinActiveDocsCoverage().doubleValue(), 0.0); assertTrue(DispatchTuning.DispatchPolicy.ROUNDROBIN == dispatch.getDispatchPolicy()); assertEquals(18.3, dispatch.getTopkProbability(), 0.0); @@ -33,7 +31,6 @@ public class DispatchTuningTest { .setDispatchPolicy("random") .build(); assertTrue(DispatchTuning.DispatchPolicy.ADAPTIVE == dispatch.getDispatchPolicy()); - assertNull(dispatch.getMinGroupCoverage()); assertNull(dispatch.getMinActiveDocsCoverage()); } @@ -43,7 +40,6 @@ public class DispatchTuningTest { .setDispatchPolicy("adaptive") .build(); 
assertTrue(DispatchTuning.DispatchPolicy.ADAPTIVE == dispatch.getDispatchPolicy()); - assertNull(dispatch.getMinGroupCoverage()); assertNull(dispatch.getMinActiveDocsCoverage()); } @@ -53,7 +49,6 @@ public class DispatchTuningTest { assertNull(dispatch.getMaxHitsPerPartition()); assertNull(dispatch.getDispatchPolicy()); assertNull(dispatch.getMinActiveDocsCoverage()); - assertNull(dispatch.getMinGroupCoverage()); assertNull(dispatch.getTopkProbability()); } diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/IndexedHierarchicDistributionTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/IndexedHierarchicDistributionTest.java index 3be592e54e7..6c8cb393d3f 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/content/IndexedHierarchicDistributionTest.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/content/IndexedHierarchicDistributionTest.java @@ -201,7 +201,6 @@ public class IndexedHierarchicDistributionTest { assertEquals(8, dg.getRowBits()); assertEquals(3, dg.getNumPartitions()); assertEquals(true, dg.useFixedRowInDispatch()); - assertEquals(1, dg.getMaxNodesDownPerFixedRow()); ArrayList<SearchInterface> list = new ArrayList<>(); for(SearchInterface si : dg.getSearchersIterable()) { list.add(si); diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/cluster/ClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/cluster/ClusterTest.java index afeffbbc875..1426b094971 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/content/cluster/ClusterTest.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/content/cluster/ClusterTest.java @@ -73,7 +73,6 @@ public class ClusterTest { joinLines( "<max-hits-per-partition>77</max-hits-per-partition>", "<dispatch-policy>round-robin</dispatch-policy>", - "<min-group-coverage>13</min-group-coverage>", "<min-active-docs-coverage>93</min-active-docs-coverage>", "<top-k-probability>0.777</top-k-probability>"), false); @@ 
-82,7 +81,6 @@ public class ClusterTest { DispatchConfig config = new DispatchConfig(builder); assertEquals(2, config.searchableCopies()); assertEquals(93.0, config.minActivedocsPercentage(), DELTA); - assertEquals(13.0, config.minGroupCoverage(), DELTA); assertEquals(DispatchConfig.DistributionPolicy.ROUNDROBIN, config.distributionPolicy()); assertEquals(77, config.maxHitsPerNode()); assertEquals(0.777, config.topKProbability(), DELTA); @@ -97,7 +95,6 @@ public class ClusterTest { DispatchConfig config = new DispatchConfig(builder); assertEquals(2, config.searchableCopies()); assertEquals(DispatchConfig.DistributionPolicy.ADAPTIVE, config.distributionPolicy()); - assertEquals(0, config.maxNodesDownPerGroup()); assertEquals(1.0, config.maxWaitAfterCoverageFactor(), DELTA); assertEquals(0, config.minWaitAfterCoverageFactor(), DELTA); assertEquals(8, config.numJrtConnectionsPerNode()); diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/cluster/DomDispatchTuningBuilderTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/cluster/DomDispatchTuningBuilderTest.java index abfb03e41dd..7533bf1ef9d 100644 --- a/config-model/src/test/java/com/yahoo/vespa/model/content/cluster/DomDispatchTuningBuilderTest.java +++ b/config-model/src/test/java/com/yahoo/vespa/model/content/cluster/DomDispatchTuningBuilderTest.java @@ -44,7 +44,6 @@ public class DomDispatchTuningBuilderTest { " </tuning>" + "</content>"); assertNull(dispatch.getMaxHitsPerPartition()); - assertNull(dispatch.getMinGroupCoverage()); assertNull(dispatch.getMinActiveDocsCoverage()); assertNull(dispatch.getDispatchPolicy()); assertNull(dispatch.getTopkProbability()); @@ -57,14 +56,12 @@ public class DomDispatchTuningBuilderTest { " <tuning>" + " <dispatch>" + " <max-hits-per-partition>69</max-hits-per-partition>" + - " <min-group-coverage>7.5</min-group-coverage>" + " <min-active-docs-coverage>12.5</min-active-docs-coverage>" + " <top-k-probability>0.999</top-k-probability>" + " 
</dispatch>" + " </tuning>" + "</content>"); assertEquals(69, dispatch.getMaxHitsPerPartition().intValue()); - assertEquals(7.5, dispatch.getMinGroupCoverage().doubleValue(), 0.0); assertEquals(12.5, dispatch.getMinActiveDocsCoverage().doubleValue(), 0.0); assertEquals(0.999, dispatch.getTopkProbability().doubleValue(), 0.0); } diff --git a/config-proxy/src/main/sh/vespa-config-loadtester.sh b/config-proxy/src/main/sh/vespa-config-loadtester.sh index f7cecbf292f..38be1cf7b33 100644 --- a/config-proxy/src/main/sh/vespa-config-loadtester.sh +++ b/config-proxy/src/main/sh/vespa-config-loadtester.sh @@ -79,4 +79,4 @@ export ROOT echo "# Using CLASSPATH=$CLASSPATH, args=$@" -java -cp $CLASSPATH:$ROOT/lib/jars/config-proxy-jar-with-dependencies.jar com.yahoo.vespa.config.benchmark.LoadTester "$@" +java -Xms1g -Xmx1g -cp $CLASSPATH:$ROOT/lib/jars/config-proxy-jar-with-dependencies.jar com.yahoo.vespa.config.benchmark.LoadTester "$@" diff --git a/config/src/main/java/com/yahoo/vespa/config/benchmark/LoadTester.java b/config/src/main/java/com/yahoo/vespa/config/benchmark/LoadTester.java index 3a8d80e5ffe..182c30a0ece 100644 --- a/config/src/main/java/com/yahoo/vespa/config/benchmark/LoadTester.java +++ b/config/src/main/java/com/yahoo/vespa/config/benchmark/LoadTester.java @@ -1,4 +1,4 @@ -// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.config.benchmark; import com.yahoo.collections.Tuple2; @@ -33,6 +33,8 @@ import java.util.Map; import java.util.Optional; import java.util.concurrent.ThreadLocalRandom; +import static com.yahoo.vespa.config.ConfigKey.createFull; + /** * A config client for generating load against a config server or config proxy. 
* <p> @@ -69,8 +71,7 @@ public class LoadTester { String configsList = parser.getBinarySwitches().get("-l"); String defPath = parser.getBinarySwitches().get("-dd"); debug = parser.getUnarySwitches().contains("-d"); - LoadTester loadTester = new LoadTester(); - loadTester.runLoad(host, port, iterations, threads, configsList, defPath); + new LoadTester().runLoad(host, port, iterations, threads, configsList, defPath); } private void runLoad(String host, int port, int iterations, int threads, @@ -78,7 +79,7 @@ public class LoadTester { configs = readConfigs(configsList); defs = readDefs(defPath); List<LoadThread> threadList = new ArrayList<>(); - long start = System.currentTimeMillis(); + long startInNanos = System.nanoTime(); Metrics m = new Metrics(); for (int i = 0; i < threads; i++) { @@ -91,20 +92,23 @@ public class LoadTester { lt.join(); m.merge(lt.metrics); } - printOutput(start, threads, iterations, m); + printOutput(startInNanos, threads, iterations, m); } private Map<ConfigDefinitionKey, Tuple2<String, String[]>> readDefs(String defPath) throws IOException { Map<ConfigDefinitionKey, Tuple2<String, String[]>> ret = new HashMap<>(); if (defPath == null) return ret; + File defDir = new File(defPath); if (!defDir.isDirectory()) { - System.out.println("# Given def file dir is not a directory: " + defDir.getPath() + " , will not send def contents in requests."); + System.out.println("# Given def file dir is not a directory: " + + defDir.getPath() + " , will not send def contents in requests."); return ret; } - final File[] files = defDir.listFiles(); + File[] files = defDir.listFiles(); if (files == null) { - System.out.println("# Given def file dir has no files: " + defDir.getPath() + " , will not send def contents in requests."); + System.out.println("# Given def file dir has no files: " + + defDir.getPath() + " , will not send def contents in requests."); return ret; } for (File f : files) { @@ -118,20 +122,18 @@ public class LoadTester { return ret; } - 
private void printOutput(long start, long threads, long iterations, Metrics metrics) { - long stop = System.currentTimeMillis(); - float durSec = (float) (stop - start) / 1000f; + private void printOutput(long startInNanos, long threads, long iterations, Metrics metrics) { + float durSec = (float) (System.nanoTime() - startInNanos) / 1_000_000_000f; StringBuilder sb = new StringBuilder(); - sb.append("#reqs/sec #bytes/sec #avglatency #minlatency #maxlatency #failedrequests\n"); + sb.append("#reqs/sec #avglatency #minlatency #maxlatency #failedrequests\n"); sb.append(((float) (iterations * threads)) / durSec).append(","); - sb.append((metrics.totBytes / durSec)).append(","); - sb.append((metrics.totLatency / threads / iterations)).append(","); + sb.append((metrics.latencyInMillis / threads / iterations)).append(","); sb.append((metrics.minLatency)).append(","); sb.append((metrics.maxLatency)).append(","); sb.append((metrics.failedRequests)); sb.append("\n"); sb.append('#').append(TransportMetrics.getInstance().snapshot().toString()).append('\n'); - System.out.println(sb.toString()); + System.out.println(sb); } private List<ConfigKey<?>> readConfigs(String configsList) throws IOException { @@ -151,23 +153,20 @@ public class LoadTester { private static class Metrics { - long totBytes = 0; - long totLatency = 0; + long latencyInMillis = 0; long failedRequests = 0; long maxLatency = Long.MIN_VALUE; long minLatency = Long.MAX_VALUE; public void merge(Metrics m) { - this.totBytes += m.totBytes; - this.totLatency += m.totLatency; + this.latencyInMillis += m.latencyInMillis; this.failedRequests += m.failedRequests; updateMin(m.minLatency); updateMax(m.maxLatency); } - public void update(long bytes, long latency) { - this.totBytes += bytes; - this.totLatency += latency; + public void update(long latency) { + this.latencyInMillis += latency; updateMin(latency); updateMax(latency); } @@ -189,10 +188,10 @@ public class LoadTester { private class LoadThread extends Thread { - 
int iterations = 0; - String host = ""; - int port = 0; - Metrics metrics = new Metrics(); + private final int iterations; + private final String host; + private final int port; + private final Metrics metrics = new Metrics(); LoadThread(int iterations, String host, int port) { this.iterations = iterations; @@ -204,58 +203,29 @@ public class LoadTester { public void run() { Spec spec = new Spec(host, port); Target target = connect(spec); - ConfigKey<?> reqKey; - JRTClientConfigRequest request; - int totConfs = configs.size(); - boolean reconnCycle = false; // to log reconn message only once, for instance at restart + for (int i = 0; i < iterations; i++) { - reqKey = configs.get(ThreadLocalRandom.current().nextInt(totConfs)); + ConfigKey<?> reqKey = configs.get(ThreadLocalRandom.current().nextInt(configs.size())); ConfigDefinitionKey dKey = new ConfigDefinitionKey(reqKey); Tuple2<String, String[]> defContent = defs.get(dKey); if (defContent == null && defs.size() > 0) { // Only complain if we actually did run with a def dir System.out.println("# No def found for " + dKey + ", not sending in request."); } - request = getRequest(ConfigKey.createFull(reqKey.getName(), reqKey.getConfigId(), reqKey.getNamespace(), defContent.first), defContent.second); + ConfigKey<?> configKey = createFull(reqKey.getName(), reqKey.getConfigId(), reqKey.getNamespace(), defContent.first); + JRTClientConfigRequest request = createRequest(configKey, defContent.second); if (debug) System.out.println("# Requesting: " + reqKey); - long start = System.currentTimeMillis(); + long start = System.nanoTime(); target.invokeSync(request.getRequest(), 10.0); - long end = System.currentTimeMillis(); + long durationInMillis = (System.nanoTime() - start) / 1_000_000; if (request.isError()) { - if ("Connection lost".equals(request.errorMessage()) || "Connection down".equals(request.errorMessage())) { - try { - Thread.sleep(100); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - if 
(!reconnCycle) { - System.out.println("# Connection lost, reconnecting..."); - reconnCycle = true; - } - target.close(); - target = connect(spec); - } else { - System.err.println(request.errorMessage()); - } - metrics.incFailedRequests(); + target = handleError(request, spec, target); } else { - if (reconnCycle) { - reconnCycle = false; - System.out.println("# Connection OK"); - } - long duration = end - start; - - if (debug) { - String payload = request.getNewPayload().toString(); - metrics.update(payload.length(), duration); // assume 8 bit... - System.out.println("# Ret: " + payload); - } else { - metrics.update(0, duration); - } + metrics.update(durationInMillis); } } } - private JRTClientConfigRequest getRequest(ConfigKey<?> reqKey, String[] defContent) { + private JRTClientConfigRequest createRequest(ConfigKey<?> reqKey, String[] defContent) { if (defContent == null) defContent = new String[0]; final long serverTimeout = 1000; return JRTClientConfigRequestV3.createWithParams(reqKey, DefContent.fromList(Arrays.asList(defContent)), @@ -266,6 +236,24 @@ public class LoadTester { private Target connect(Spec spec) { return supervisor.connect(spec); } + + private Target handleError(JRTClientConfigRequest request, Spec spec, Target target) { + if (List.of("Connection lost", "Connection down").contains(request.errorMessage())) { + try { + Thread.sleep(100); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + System.out.println("# Connection lost, reconnecting..."); + target.close(); + target = connect(spec); + } else { + System.err.println(request.errorMessage()); + } + metrics.incFailedRequests(); + return target; + } + } } diff --git a/configdefinitions/src/vespa/dispatch.def b/configdefinitions/src/vespa/dispatch.def index aa40c317d75..150fe2e1603 100644 --- a/configdefinitions/src/vespa/dispatch.def +++ b/configdefinitions/src/vespa/dispatch.def @@ -7,10 +7,10 @@ namespace=vespa.config.search # for that group to be included in queries 
minActivedocsPercentage double default=97.0 -# Minimum coverage for allowing a group to be considered for serving +# Not used. TODO: Remove on Vespa 8 minGroupCoverage double default=100 -# Maximum number of nodes allowed to be down for group to be considered for serving +# Not used. TODO: Remove on Vespa 8 maxNodesDownPerGroup int default=0 # Distribution policy for group selection diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java index 276eb51981c..818e65b6caf 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java @@ -1087,9 +1087,9 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye } ReindexActions reindexActions = actions.getReindexActions(); if ( ! reindexActions.isEmpty()) { - logger.logApplicationPackage(Level.WARNING, - "Change(s) between active and new application that may require re-index:\n" + - reindexActions.format()); + logger.log(Level.WARNING, + "Change(s) between active and new application that may require re-index:\n" + + reindexActions.format()); } } diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/Deployment.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/Deployment.java index 2d336267169..1ab667f8a01 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/Deployment.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/Deployment.java @@ -148,6 +148,8 @@ public class Deployment implements com.yahoo.config.provision.Deployment { provisioner.get().restart(applicationId, HostFilter.from(hostnames, Set.of(), Set.of(), Set.of())); deployLogger.log(Level.INFO, String.format("Scheduled service restart of %d nodes: %s", hostnames.size(), 
hostnames.stream().sorted().collect(Collectors.joining(", ")))); + log.info(String.format("%sScheduled service restart of %d nodes: %s", + session.logPre(), hostnames.size(), restartActions.format())); this.configChangeActions = new ConfigChangeActions( new RestartActions(), configChangeActions.getRefeedActions(), configChangeActions.getReindexActions()); diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/DeploymentMetricsResponse.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/DeploymentMetricsResponse.java index 062a21b1f80..cdfdce91500 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/DeploymentMetricsResponse.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/DeploymentMetricsResponse.java @@ -40,6 +40,11 @@ public class DeploymentMetricsResponse extends SlimeJsonResponse { metrics.setDouble("diskUtil", disk.util()); metrics.setDouble("diskFeedBlockLimit", disk.feedBlockLimit()); }); + + aggregator.reindexingProgress().ifPresent(reindexingProgress -> { + Cursor progressObject = cluster.setObject("reindexingProgress"); + reindexingProgress.forEach(progressObject::setDouble); + }); } } } diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java index e1135063f97..77e2f923d4a 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java @@ -127,8 +127,10 @@ public class ClusterDeploymentMetricsRetriever { case VESPA_CONTAINER: optionalDouble(values.field("query_latency.sum")).ifPresent(qlSum -> aggregator.get() - .addContainerLatency(qlSum, values.field("query_latency.count").asDouble()) - 
.addFeedLatency(values.field("feed.latency.sum").asDouble(), values.field("feed.latency.count").asDouble())); + .addContainerLatency(qlSum, values.field("query_latency.count").asDouble())); + optionalDouble(values.field("feed.latency.sum")).ifPresent(flSum -> + aggregator.get() + .addFeedLatency(flSum, values.field("feed.latency.count").asDouble())); break; case VESPA_QRSERVER: optionalDouble(values.field("query_latency.sum")).ifPresent(qlSum -> @@ -146,6 +148,10 @@ public class ClusterDeploymentMetricsRetriever { values.field("cluster-controller.resource_usage.memory_limit.last").asDouble()) .addDiskUsage(values.field("cluster-controller.resource_usage.max_disk_utilization.last").asDouble(), values.field("cluster-controller.resource_usage.disk_limit.last").asDouble())); + optionalDouble(values.field("reindexing.progress.last")).ifPresent(progress -> { + if (progress < 0 || progress >= 1) return; + aggregator.get().addReindexingProgress(metric.field("dimensions").field("documenttype").asString(), progress); + }); break; } } diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsAggregator.java b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsAggregator.java index f27cf942dd8..7ce6d84ad8c 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsAggregator.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsAggregator.java @@ -1,6 +1,8 @@ // Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
package com.yahoo.vespa.config.server.metrics; +import java.util.HashMap; +import java.util.Map; import java.util.Optional; /** @@ -15,6 +17,7 @@ public class DeploymentMetricsAggregator { private Double documentCount; private ResourceUsage memoryUsage; private ResourceUsage diskUsage; + private Map<String, Double> reindexingProgress; public synchronized DeploymentMetricsAggregator addFeedLatency(double sum, double count) { this.feed = combineLatency(this.feed, sum, count); @@ -46,6 +49,12 @@ public class DeploymentMetricsAggregator { return this; } + public synchronized DeploymentMetricsAggregator addReindexingProgress(String documentType, double progress) { + if (reindexingProgress == null) this.reindexingProgress = new HashMap<>(); + this.reindexingProgress.put(documentType, progress); + return this; + } + public Optional<Double> aggregateFeedLatency() { return Optional.ofNullable(feed).map(m -> m.sum / m.count).filter(num -> !num.isNaN()); } @@ -80,6 +89,10 @@ public class DeploymentMetricsAggregator { return Optional.ofNullable(diskUsage); } + public Optional<Map<String, Double>> reindexingProgress() { + return Optional.ofNullable(reindexingProgress); + } + private static LatencyMetrics combineLatency(LatencyMetrics metricsOrNull, double sum, double count) { return Optional.ofNullable(metricsOrNull).orElseGet(LatencyMetrics::new).combine(sum, count); diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantRepository.java b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantRepository.java index 21db290d5e8..2f7b397cbd9 100644 --- a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantRepository.java +++ b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantRepository.java @@ -467,12 +467,7 @@ public class TenantRepository { */ public static String logPre(ApplicationId app) { if (DEFAULT_TENANT.equals(app.tenant())) return ""; - StringBuilder ret = new StringBuilder() - 
.append(logPre(app.tenant())) - .append("app:" + app.application().value()) - .append(":" + app.instance().value()) - .append(" "); - return ret.toString(); + return "app:" + app.toFullString() + " "; } /** @@ -483,10 +478,7 @@ public class TenantRepository { */ public static String logPre(TenantName tenant) { if (DEFAULT_TENANT.equals(tenant)) return ""; - StringBuilder ret = new StringBuilder() - .append("tenant:" + tenant.value()) - .append(" "); - return ret.toString(); + return "tenant:" + tenant.value() + " "; } private void stateChanged(CuratorFramework framework, ConnectionState connectionState) { diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java index 2cf4d7e7b69..e8dc08d4e8d 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java @@ -615,7 +615,7 @@ public class ApplicationRepositoryTest { applicationRepository.prepare(sessionId2, prepareParams()); exceptionRule.expect(ActivationConflictException.class); - exceptionRule.expectMessage(containsString("tenant:test1 app:testapp:default Cannot activate session 3 because the currently active session (4) has changed since session 3 was created (was 2 at creation time)")); + exceptionRule.expectMessage(containsString("app:test1.testapp.default Cannot activate session 3 because the currently active session (4) has changed since session 3 was created (was 2 at creation time)")); applicationRepository.activate(applicationRepository.getTenant(applicationId()), sessionId2, timeoutBudget, false); } @@ -629,7 +629,7 @@ public class ApplicationRepositoryTest { applicationRepository.prepare(sessionId, prepareParams()); exceptionRule.expect(IllegalArgumentException.class); - exceptionRule.expectMessage(containsString("tenant:test1 
app:testapp:default Session 2 is already active")); + exceptionRule.expectMessage(containsString("app:test1.testapp.default Session 2 is already active")); applicationRepository.activate(applicationRepository.getTenant(applicationId()), sessionId, timeoutBudget, false); } diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetrieverTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetrieverTest.java index 7fdfbcdbf03..b5bcae65009 100644 --- a/configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetrieverTest.java +++ b/configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetrieverTest.java @@ -70,7 +70,9 @@ public class ClusterDeploymentMetricsRetrieverTest { new DeploymentMetricsAggregator() .addDocumentCount(6000.0) .addMemoryUsage(0.89074, 0.8) - .addDiskUsage(0.83517, 0.75), + .addDiskUsage(0.83517, 0.75) + .addReindexingProgress("test_artifacts", 0.71) + .addReindexingProgress("announcements", 0), aggregatorMap.get(expectedContentCluster) ); @@ -113,6 +115,7 @@ public class ClusterDeploymentMetricsRetrieverTest { compareOptionals(expected.diskUsage(), actual.diskUsage(), (a, b) -> assertDoubles.accept(a.feedBlockLimit(), b.feedBlockLimit())); compareOptionals(expected.memoryUsage(), actual.memoryUsage(), (a, b) -> assertDoubles.accept(a.util(), b.util())); compareOptionals(expected.memoryUsage(), actual.memoryUsage(), (a, b) -> assertDoubles.accept(a.feedBlockLimit(), b.feedBlockLimit())); + assertEquals(expected.reindexingProgress(), actual.reindexingProgress()); } @SuppressWarnings("OptionalUsedAsFieldOrParameterType") diff --git a/configserver/src/test/resources/metrics/clustercontroller_metrics.json b/configserver/src/test/resources/metrics/clustercontroller_metrics.json index 9afcb34d77d..65468749940 100644 --- a/configserver/src/test/resources/metrics/clustercontroller_metrics.json 
+++ b/configserver/src/test/resources/metrics/clustercontroller_metrics.json @@ -20,6 +20,48 @@ }, { "values": { + "reindexing.progress.last": 0.71 + }, + "dimensions": { + "clustertype": "content", + "clusterid": "content_cluster_id", + "documenttype": "test_artifacts" + } + }, + { + "values": { + "reindexing.progress.last": 1 + }, + "dimensions": { + "clustertype": "content", + "clusterid": "content_cluster_id", + "documenttype": "builds" + } + }, + { + "values": { + "reindexing.progress.last": 0 + }, + "dimensions": { + "clustertype": "content", + "clusterid": "content_cluster_id", + "documenttype": "announcements", + "state": "running" + } + }, + { + "values": { + "reindexing.progress.last": -1 + }, + "dimensions": { + "clustertype": "content", + "clusterid": "content_cluster_id", + "documenttype": "announcements", + "state": "successful" + } + }, + { + "values": { "some.other.metrics": 1 }, "dimensions": { diff --git a/container-search/src/main/java/com/yahoo/search/cluster/BaseNodeMonitor.java b/container-search/src/main/java/com/yahoo/search/cluster/BaseNodeMonitor.java index 0d491d2f0c1..8eee7c11d3e 100644 --- a/container-search/src/main/java/com/yahoo/search/cluster/BaseNodeMonitor.java +++ b/container-search/src/main/java/com/yahoo/search/cluster/BaseNodeMonitor.java @@ -44,7 +44,7 @@ public abstract class BaseNodeMonitor<T> { protected MonitorConfiguration configuration; /** Is the node we monitor part of an internal Vespa cluster or not */ - private boolean internal; + private final boolean internal; public BaseNodeMonitor(boolean internal) { this.internal=internal; diff --git a/container-search/src/main/java/com/yahoo/search/cluster/ClusterMonitor.java b/container-search/src/main/java/com/yahoo/search/cluster/ClusterMonitor.java index 27d8bb27ee8..c9b8aeee417 100644 --- a/container-search/src/main/java/com/yahoo/search/cluster/ClusterMonitor.java +++ b/container-search/src/main/java/com/yahoo/search/cluster/ClusterMonitor.java @@ -25,9 +25,9 @@ import 
java.util.logging.Logger; */ public class ClusterMonitor<T> { - private final MonitorConfiguration configuration = new MonitorConfiguration(); + private static final Logger log = Logger.getLogger(ClusterMonitor.class.getName()); - private static Logger log = Logger.getLogger(ClusterMonitor.class.getName()); + private final MonitorConfiguration configuration = new MonitorConfiguration(); private final NodeManager<T> nodeManager; diff --git a/container-search/src/main/java/com/yahoo/search/cluster/MonitorConfiguration.java b/container-search/src/main/java/com/yahoo/search/cluster/MonitorConfiguration.java index a2fb982e3c5..95f51b374d6 100644 --- a/container-search/src/main/java/com/yahoo/search/cluster/MonitorConfiguration.java +++ b/container-search/src/main/java/com/yahoo/search/cluster/MonitorConfiguration.java @@ -9,7 +9,7 @@ package com.yahoo.search.cluster; public class MonitorConfiguration { /** The interval in ms between consecutive checks of the monitored nodes */ - private long checkInterval=1000; + private long checkInterval = 1000; /** The number of milliseconds to attempt to complete a request before giving up */ private final long requestTimeout = 980; @@ -18,6 +18,7 @@ public class MonitorConfiguration { private long failLimit = 5000; /** Sets the interval between each ping of idle or failing nodes. Default is 1000 ms. */ + @Deprecated // TODO: Remove on Vespa 8 public void setCheckInterval(long intervalMs) { this.checkInterval = intervalMs; } /** Returns the interval between each ping of idle or failing nodes. Default is 1000 ms. 
*/ @@ -59,6 +60,7 @@ public class MonitorConfiguration { * Sets the number of milliseconds a node is allowed to fail before we * mark it as not working */ + @Deprecated // TODO: Remove on Vespa 8 public void setFailLimit(long failLimit) { this.failLimit=failLimit; } /** @@ -86,6 +88,7 @@ public class MonitorConfiguration { @Deprecated // TODO: Remove on Vespa 8 public void setQuarantineTime(long quarantineTime) { } + @Override public String toString() { return "monitor configuration [" + "checkInterval: " + checkInterval + diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java index 159a42676ec..9ae25518969 100644 --- a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java +++ b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java @@ -368,7 +368,7 @@ public class SearchCluster implements NodeManager<Node> { */ public boolean isPartialGroupCoverageSufficient(List<Node> nodes) { if (orderedGroups().size() == 1) - return nodes.size() >= wantedGroupSize() - dispatchConfig.maxNodesDownPerGroup(); + return true; long activeDocuments = nodes.stream().mapToLong(Node::getActiveDocuments).sum(); return isGroupCoverageSufficient(activeDocuments, medianDocumentsPerGroup()); } @@ -378,7 +378,6 @@ public class SearchCluster implements NodeManager<Node> { boolean changed = group.isFullCoverageStatusChanged(fullCoverage); if (changed || (!fullCoverage && System.currentTimeMillis() > nextLogTime)) { nextLogTime = System.currentTimeMillis() + 30 * 1000; - int requiredNodes = group.nodes().size() - dispatchConfig.maxNodesDownPerGroup(); if (fullCoverage) { log.info("Cluster " + clusterId + ": " + group + " has full coverage. 
" + "Active documents: " + group.getActiveDocuments() + "/" + medianDocuments + ", " + @@ -391,7 +390,7 @@ public class SearchCluster implements NodeManager<Node> { } log.warning("Cluster " + clusterId + ": " + group + " has reduced coverage: " + "Active documents: " + group.getActiveDocuments() + "/" + medianDocuments + ", " + - "working nodes: " + group.workingNodes() + "/" + group.nodes().size() + " required " + requiredNodes + + "working nodes: " + group.workingNodes() + "/" + group.nodes().size() + ", unresponsive nodes: " + (unresponsive.toString().isEmpty() ? " none" : unresponsive)); } } diff --git a/container-search/src/main/java/com/yahoo/search/grouping/request/GroupingOperation.java b/container-search/src/main/java/com/yahoo/search/grouping/request/GroupingOperation.java index 8b73fa01128..499ed610d34 100644 --- a/container-search/src/main/java/com/yahoo/search/grouping/request/GroupingOperation.java +++ b/container-search/src/main/java/com/yahoo/search/grouping/request/GroupingOperation.java @@ -8,7 +8,12 @@ import com.yahoo.search.grouping.request.parser.GroupingParserInput; import com.yahoo.search.grouping.request.parser.ParseException; import com.yahoo.search.grouping.request.parser.TokenMgrException; -import java.util.*; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; + /** * This class represents a single node in a grouping operation tree. 
You may manually construct this tree, or you may diff --git a/container-search/src/test/java/com/yahoo/search/dispatch/MockSearchCluster.java b/container-search/src/test/java/com/yahoo/search/dispatch/MockSearchCluster.java index 9d96b2302d7..8db54218e56 100644 --- a/container-search/src/test/java/com/yahoo/search/dispatch/MockSearchCluster.java +++ b/container-search/src/test/java/com/yahoo/search/dispatch/MockSearchCluster.java @@ -119,7 +119,6 @@ public class MockSearchCluster extends SearchCluster { DispatchConfig.Builder builder = new DispatchConfig.Builder(); builder.minActivedocsPercentage(88.0); builder.minGroupCoverage(99.0); - builder.maxNodesDownPerGroup(0); builder.minSearchCoverage(minSearchCoverage); builder.distributionPolicy(DispatchConfig.DistributionPolicy.Enum.ROUNDROBIN); if (minSearchCoverage < 100.0) { diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/ClusterMetrics.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/ClusterMetrics.java index 0e11bcdccaf..ee74aca0e14 100644 --- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/ClusterMetrics.java +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/ClusterMetrics.java @@ -23,11 +23,13 @@ public class ClusterMetrics { private final String clusterId; private final String clusterType; private final Map<String, Double> metrics; + private final Map<String, Double> reindexingProgress; - public ClusterMetrics(String clusterId, String clusterType, Map<String, Double> metrics) { + public ClusterMetrics(String clusterId, String clusterType, Map<String, Double> metrics, Map<String, Double> reindexingProgress) { this.clusterId = clusterId; this.clusterType = clusterType; this.metrics = Map.copyOf(metrics); + this.reindexingProgress = Map.copyOf(reindexingProgress); } public String getClusterId() { @@ -74,4 +76,7 @@ public class 
ClusterMetrics { return Optional.ofNullable(metrics.get(DISK_FEED_BLOCK_LIMIT)); } + public Map<String, Double> reindexingProgress() { + return reindexingProgress; + } } diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/user/User.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/user/User.java index dcce25bda95..d3ed804e546 100644 --- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/user/User.java +++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/user/User.java @@ -1,6 +1,7 @@ // Copyright 2020 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.controller.api.integration.user; +import java.time.LocalDate; import java.util.Objects; /** @@ -9,17 +10,34 @@ import java.util.Objects; public class User { public static final String ATTRIBUTE_NAME = "vespa.user.attributes"; + public static final LocalDate NO_DATE = LocalDate.EPOCH; private final String email; private final String name; private final String nickname; private final String picture; + private final boolean isVerified; + private final int loginCount; + private final LocalDate lastLogin; public User(String email, String name, String nickname, String picture) { this.email = Objects.requireNonNull(email); this.name = name; this.nickname = nickname; this.picture = picture; + this.isVerified = false; + this.loginCount = -1; + this.lastLogin = NO_DATE; + } + + public User(String email, String name, String nickname, String picture, boolean isVerified, int loginCount, LocalDate lastLogin) { + this.email = Objects.requireNonNull(email); + this.name = name; + this.nickname = nickname; + this.picture = picture; + this.isVerified = isVerified; + this.loginCount = loginCount; + this.lastLogin = Objects.requireNonNull(lastLogin); } public String name() { @@ -38,6 +56,12 @@ public class User { return picture; 
} + public LocalDate lastLogin() { return lastLogin; } + + public boolean isVerified() { return isVerified; } + + public int loginCount() { return loginCount; } + @Override public boolean equals(Object o) { if (this == o) return true; @@ -46,11 +70,14 @@ public class User { return Objects.equals(name, user.name) && Objects.equals(email, user.email) && Objects.equals(nickname, user.nickname) && - Objects.equals(picture, user.picture); + Objects.equals(picture, user.picture) && + Objects.equals(lastLogin, user.lastLogin) && + loginCount == user.loginCount && + isVerified == user.isVerified; } @Override public int hashCode() { - return Objects.hash(name, email, nickname, picture); + return Objects.hash(name, email, nickname, picture, lastLogin, loginCount, isVerified); } } diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java index c867b97b544..ff10f3b77ca 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java @@ -445,6 +445,11 @@ public class ApplicationController { // Validate new deployment spec thoroughly before storing it. controller.jobController().deploymentStatus(application.get()); + // Clear notifications for instances that are no longer declared + for (var name : existingInstances) + if ( ! 
declaredInstances.contains(name)) + controller.notificationsDb().removeNotifications(NotificationSource.from(application.get().id().instance(name))); + store(application); return application; } diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ZipStreamReader.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ZipStreamReader.java index 2322b251fe0..4f01df21430 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ZipStreamReader.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ZipStreamReader.java @@ -1,15 +1,14 @@ // Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.vespa.hosted.controller.application; -import com.google.common.collect.ImmutableList; - import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.io.UncheckedIOException; import java.nio.file.Path; -import java.util.Arrays; +import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.function.Predicate; import java.util.zip.ZipEntry; @@ -21,19 +20,19 @@ import java.util.zip.ZipOutputStream; */ public class ZipStreamReader { - private final ImmutableList<ZipEntryWithContent> entries; + private final List<ZipEntryWithContent> entries = new ArrayList<>(); private final int maxEntrySizeInBytes; public ZipStreamReader(InputStream input, Predicate<String> entryNameMatcher, int maxEntrySizeInBytes) { this.maxEntrySizeInBytes = maxEntrySizeInBytes; try (ZipInputStream zipInput = new ZipInputStream(input)) { - ImmutableList.Builder<ZipEntryWithContent> builder = new ImmutableList.Builder<>(); ZipEntry zipEntry; + while (null != (zipEntry = zipInput.getNextEntry())) { if (!entryNameMatcher.test(requireName(zipEntry.getName()))) continue; - builder.add(new 
ZipEntryWithContent(zipEntry, readContent(zipInput))); + entries.add(new ZipEntryWithContent(zipEntry, readContent(zipInput))); } - entries = builder.build(); + } catch (IOException e) { throw new UncheckedIOException("IO error reading zip content", e); } @@ -79,10 +78,10 @@ public class ZipStreamReader { } } - public List<ZipEntryWithContent> entries() { return entries; } + public List<ZipEntryWithContent> entries() { return Collections.unmodifiableList(entries); } private static String requireName(String name) { - if (Arrays.asList(name.split("/")).contains("..") || + if (List.of(name.split("/")).contains("..") || !trimTrailingSlash(name).equals(Path.of(name).normalize().toString())) { throw new IllegalArgumentException("Unexpected non-normalized path found in zip content: '" + name + "'"); } diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CloudTrialExpirer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CloudTrialExpirer.java new file mode 100644 index 00000000000..be8f4254b79 --- /dev/null +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CloudTrialExpirer.java @@ -0,0 +1,80 @@ +// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.hosted.controller.maintenance; + +import com.yahoo.config.provision.SystemName; +import com.yahoo.vespa.flags.ListFlag; +import com.yahoo.vespa.flags.PermanentFlags; +import com.yahoo.vespa.hosted.controller.Controller; +import com.yahoo.vespa.hosted.controller.api.integration.billing.PlanId; +import com.yahoo.vespa.hosted.controller.tenant.LastLoginInfo; +import com.yahoo.vespa.hosted.controller.tenant.Tenant; + +import java.time.Duration; +import java.util.List; +import java.util.stream.Collectors; + +/** + * Expires unused tenants from Vespa Cloud. 
+ * + * @author ogronnesby + */ +public class CloudTrialExpirer extends ControllerMaintainer { + + private static Duration loginExpiry = Duration.ofDays(14); + private final ListFlag<String> extendedTrialTenants; + + public CloudTrialExpirer(Controller controller, Duration interval) { + super(controller, interval, null, SystemName.allOf(SystemName::isPublic)); + this.extendedTrialTenants = PermanentFlags.EXTENDED_TRIAL_TENANTS.bindTo(controller().flagSource()); + } + + @Override + protected double maintain() { + var expiredTenants = controller().tenants().asList().stream() + .filter(this::tenantIsCloudTenant) // only valid for cloud tenants + .filter(this::tenantHasTrialPlan) // only valid to expire actual trial tenants + .filter(this::tenantIsNotExemptFromExpiry) // feature flag might exempt tenant from expiry + .filter(this::tenantReadersNotLoggedIn) // no user logged in last 14 days + .filter(this::tenantHasNoDeployments) // no running deployments active + .collect(Collectors.toList()); + + expireTenants(expiredTenants); + + return 0; + } + + private boolean tenantIsCloudTenant(Tenant tenant) { + return tenant.type() == Tenant.Type.cloud; + } + + private boolean tenantReadersNotLoggedIn(Tenant tenant) { + return tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user) + .map(instant -> { + var sinceLastLogin = Duration.between(instant, controller().clock().instant()); + return sinceLastLogin.compareTo(loginExpiry) > 0; + }) + .orElse(false); + } + + private boolean tenantHasTrialPlan(Tenant tenant) { + var planId = controller().serviceRegistry().billingController().getPlan(tenant.name()); + return "trial".equals(planId.value()); + } + + private boolean tenantIsNotExemptFromExpiry(Tenant tenant) { + return ! 
extendedTrialTenants.value().contains(tenant.name().value()); + } + + private boolean tenantHasNoDeployments(Tenant tenant) { + return controller().applications().asList(tenant.name()).stream() + .flatMap(app -> app.instances().values().stream()) + .mapToLong(instance -> instance.deployments().values().size()) + .sum() == 0; + } + + private void expireTenants(List<Tenant> tenants) { + tenants.forEach(tenant -> { + controller().serviceRegistry().billingController().setPlan(tenant.name(), PlanId.from("none"), false); + }); + } +} diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java index 5a7ef12b246..97c3c9f4091 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java @@ -70,6 +70,7 @@ public class ControllerMaintenance extends AbstractComponent { maintainers.add(new TenantRoleMaintainer(controller, intervals.tenantRoleMaintainer)); maintainers.add(new ChangeRequestMaintainer(controller, intervals.changeRequestMaintainer)); maintainers.add(new VCMRMaintainer(controller, intervals.vcmrMaintainer)); + maintainers.add(new CloudTrialExpirer(controller, intervals.defaultInterval)); } public Upgrader upgrader() { return upgrader; } diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java index 20154c4f122..ba4aaf92fc8 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java @@ 
-69,7 +69,7 @@ public class DeploymentMetricsMaintainer extends ControllerMaintainer { lockedInstance -> lockedInstance.with(existingDeployment.zone(), newMetrics) .recordActivityAt(now, existingDeployment.zone()))); - controller().notificationsDb().setDeploymentFeedingBlockedNotifications(deploymentId, clusterMetrics); + controller().notificationsDb().setDeploymentMetricsNotifications(deploymentId, clusterMetrics); }); } catch (Exception e) { failures.incrementAndGet(); diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgrader.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgrader.java index e71fcf12b23..203c8187c2c 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgrader.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgrader.java @@ -3,7 +3,6 @@ package com.yahoo.vespa.hosted.controller.maintenance; import com.yahoo.component.Version; import com.yahoo.config.provision.CloudName; -import com.yahoo.config.provision.Environment; import com.yahoo.config.provision.zone.ZoneApi; import com.yahoo.vespa.hosted.controller.Controller; import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node; @@ -89,12 +88,18 @@ public class OsUpgrader extends InfrastructureUpgrader<OsVersionTarget> { /** Returns the available upgrade budget for given zone */ private Duration zoneBudgetOf(Duration totalBudget, ZoneApi zone) { - if (!zone.getEnvironment().isProduction()) return Duration.ZERO; - long consecutiveProductionZones = upgradePolicy.asList().stream() - .filter(parallelZones -> parallelZones.stream().map(ZoneApi::getEnvironment) - .anyMatch(Environment::isProduction)) - .count(); - return totalBudget.dividedBy(consecutiveProductionZones); + if (!spendBudget(zone)) return Duration.ZERO; + long consecutiveZones = upgradePolicy.asList().stream() + .filter(parallelZones -> 
parallelZones.stream().anyMatch(this::spendBudget)) + .count(); + return totalBudget.dividedBy(consecutiveZones); + } + + /** Returns whether to spend upgrade budget on given zone */ + private boolean spendBudget(ZoneApi zone) { + if (!zone.getEnvironment().isProduction()) return false; + if (controller().zoneRegistry().systemZone().getVirtualId().equals(zone.getVirtualId())) return false; // Controller zone + return true; } /** Returns whether node is in a state where it can be upgraded */ diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/Notification.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/Notification.java index ea0422ea9fc..b65a9290e43 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/Notification.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/Notification.java @@ -62,7 +62,7 @@ public class Notification { public enum Level { // Must be ordered in order of importance - warning, error + info, warning, error } public enum Type { @@ -73,7 +73,10 @@ public class Notification { deployment, /** Application cluster is (near) external feed blocked */ - feedBlock; + feedBlock, + + /** Application cluster is reindexing document(s) */ + reindex; } } diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDb.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDb.java index 21df0c01f0f..7c2d990750c 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDb.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDb.java @@ -2,6 +2,7 @@ package com.yahoo.vespa.hosted.controller.notification; import com.yahoo.collections.Pair; +import com.yahoo.config.provision.ApplicationId; import 
com.yahoo.config.provision.ClusterSpec; import com.yahoo.vespa.curator.Lock; import com.yahoo.vespa.hosted.controller.Controller; @@ -16,6 +17,7 @@ import java.util.Comparator; import java.util.List; import java.util.Locale; import java.util.Optional; +import java.util.Set; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -34,6 +36,13 @@ public class NotificationsDb { public NotificationsDb(Controller controller) { this(controller.clock(), controller.curator()); + + Set<DeploymentId> allDeployments = controller.applications().asList().stream() + .flatMap(application -> application.instances().values().stream()) + .flatMap(instance -> instance.deployments().keySet().stream() + .map(zone -> new DeploymentId(instance.id(), zone))) + .collect(Collectors.toSet()); + removeNotificationsForRemovedInstances(allDeployments); } NotificationsDb(Clock clock, CuratorDb curatorDb) { @@ -41,6 +50,26 @@ public class NotificationsDb { this.curatorDb = curatorDb; } + // TODO (freva): Remove after 7.423 + void removeNotificationsForRemovedInstances(Set<DeploymentId> allDeployments) { + // Prior to 7.423, notifications created for instances that were later removed by being removed from + // deployment.xml were not cleared. This should only affect notifications with type 'deployment' + allDeployments.stream() + .map(deploymentId -> deploymentId.applicationId().tenant()) + .distinct() + .flatMap(tenant -> curatorDb.readNotifications(tenant).stream() + .filter(notification -> notification.type() == Type.deployment && notification.source().zoneId().isPresent()) + .map(Notification::source)) + .filter(source -> { + ApplicationId sourceApplication = ApplicationId.from(source.tenant(), + source.application().get(), + source.instance().get()); + DeploymentId sourceDeployment = new DeploymentId(sourceApplication, source.zoneId().get()); + return ! 
allDeployments.contains(sourceDeployment); + }) + .forEach(source -> removeNotification(source, Type.deployment)); + } + public List<Notification> listNotifications(NotificationSource source, boolean productionOnly) { return curatorDb.readNotifications(source.tenant()).stream() .filter(notification -> source.contains(notification.source()) && (!productionOnly || notification.source().isProduction())) @@ -95,31 +124,22 @@ public class NotificationsDb { } /** - * Updates feeding blocked notifications for the given deployment based on current cluster metrics. - * Will clear notifications of any cluster not reporting the metrics or whose metrics indicate feed is not blocked, - * while setting notifications for cluster that are (Level.error) or are nearly (Level.warning) feed blocked. + * Updates notifications based on deployment metrics (e.g. feed blocked and reindexing progress) for the given + * deployment based on current cluster metrics. + * Will clear notifications of any cluster not reporting the metrics or whose metrics indicate feed is not blocked + * or reindexing no longer in progress. Will set notification for clusters: + * - that are (Level.error) or are nearly (Level.warning) feed blocked, + * - that are (Level.info) currently reindexing at least 1 document type. 
*/ - public void setDeploymentFeedingBlockedNotifications(DeploymentId deploymentId, List<ClusterMetrics> clusterMetrics) { + public void setDeploymentMetricsNotifications(DeploymentId deploymentId, List<ClusterMetrics> clusterMetrics) { Instant now = clock.instant(); - List<Notification> feedBlockNotifications = clusterMetrics.stream() + List<Notification> newNotifications = clusterMetrics.stream() .flatMap(metric -> { - Optional<Pair<Level, String>> memoryStatus = - resourceUtilToFeedBlockStatus("memory", metric.memoryUtil(), metric.memoryFeedBlockLimit()); - Optional<Pair<Level, String>> diskStatus = - resourceUtilToFeedBlockStatus("disk", metric.diskUtil(), metric.diskFeedBlockLimit()); - if (memoryStatus.isEmpty() && diskStatus.isEmpty()) return Stream.empty(); - - // Find the max among levels - Level level = Stream.of(memoryStatus, diskStatus) - .flatMap(status -> status.stream().map(Pair::getFirst)) - .max(Comparator.comparing(Enum::ordinal)).get(); - List<String> messages = Stream.concat(memoryStatus.stream(), diskStatus.stream()) - .filter(status -> status.getFirst() == level) // Do not mix message from different levels - .map(Pair::getSecond) - .collect(Collectors.toUnmodifiableList()); NotificationSource source = NotificationSource.from(deploymentId, ClusterSpec.Id.from(metric.getClusterId())); - return Stream.of(new Notification(now, Type.feedBlock, level, source, messages)); + return Stream.of(createFeedBlockNotification(source, now, metric), + createReindexNotification(source, now, metric)); }) + .flatMap(Optional::stream) .collect(Collectors.toUnmodifiableList()); NotificationSource deploymentSource = NotificationSource.from(deploymentId); @@ -128,10 +148,11 @@ public class NotificationsDb { List<Notification> updated = Stream.concat( initial.stream() .filter(notification -> - // Filter out old feed block notifications for this deployment - notification.type() != Type.feedBlock || !deploymentSource.contains(notification.source())), + // Filter out 
old feed block notifications and reindex for this deployment + (notification.type() != Type.feedBlock && notification.type() != Type.reindex) || + !deploymentSource.contains(notification.source())), // ... and add the new notifications for this deployment - feedBlockNotifications.stream()) + newNotifications.stream()) .collect(Collectors.toUnmodifiableList()); if (!initial.equals(updated)) @@ -139,6 +160,33 @@ public class NotificationsDb { } } + private static Optional<Notification> createFeedBlockNotification(NotificationSource source, Instant at, ClusterMetrics metric) { + Optional<Pair<Level, String>> memoryStatus = + resourceUtilToFeedBlockStatus("memory", metric.memoryUtil(), metric.memoryFeedBlockLimit()); + Optional<Pair<Level, String>> diskStatus = + resourceUtilToFeedBlockStatus("disk", metric.diskUtil(), metric.diskFeedBlockLimit()); + if (memoryStatus.isEmpty() && diskStatus.isEmpty()) return Optional.empty(); + + // Find the max among levels + Level level = Stream.of(memoryStatus, diskStatus) + .flatMap(status -> status.stream().map(Pair::getFirst)) + .max(Comparator.comparing(Enum::ordinal)).get(); + List<String> messages = Stream.concat(memoryStatus.stream(), diskStatus.stream()) + .filter(status -> status.getFirst() == level) // Do not mix message from different levels + .map(Pair::getSecond) + .collect(Collectors.toUnmodifiableList()); + return Optional.of(new Notification(at, Type.feedBlock, level, source, messages)); + } + + private static Optional<Notification> createReindexNotification(NotificationSource source, Instant at, ClusterMetrics metric) { + if (metric.reindexingProgress().isEmpty()) return Optional.empty(); + List<String> messages = metric.reindexingProgress().entrySet().stream() + .map(entry -> String.format("document type '%s' (%.1f%% done)", entry.getKey(), 100 * entry.getValue())) + .sorted() + .collect(Collectors.toUnmodifiableList()); + return Optional.of(new Notification(at, Type.reindex, Level.info, source, messages)); + } + 
/** * Returns a feed block summary for the given resource: the notification level and * notification message for the given resource utilization wrt. given resource limit. diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializer.java index 54dc102d573..06263329091 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializer.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializer.java @@ -93,6 +93,7 @@ public class NotificationsSerializer { case applicationPackage: return "applicationPackage"; case deployment: return "deployment"; case feedBlock: return "feedBlock"; + case reindex: return "reindex"; default: throw new IllegalArgumentException("No serialization defined for notification type " + type); } } @@ -102,12 +103,14 @@ public class NotificationsSerializer { case "applicationPackage": return Notification.Type.applicationPackage; case "deployment": return Notification.Type.deployment; case "feedBlock": return Notification.Type.feedBlock; + case "reindex": return Notification.Type.reindex; default: throw new IllegalArgumentException("Unknown serialized notification type value '" + field.asString() + "'"); } } private static String asString(Notification.Level level) { switch (level) { + case info: return "info"; case warning: return "warning"; case error: return "error"; default: throw new IllegalArgumentException("No serialization defined for notification level " + level); @@ -116,6 +119,7 @@ public class NotificationsSerializer { private static Notification.Level levelFrom(Inspector field) { switch (field.asString()) { + case "info": return Notification.Level.info; case "warning": return Notification.Level.warning; case "error": return Notification.Level.error; default: throw new 
IllegalArgumentException("Unknown serialized notification level value '" + field.asString() + "'"); diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java index 017da94facc..937d3d77fae 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java @@ -525,12 +525,14 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler { case applicationPackage: return "applicationPackage"; case deployment: return "deployment"; case feedBlock: return "feedBlock"; + case reindex: return "reindex"; default: throw new IllegalArgumentException("No serialization defined for notification type " + type); } } private static String notificationLevelAsString(Notification.Level level) { switch (level) { + case info: return "info"; case warning: return "warning"; case error: return "error"; default: throw new IllegalArgumentException("No serialization defined for notification level " + level); diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandler.java index ac9612a56c5..cffdd9fc928 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandler.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandler.java @@ -16,7 +16,6 @@ import com.yahoo.slime.Inspector; import com.yahoo.slime.Slime; import com.yahoo.slime.SlimeUtils; import com.yahoo.vespa.hosted.controller.Controller; 
-import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node; import com.yahoo.vespa.hosted.controller.api.integration.vcmr.ChangeRequest; import com.yahoo.vespa.hosted.controller.api.integration.vcmr.VespaChangeRequest; import com.yahoo.vespa.hosted.controller.auditlog.AuditLoggingRequestHandler; @@ -134,7 +133,9 @@ public class ChangeManagementApiHandler extends AuditLoggingRequestHandler { Inspector inspector = inspectorOrThrow(request); // For now; mandatory fields - Inspector hostArray = getInspectorFieldOrThrow(inspector, "hosts"); + Inspector hostArray = inspector.field("hosts"); + Inspector switchArray = inspector.field("switches"); + // The impacted hostnames List<String> hostNames = new ArrayList<>(); @@ -142,6 +143,15 @@ public class ChangeManagementApiHandler extends AuditLoggingRequestHandler { hostArray.traverse((ArrayTraverser) (i, host) -> hostNames.add(host.asString())); } + if (switchArray.valid()) { + List<String> switchNames = new ArrayList<>(); + switchArray.traverse((ArrayTraverser) (i, switchName) -> switchNames.add(switchName.asString())); + hostNames.addAll(hostsOnSwitch(switchNames)); + } + + if (hostNames.isEmpty()) + return ErrorResponse.badRequest("No prod hosts in provided host/switch list"); + return doAssessment(hostNames); } @@ -272,13 +282,7 @@ public class ChangeManagementApiHandler extends AuditLoggingRequestHandler { .map(HostName::from) .collect(Collectors.toList()); - var potentialZones = controller.zoneRegistry() - .zones() - .reachable() - .in(Environment.prod) - .ids(); - - for (var zone : potentialZones) { + for (var zone : getProdZones()) { var affectedHostsInZone = controller.serviceRegistry().configServer().nodeRepository().list(zone, affectedHosts); if (!affectedHostsInZone.isEmpty()) return Optional.of(zone); @@ -287,4 +291,20 @@ public class ChangeManagementApiHandler extends AuditLoggingRequestHandler { return Optional.empty(); } + private List<String> hostsOnSwitch(List<String> switches) { + return 
getProdZones().stream() + .flatMap(zone -> controller.serviceRegistry().configServer().nodeRepository().list(zone, false).stream()) + .filter(node -> node.switchHostname().map(switches::contains).orElse(false)) + .map(node -> node.hostname().value()) + .collect(Collectors.toList()); + } + + private List<ZoneId> getProdZones() { + return controller.zoneRegistry() + .zones() + .reachable() + .in(Environment.prod) + .ids(); + } + } diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java index 6e069b2b5ec..e195401f03a 100644 --- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java +++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java @@ -40,6 +40,8 @@ import com.yahoo.vespa.hosted.controller.tenant.Tenant; import com.yahoo.yolean.Exceptions; import java.security.PublicKey; +import java.time.LocalDateTime; +import java.time.format.DateTimeFormatter; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -136,12 +138,16 @@ public class UserApiHandler extends LoggingRequestHandler { RoleDefinition.hostedAccountant); private HttpResponse userMetadata(HttpRequest request) { - @SuppressWarnings("unchecked") - Map<String, String> userAttributes = (Map<String, String>) getAttribute(request, User.ATTRIBUTE_NAME, Map.class); - User user = new User(userAttributes.get("email"), - userAttributes.get("name"), - userAttributes.get("nickname"), - userAttributes.get("picture")); + User user; + if (request.getJDiscRequest().context().get(User.ATTRIBUTE_NAME) instanceof User) { + user = getAttribute(request, User.ATTRIBUTE_NAME, User.class); + } else { + // Remove this after June 2021 (once all security filters are setting this) + @SuppressWarnings("unchecked") + Map<String, String> attr = (Map<String, 
String>) getAttribute(request, User.ATTRIBUTE_NAME, Map.class); + user = new User(attr.get("email"), attr.get("name"), attr.get("nickname"), attr.get("picture")); + } + Set<Role> roles = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class).roles(); Map<TenantName, List<TenantRole>> tenantRolesByTenantName = roles.stream() @@ -241,6 +247,11 @@ public class UserApiHandler extends LoggingRequestHandler { userObject.setString("email", user.email()); if (user.nickname() != null) userObject.setString("nickname", user.nickname()); if (user.picture() != null) userObject.setString("picture", user.picture()); + userObject.setBool("verified", user.isVerified()); + if (!user.lastLogin().equals(User.NO_DATE)) + userObject.setString("lastLogin", user.lastLogin().format(DateTimeFormatter.ISO_DATE)); + if (user.loginCount() > -1) + userObject.setLong("loginCount", user.loginCount()); } private HttpResponse addTenantRoleMember(String tenantName, HttpRequest request) { diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java index fc7a99eb2f0..78f688f545b 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java @@ -27,6 +27,7 @@ import java.util.Date; import java.util.List; import java.util.OptionalInt; import java.util.StringJoiner; +import java.util.zip.Deflater; import java.util.zip.ZipEntry; import java.util.zip.ZipOutputStream; @@ -273,27 +274,27 @@ public class ApplicationPackageBuilder { } ByteArrayOutputStream zip = new ByteArrayOutputStream(); try (ZipOutputStream out = new ZipOutputStream(zip)) { - out.putNextEntry(new ZipEntry(dir + "deployment.xml")); - out.write(deploymentSpec()); - out.closeEntry(); 
- out.putNextEntry(new ZipEntry(dir + "validation-overrides.xml")); - out.write(validationOverrides()); - out.closeEntry(); - out.putNextEntry(new ZipEntry(dir + "search-definitions/test.sd")); - out.write(searchDefinition()); - out.closeEntry(); - out.putNextEntry(new ZipEntry(dir + "build-meta.json")); - out.write(buildMeta(compileVersion)); - out.closeEntry(); - out.putNextEntry(new ZipEntry(dir + "security/clients.pem")); - out.write(X509CertificateUtils.toPem(trustedCertificates).getBytes(UTF_8)); - out.closeEntry(); + out.setLevel(Deflater.NO_COMPRESSION); // This is for testing purposes so we skip compression for performance + writeZipEntry(out, dir + "deployment.xml", deploymentSpec()); + writeZipEntry(out, dir + "validation-overrides.xml", validationOverrides()); + writeZipEntry(out, dir + "search-definitions/test.sd", searchDefinition()); + writeZipEntry(out, dir + "build-meta.json", buildMeta(compileVersion)); + if (!trustedCertificates.isEmpty()) { + writeZipEntry(out, dir + "security/clients.pem", X509CertificateUtils.toPem(trustedCertificates).getBytes(UTF_8)); + } } catch (IOException e) { throw new UncheckedIOException(e); } return new ApplicationPackage(zip.toByteArray()); } + private void writeZipEntry(ZipOutputStream out, String name, byte[] content) throws IOException { + ZipEntry entry = new ZipEntry(name); + out.putNextEntry(entry); + out.write(content); + out.closeEntry(); + } + private static String asIso8601Date(Instant instant) { return new SimpleDateFormat("yyyy-MM-dd").format(Date.from(instant)); } diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java index 4203051965b..098282e4e89 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java +++ 
b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java @@ -64,7 +64,6 @@ import java.util.Set; import java.util.UUID; import java.util.logging.Level; import java.util.stream.Collectors; -import java.util.stream.IntStream; import static com.yahoo.config.provision.NodeResources.DiskSpeed.slow; import static com.yahoo.config.provision.NodeResources.StorageType.remote; @@ -168,18 +167,18 @@ public class ConfigServerMock extends AbstractComponent implements ConfigServer public void addNodes(List<ZoneId> zones, List<SystemApplication> applications) { for (ZoneId zone : zones) { for (SystemApplication application : applications) { - List<Node> nodes = IntStream.rangeClosed(1, 3) - .mapToObj(i -> new Node.Builder() - .hostname(HostName.from("node-" + i + "-" + application.id().application() - .value() + "-" + zone.value())) - .state(Node.State.active) - .type(application.nodeType()) - .owner(application.id()) - .currentVersion(initialVersion).wantedVersion(initialVersion) - .currentOsVersion(Version.emptyVersion).wantedOsVersion(Version.emptyVersion) - .build()) - .collect(Collectors.toList()); - nodeRepository().putNodes(zone, nodes); + for (int i = 1; i <= 3; i++) { + Node node = new Node.Builder() + .hostname(HostName.from("node-" + i + "-" + application.id().application() + .value() + "-" + zone.value())) + .state(Node.State.active) + .type(application.nodeType()) + .owner(application.id()) + .currentVersion(initialVersion).wantedVersion(initialVersion) + .currentOsVersion(Version.emptyVersion).wantedOsVersion(Version.emptyVersion) + .build(); + nodeRepository().putNode(zone, node); + } convergeServices(application.id(), zone); } } diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/NodeRepositoryMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/NodeRepositoryMock.java index afb56f10c38..4079591730d 100644 --- 
a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/NodeRepositoryMock.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/NodeRepositoryMock.java @@ -59,9 +59,14 @@ public class NodeRepositoryMock implements NodeRepository { /** Add or update given nodes in zone */ public void putNodes(ZoneId zone, List<Node> nodes) { - nodeRepository.putIfAbsent(zone, new HashMap<>()); - nodeRepository.get(zone).putAll(nodes.stream().collect(Collectors.toMap(Node::hostname, - Function.identity()))); + Map<HostName, Node> zoneNodes = nodeRepository.computeIfAbsent(zone, (k) -> new HashMap<>()); + for (var node : nodes) { + zoneNodes.put(node.hostname(), node); + } + } + + public void putNode(ZoneId zone, Node node) { + nodeRepository.computeIfAbsent(zone, (k) -> new HashMap<>()).put(node.hostname(), node); } public void putApplication(ZoneId zone, Application application) { diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneApiMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneApiMock.java index 7fdbab49ba4..10fee56621c 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneApiMock.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneApiMock.java @@ -78,8 +78,7 @@ public class ZoneApiMock implements ZoneApi { public static class Builder { - private final SystemName systemName = SystemName.defaultSystem(); - + private SystemName systemName = SystemName.defaultSystem(); private ZoneId id = ZoneId.defaultId(); private ZoneId virtualId ; private CloudName cloudName = CloudName.defaultName(); @@ -90,6 +89,11 @@ public class ZoneApiMock implements ZoneApi { return this; } + public Builder withSystem(SystemName systemName) { + this.systemName = systemName; + return this; + } + public Builder withId(String id) { return with(ZoneId.from(id)); } diff --git 
a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/CloudTrialExpirerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/CloudTrialExpirerTest.java new file mode 100644 index 00000000000..f3c4f9f7438 --- /dev/null +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/CloudTrialExpirerTest.java @@ -0,0 +1,93 @@ +// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package com.yahoo.vespa.hosted.controller.maintenance; + +import com.yahoo.config.provision.SystemName; +import com.yahoo.config.provision.TenantName; +import com.yahoo.vespa.flags.InMemoryFlagSource; +import com.yahoo.vespa.flags.PermanentFlags; +import com.yahoo.vespa.hosted.controller.ControllerTester; +import com.yahoo.vespa.hosted.controller.api.integration.billing.PlanId; +import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder; +import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester; +import com.yahoo.vespa.hosted.controller.integration.ZoneApiMock; +import com.yahoo.vespa.hosted.controller.tenant.LastLoginInfo; +import com.yahoo.vespa.hosted.controller.tenant.Tenant; +import org.junit.Test; + +import java.time.Duration; +import java.util.List; + +import static org.junit.Assert.assertEquals; + +/** + * @author ogronnesby + */ +public class CloudTrialExpirerTest { + private final ControllerTester tester = new ControllerTester(SystemName.Public); + private final DeploymentTester deploymentTester = new DeploymentTester(tester); + private final CloudTrialExpirer expirer = new CloudTrialExpirer(tester.controller(), Duration.ofMinutes(5)); + + @Test + public void expire_inactive_tenant() { + registerTenant("trial-tenant", "trial", Duration.ofDays(14).plusMillis(1)); + expirer.maintain(); + assertPlan("trial-tenant", "none"); + } + + @Test + public void keep_inactive_nontrial_tenants() { + 
registerTenant("not-a-trial-tenant", "pay-as-you-go", Duration.ofDays(30)); + expirer.maintain(); + assertPlan("not-a-trial-tenant", "pay-as-you-go"); + } + + @Test + public void keep_active_trial_tenants() { + registerTenant("active-trial-tenant", "trial", Duration.ofHours(14).minusMillis(1)); + expirer.maintain(); + assertPlan("active-trial-tenant", "trial"); + } + + @Test + public void keep_inactive_exempt_tenants() { + registerTenant("exempt-trial-tenant", "trial", Duration.ofDays(40)); + ((InMemoryFlagSource) tester.controller().flagSource()).withListFlag(PermanentFlags.EXTENDED_TRIAL_TENANTS.id(), List.of("exempt-trial-tenant"), String.class); + expirer.maintain(); + assertPlan("exempt-trial-tenant", "trial"); + } + + @Test + public void keep_inactive_trial_tenants_with_deployments() { + registerTenant("with-deployments", "trial", Duration.ofDays(30)); + registerDeployment("with-deployments", "my-app", "default", "aws-us-east-1c"); + expirer.maintain(); + assertPlan("with-deployments", "trial"); + } + + private void registerTenant(String tenantName, String plan, Duration timeSinceLastLogin) { + var name = TenantName.from(tenantName); + tester.createTenant(tenantName, Tenant.Type.cloud); + tester.serviceRegistry().billingController().setPlan(name, PlanId.from(plan), false); + tester.controller().tenants().updateLastLogin(name, List.of(LastLoginInfo.UserLevel.user), tester.controller().clock().instant().minus(timeSinceLastLogin)); + } + + private void registerDeployment(String tenantName, String appName, String instanceName, String regionName) { + var zone = ZoneApiMock.newBuilder() + .withSystem(tester.zoneRegistry().system()) + .withId("prod." 
+ regionName) + .build(); + tester.zoneRegistry().setZones(zone); + var app = tester.createApplication(tenantName, appName, instanceName); + var ctx = deploymentTester.newDeploymentContext(tenantName, appName, instanceName); + var pkg = new ApplicationPackageBuilder() + .instances("default") + .region(regionName) + .trustDefaultCertificate() + .build(); + ctx.submit(pkg).deploy(); + } + + private void assertPlan(String tenant, String planId) { + assertEquals(planId, tester.serviceRegistry().billingController().getPlan(TenantName.from(tenant)).value()); + } +} diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java index 59fb5b596f1..c45aaa563e1 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java @@ -118,8 +118,8 @@ public class DeploymentMetricsMaintainerTest { @Test public void cluster_metric_aggregation_test() { List<ClusterMetrics> clusterMetrics = List.of( - new ClusterMetrics("niceCluster", "container", Map.of("queriesPerSecond", 23.0, "queryLatency", 1337.0)), - new ClusterMetrics("alsoNiceCluster", "container", Map.of("queriesPerSecond", 11.0, "queryLatency", 12.0))); + new ClusterMetrics("niceCluster", "container", Map.of("queriesPerSecond", 23.0, "queryLatency", 1337.0), Map.of()), + new ClusterMetrics("alsoNiceCluster", "container", Map.of("queriesPerSecond", 11.0, "queryLatency", 12.0), Map.of())); DeploymentMetrics deploymentMetrics = DeploymentMetricsMaintainer.updateDeploymentMetrics(DeploymentMetrics.none, clusterMetrics); @@ -131,7 +131,7 @@ public class DeploymentMetricsMaintainerTest { } private void setMetrics(ApplicationId application, Map<String, Double> metrics) { 
- var clusterMetrics = new ClusterMetrics("default", "container", metrics); + var clusterMetrics = new ClusterMetrics("default", "container", metrics, Map.of()); tester.controllerTester().serviceRegistry().configServerMock().setMetrics(new DeploymentId(application, ZoneId.from("dev", "us-east-1")), clusterMetrics); } diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgraderTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgraderTest.java index 3e2fd4ec0b9..664a1fdc83c 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgraderTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgraderTest.java @@ -119,11 +119,13 @@ public class OsUpgraderTest { @Test public void upgrade_os_with_budget() { CloudName cloud = CloudName.from("cloud"); + ZoneApi zone0 = zone("prod.us-north-42", "prod.controller", cloud); ZoneApi zone1 = zone("dev.us-east-1", cloud); ZoneApi zone2 = zone("prod.us-west-1", cloud); ZoneApi zone3 = zone("prod.us-central-1", cloud); ZoneApi zone4 = zone("prod.eu-west-1", cloud); UpgradePolicy upgradePolicy = UpgradePolicy.create() + .upgrade(zone0) .upgrade(zone1) .upgradeInParallel(zone2, zone3) .upgrade(zone4); @@ -133,6 +135,7 @@ public class OsUpgraderTest { List<SystemApplication> nodeTypes = List.of(SystemApplication.configServerHost, SystemApplication.tenantHost); tester.configServer().bootstrap(List.of(zone1.getId(), zone2.getId(), zone3.getId(), zone4.getId()), nodeTypes); + tester.configServer().addNodes(List.of(zone0.getVirtualId()), List.of(SystemApplication.controllerHost)); // Upgrade with budget Version version = Version.fromString("7.1"); @@ -141,7 +144,16 @@ public class OsUpgraderTest { statusUpdater.maintain(); osUpgrader.maintain(); + // Controllers upgrade first + osUpgrader.maintain(); + assertWanted(version, SystemApplication.controllerHost, zone0); + 
assertEquals("Controller zone gets a zero budget", Duration.ZERO, upgradeBudget(zone0, SystemApplication.controllerHost, version)); + completeUpgrade(version, SystemApplication.controllerHost, zone0); + statusUpdater.maintain(); + assertEquals(3, nodesOn(version).size()); + // First zone upgrades + osUpgrader.maintain(); for (var nodeType : nodeTypes) { assertEquals("Dev zone gets a zero budget", Duration.ZERO, upgradeBudget(zone1, nodeType, version)); completeUpgrade(version, nodeType, zone1); diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java index 7b4882de3ff..29d77c38b1a 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java @@ -16,7 +16,6 @@ import java.time.Duration; import java.util.Map; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; /** * Tests the traffic fraction updater. This also tests its dependency on DeploymentMetricsMaintainer. 
@@ -82,7 +81,7 @@ public class TrafficShareUpdaterTest { } private void setQpsMetric(double qps, ApplicationId application, ZoneId zone, DeploymentTester tester) { - var clusterMetrics = new ClusterMetrics("default", "container", Map.of(ClusterMetrics.QUERIES_PER_SECOND, qps)); + var clusterMetrics = new ClusterMetrics("default", "container", Map.of(ClusterMetrics.QUERIES_PER_SECOND, qps), Map.of()); tester.controllerTester().serviceRegistry().configServerMock().setMetrics(new DeploymentId(application, zone), clusterMetrics); } diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java index 484b471cbaa..326f4bf311e 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java @@ -164,7 +164,6 @@ public class UpgraderTest { tester.triggerJobs(); assertEquals("Upgrade with error should retry", 1, tester.jobs().active().size()); - // --- Failing application is repaired by changing the application, causing confidence to move above 'high' threshold // Deploy application change default0.submit(applicationPackage("default")); @@ -1114,11 +1113,32 @@ public class UpgraderTest { assertEquals("Upgrade orders are distinct", versions.size(), upgradeOrders.size()); } + private static final ApplicationPackage canaryApplicationPackage = + new ApplicationPackageBuilder().upgradePolicy("canary") + .region("us-west-1") + .region("us-east-3") + .build(); + + private static final ApplicationPackage defaultApplicationPackage = + new ApplicationPackageBuilder().upgradePolicy("default") + .region("us-west-1") + .region("us-east-3") + .build(); + + private static final ApplicationPackage conservativeApplicationPackage = + new ApplicationPackageBuilder().upgradePolicy("conservative") + 
.region("us-west-1") + .region("us-east-3") + .build(); + + /** Returns empty prebuilt applications for efficiency */ private ApplicationPackage applicationPackage(String upgradePolicy) { - return new ApplicationPackageBuilder().upgradePolicy(upgradePolicy) - .region("us-west-1") - .region("us-east-3") - .build(); + switch (upgradePolicy) { + case "canary" : return canaryApplicationPackage; + case "default" : return defaultApplicationPackage; + case "conservative" : return conservativeApplicationPackage; + default : throw new IllegalArgumentException("No upgrade policy '" + upgradePolicy + "'"); + } } private DeploymentContext createAndDeploy(String applicationName, String upgradePolicy) { diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDbTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDbTest.java index 5bd7d1db769..454a4f81524 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDbTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDbTest.java @@ -22,7 +22,9 @@ import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; @@ -105,57 +107,94 @@ public class NotificationsDbTest { List<Notification> expected = new ArrayList<>(notifications); // No metrics, no new notification - notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of()); + notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of()); assertEquals(expected, curatorDb.readNotifications(tenant)); // Metrics that contain none of the feed block metrics does not create new notification - 
notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of(clusterMetrics("cluster1", null, null, null, null))); + notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", null, null, null, null, Map.of()))); assertEquals(expected, curatorDb.readNotifications(tenant)); // Metrics that only contain util or limit (should not be possible) should not cause any issues - notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.95, null, null, 0.5))); + notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.95, null, null, 0.5, Map.of()))); assertEquals(expected, curatorDb.readNotifications(tenant)); // One resource is at warning - notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.85, 0.9, 0.3, 0.5))); + notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.85, 0.9, 0.3, 0.5, Map.of()))); expected.add(notification(12345, Type.feedBlock, Level.warning, sourceCluster1, "disk (usage: 85.0%, feed block limit: 90.0%)")); assertEquals(expected, curatorDb.readNotifications(tenant)); // Both resources over the limit - notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.95, 0.9, 0.3, 0.5))); + notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.95, 0.9, 0.3, 0.5, Map.of()))); expected.set(6, notification(12345, Type.feedBlock, Level.error, sourceCluster1, "disk (usage: 95.0%, feed block limit: 90.0%)")); assertEquals(expected, curatorDb.readNotifications(tenant)); // One resource at warning, one at error: Only show error message - notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.95, 0.9, 0.7, 0.5))); + 
notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.95, 0.9, 0.7, 0.5, Map.of()))); expected.set(6, notification(12345, Type.feedBlock, Level.error, sourceCluster1, "memory (usage: 70.0%, feed block limit: 50.0%)", "disk (usage: 95.0%, feed block limit: 90.0%)")); assertEquals(expected, curatorDb.readNotifications(tenant)); } @Test - public void feed_blocked_multiple_cluster_test() { + public void deployment_metrics_multiple_cluster_test() { DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenant.value(), "app1", "instance1"), ZoneId.from("prod", "us-south-3")); NotificationSource sourceCluster1 = NotificationSource.from(deploymentId, ClusterSpec.Id.from("cluster1")); NotificationSource sourceCluster2 = NotificationSource.from(deploymentId, ClusterSpec.Id.from("cluster2")); NotificationSource sourceCluster3 = NotificationSource.from(deploymentId, ClusterSpec.Id.from("cluster3")); List<Notification> expected = new ArrayList<>(notifications); - // Cluster1 and cluster2 are having issues - notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of( - clusterMetrics("cluster1", 0.85, 0.9, 0.3, 0.5), clusterMetrics("cluster2", 0.6, 0.8, 0.9, 0.75), clusterMetrics("cluster3", 0.1, 0.8, 0.2, 0.9))); + // Cluster1 and cluster2 are having feed block issues, cluster 3 is reindexing + notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of( + clusterMetrics("cluster1", 0.85, 0.9, 0.3, 0.5, Map.of()), clusterMetrics("cluster2", 0.6, 0.8, 0.9, 0.75, Map.of()), clusterMetrics("cluster3", 0.1, 0.8, 0.2, 0.9, Map.of("announcements", 0.75, "build", 0.5)))); expected.add(notification(12345, Type.feedBlock, Level.warning, sourceCluster1, "disk (usage: 85.0%, feed block limit: 90.0%)")); expected.add(notification(12345, Type.feedBlock, Level.error, sourceCluster2, "memory (usage: 90.0%, feed block limit: 75.0%)")); + expected.add(notification(12345, Type.reindex, Level.info, 
sourceCluster3, "document type 'announcements' (75.0% done)", "document type 'build' (50.0% done)")); assertEquals(expected, curatorDb.readNotifications(tenant)); - // Cluster1 improves, while cluster3 starts having issues - notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of( - clusterMetrics("cluster1", 0.15, 0.9, 0.3, 0.5), clusterMetrics("cluster2", 0.6, 0.8, 0.9, 0.75), clusterMetrics("cluster3", 0.75, 0.8, 0.2, 0.9))); + // Cluster1 improves, while cluster3 starts having feed block issues and finishes reindexing 'build' documents + notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of( + clusterMetrics("cluster1", 0.15, 0.9, 0.3, 0.5, Map.of()), clusterMetrics("cluster2", 0.6, 0.8, 0.9, 0.75, Map.of()), clusterMetrics("cluster3", 0.75, 0.8, 0.2, 0.9, Map.of("announcements", 0.9)))); expected.set(6, notification(12345, Type.feedBlock, Level.error, sourceCluster2, "memory (usage: 90.0%, feed block limit: 75.0%)")); expected.set(7, notification(12345, Type.feedBlock, Level.warning, sourceCluster3, "disk (usage: 75.0%, feed block limit: 80.0%)")); + expected.set(8, notification(12345, Type.reindex, Level.info, sourceCluster3, "document type 'announcements' (90.0% done)")); assertEquals(expected, curatorDb.readNotifications(tenant)); } + @Test + public void removes_invalid_deployment_notifications() { + curatorDb.deleteNotifications(tenant); // Remove notifications set in init() + + ZoneId z1 = ZoneId.from("prod", "us-west-1"); + ZoneId z2 = ZoneId.from("prod", "eu-south-2"); + DeploymentId d1 = new DeploymentId(ApplicationId.from("t1", "a1", "i1"), z1); + DeploymentId d2 = new DeploymentId(ApplicationId.from("t1", "a1", "i1"), z2); + DeploymentId d3 = new DeploymentId(ApplicationId.from("t1", "a1", "i2"), z1); + DeploymentId d4 = new DeploymentId(ApplicationId.from("t1", "a2", "i1"), z2); + DeploymentId d5 = new DeploymentId(ApplicationId.from("t2", "a1", "i1"), z2); + + List<Notification> notifications = Stream.of(d1, 
d2, d3, d4, d5) + .flatMap(deployment -> Stream.of(Type.deployment, Type.feedBlock) + .map(type -> new Notification(Instant.EPOCH, type, Level.warning, NotificationSource.from(deployment), List.of("msg")))) + .collect(Collectors.toUnmodifiableList()); + notifications.stream().collect(Collectors.groupingBy(notification -> notification.source().tenant(), Collectors.toList())) + .forEach(curatorDb::writeNotifications); + + // All except d3 plus a deployment that has no notifications + Set<DeploymentId> allDeployments = Set.of(d1, d2, d4, d5, new DeploymentId(ApplicationId.from("t3", "a1", "i1"), z1)); + notificationsDb.removeNotificationsForRemovedInstances(allDeployments); + + List<Notification> expectedNotifications = new ArrayList<>(notifications); + // Only the deployment notification for d3 should be cleared (the other types already correctly clear themselves) + expectedNotifications.remove(4); + + List<Notification> actualNotifications = curatorDb.listNotifications().stream() + .flatMap(tenant -> curatorDb.readNotifications(tenant).stream()) + .collect(Collectors.toUnmodifiableList()); + + assertEquals(expectedNotifications.stream().map(Notification::toString).collect(Collectors.joining("\n")), + actualNotifications.stream().map(Notification::toString).collect(Collectors.joining("\n"))); + } + @Before public void init() { curatorDb.writeNotifications(tenant, notifications); @@ -169,12 +208,14 @@ public class NotificationsDbTest { return new Notification(Instant.ofEpochSecond(secondsSinceEpoch), type, level, source, List.of(messages)); } - private static ClusterMetrics clusterMetrics(String clusterId, Double diskUtil, Double diskLimit, Double memoryUtil, Double memoryLimit) { + private static ClusterMetrics clusterMetrics(String clusterId, + Double diskUtil, Double diskLimit, Double memoryUtil, Double memoryLimit, + Map<String, Double> reindexingProgress) { Map<String, Double> metrics = new HashMap<>(); if (diskUtil != null) metrics.put(ClusterMetrics.DISK_UTIL, 
diskUtil); if (diskLimit != null) metrics.put(ClusterMetrics.DISK_FEED_BLOCK_LIMIT, diskLimit); if (memoryUtil != null) metrics.put(ClusterMetrics.MEMORY_UTIL, memoryUtil); if (memoryLimit != null) metrics.put(ClusterMetrics.MEMORY_FEED_BLOCK_LIMIT, memoryLimit); - return new ClusterMetrics(clusterId, "content", metrics); + return new ClusterMetrics(clusterId, "content", metrics, reindexingProgress); } } diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandlerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandlerTest.java index d87da62b8f2..80cee3af58b 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandlerTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandlerTest.java @@ -27,7 +27,6 @@ import java.time.Instant; import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.List; -import java.util.UUID; import static org.junit.Assert.assertEquals; @@ -52,6 +51,7 @@ public class ChangeManagementApiHandlerTest extends ControllerContainerTest { @Test public void test_api() { assertFile(new Request("http://localhost:8080/changemanagement/v1/assessment", "{\"zone\":\"prod.us-east-3\", \"hosts\": [\"host1\"]}", Request.Method.POST), "initial.json"); + assertFile(new Request("http://localhost:8080/changemanagement/v1/assessment", "{\"zone\":\"prod.us-east-3\", \"switches\": [\"switch1\"]}", Request.Method.POST), "initial.json"); assertFile(new Request("http://localhost:8080/changemanagement/v1/vcmr"), "vcmrs.json"); } @@ -98,6 +98,7 @@ public class ChangeManagementApiHandlerTest extends ControllerContainerTest { private Node createNode() { return new Node.Builder() .hostname(HostName.from("host1")) + .switchHostname("switch1") .build(); } diff --git 
a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/responses/maintenance.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/responses/maintenance.json index 3cf79977fb8..914ea2f5518 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/responses/maintenance.json +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/responses/maintenance.json @@ -19,6 +19,9 @@ "name": "CloudEventReporter" }, { + "name": "CloudTrialExpirer" + }, + { "name": "ContactInformationMaintainer" }, { diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/application-roles.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/application-roles.json index 9bd66c16308..ca437dba761 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/application-roles.json +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/application-roles.json @@ -6,11 +6,13 @@ { "name": "administrator@tenant", "email": "administrator@tenant", + "verified": false, "roles": {} }, { "name": "developer@tenant", "email": "developer@tenant", + "verified": false, "roles": {} } ] diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/tenant-roles.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/tenant-roles.json index 6a1c4c88878..bc921e4bdf4 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/tenant-roles.json +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/tenant-roles.json @@ -9,6 +9,7 @@ { "name": "administrator@tenant", "email": "administrator@tenant", + "verified": false, "roles": { "administrator": { 
"explicit": true, @@ -27,6 +28,7 @@ { "name": "developer@tenant", "email": "developer@tenant", + "verified": false, "roles": { "administrator": { "explicit": false, diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-athenz.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-athenz.json index 2ae3514bec3..5d3a38334ad 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-athenz.json +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-athenz.json @@ -6,7 +6,8 @@ "user": { "name": "Joe Developer", "email": "dev@domail", - "nickname": "dev" + "nickname": "dev", + "verified": false }, "tenants": { "sandbox": { diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json index 2d2a137c2ca..ae3dc68d9e3 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json @@ -6,7 +6,8 @@ "user": { "name": "Joe Developer", "email": "dev@domail", - "nickname": "dev" + "nickname": "dev", + "verified":false }, "tenants": { "sandbox": { diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-applications.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-applications.json index e03a18a1949..3bf999b490b 100644 --- 
a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-applications.json +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-applications.json @@ -6,7 +6,8 @@ "user": { "name": "Joe Developer", "email": "dev@domail", - "nickname": "dev" + "nickname": "dev", + "verified":false }, "tenants": {}, "operator": [ diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-trial-capacity-cloud.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-trial-capacity-cloud.json index a7410b14850..27242424579 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-trial-capacity-cloud.json +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-trial-capacity-cloud.json @@ -6,7 +6,8 @@ "user": { "name": "Joe Developer", "email": "dev@domail", - "nickname": "dev" + "nickname": "dev", + "verified":false }, "tenants": {} }
\ No newline at end of file diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java index 047a4461f7c..79b564eee52 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java @@ -66,9 +66,9 @@ public class RoutingPoliciesTest { private static final ZoneId zone3 = ZoneId.from("prod", "aws-us-east-1a"); private static final ZoneId zone4 = ZoneId.from("prod", "aws-us-east-1b"); - private final ApplicationPackage applicationPackage = applicationPackageBuilder().region(zone1.region()) - .region(zone2.region()) - .build(); + private static final ApplicationPackage applicationPackage = applicationPackageBuilder().region(zone1.region()) + .region(zone2.region()) + .build(); @Test public void global_routing_policies() { diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java index 77ce86f1664..4dd283cf5d7 100644 --- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java +++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java @@ -127,11 +127,7 @@ public class VersionStatusTest { @Test public void testVersionStatusAfterApplicationUpdates() { DeploymentTester tester = new DeploymentTester(); - ApplicationPackage applicationPackage = new ApplicationPackageBuilder() - .upgradePolicy("default") - .region("us-west-1") - .region("us-east-3") - .build(); + ApplicationPackage applicationPackage = applicationPackage("default"); Version version1 = new Version("6.2"); Version version2 = new Version("6.3"); @@ -216,10 +212,9 @@ public 
class VersionStatusTest { Version version0 = new Version("6.2"); tester.controllerTester().upgradeSystem(version0); tester.upgrader().maintain(); - var builder = new ApplicationPackageBuilder().region("us-west-1").region("us-east-3"); // Setup applications - all running on version0 - ApplicationPackage canaryPolicy = builder.upgradePolicy("canary").build(); + ApplicationPackage canaryPolicy = applicationPackage("canary"); var canary0 = tester.newDeploymentContext("tenant1", "canary0", "default") .submit(canaryPolicy) .deploy(); @@ -230,7 +225,7 @@ public class VersionStatusTest { .submit(canaryPolicy) .deploy(); - ApplicationPackage defaultPolicy = builder.upgradePolicy("default").build(); + ApplicationPackage defaultPolicy = applicationPackage("default"); var default0 = tester.newDeploymentContext("tenant1", "default0", "default") .submit(defaultPolicy) .deploy(); @@ -262,7 +257,7 @@ public class VersionStatusTest { .submit(defaultPolicy) .deploy(); - ApplicationPackage conservativePolicy = builder.upgradePolicy("conservative").build(); + ApplicationPackage conservativePolicy = applicationPackage("conservative"); var conservative0 = tester.newDeploymentContext("tenant1", "conservative0", "default") .submit(conservativePolicy) .deploy(); @@ -388,10 +383,10 @@ public class VersionStatusTest { Version version0 = new Version("6.2"); tester.controllerTester().upgradeSystem(version0); tester.upgrader().maintain(); - var appPackage = new ApplicationPackageBuilder().region("us-west-1").region("us-east-3").upgradePolicy("canary"); + var appPackage = applicationPackage("canary"); var canary0 = tester.newDeploymentContext("tenant1", "canary0", "default") - .submit(appPackage.build()) + .submit(appPackage) .deploy(); assertEquals("All applications running on this version: High", @@ -537,13 +532,13 @@ public class VersionStatusTest { Version version0 = Version.fromString("7.1"); tester.controllerTester().upgradeSystem(version0); var canary0 = 
tester.newDeploymentContext("tenant1", "canary0", "default") - .submit(new ApplicationPackageBuilder().upgradePolicy("canary").region("us-west-1").build()) + .submit(applicationPackage("canary")) .deploy(); var canary1 = tester.newDeploymentContext("tenant1", "canary1", "default") - .submit(new ApplicationPackageBuilder().upgradePolicy("canary").region("us-west-1").build()) + .submit(applicationPackage("canary")) .deploy(); var default0 = tester.newDeploymentContext("tenant1", "default0", "default") - .submit(new ApplicationPackageBuilder().upgradePolicy("default").region("us-west-1").build()) + .submit(applicationPackage("default")) .deploy(); tester.controllerTester().computeVersionStatus(); assertSame(Confidence.high, tester.controller().readVersionStatus().version(version0).confidence()); @@ -609,12 +604,11 @@ public class VersionStatusTest { public void testStatusIncludesIncompleteUpgrades() { var tester = new DeploymentTester().atMondayMorning(); var version0 = Version.fromString("7.1"); - var applicationPackage = new ApplicationPackageBuilder().region("us-west-1").build(); // Application deploys on initial version tester.controllerTester().upgradeSystem(version0); var context = tester.newDeploymentContext("tenant1", "default0", "default"); - context.submit(applicationPackage).deploy(); + context.submit(applicationPackage("default")).deploy(); // System is upgraded and application starts upgrading to next version var version1 = Version.fromString("7.2"); @@ -688,4 +682,32 @@ public class VersionStatusTest { .orElseThrow(() -> new IllegalArgumentException("Expected to find version: " + version)); } + private static final ApplicationPackage canaryApplicationPackage = + new ApplicationPackageBuilder().upgradePolicy("canary") + .region("us-west-1") + .region("us-east-3") + .build(); + + private static final ApplicationPackage defaultApplicationPackage = + new ApplicationPackageBuilder().upgradePolicy("default") + .region("us-west-1") + .region("us-east-3") + 
.build(); + + private static final ApplicationPackage conservativeApplicationPackage = + new ApplicationPackageBuilder().upgradePolicy("conservative") + .region("us-west-1") + .region("us-east-3") + .build(); + + /** Returns empty prebuilt applications for efficiency */ + private ApplicationPackage applicationPackage(String upgradePolicy) { + switch (upgradePolicy) { + case "canary" : return canaryApplicationPackage; + case "default" : return defaultApplicationPackage; + case "conservative" : return conservativeApplicationPackage; + default : throw new IllegalArgumentException("No upgrade policy '" + upgradePolicy + "'"); + } + } + } diff --git a/default_build_settings.cmake b/default_build_settings.cmake index 3fd93bd0c25..a61410ebf31 100644 --- a/default_build_settings.cmake +++ b/default_build_settings.cmake @@ -31,11 +31,7 @@ endfunction() function(setup_vespa_default_build_settings_centos_8) message("-- Setting up default build settings for centos 8") set(DEFAULT_EXTRA_INCLUDE_DIRECTORY "${VESPA_DEPS}/include" "/usr/include/openblas" PARENT_SCOPE) - if (VESPA_OS_DISTRO_NAME STREQUAL "CentOS Stream") - set(DEFAULT_VESPA_LLVM_VERSION "11" PARENT_SCOPE) - else() - set(DEFAULT_VESPA_LLVM_VERSION "10" PARENT_SCOPE) - endif() + set(DEFAULT_VESPA_LLVM_VERSION "11" PARENT_SCOPE) endfunction() function(setup_vespa_default_build_settings_darwin) diff --git a/dist/vespa.spec b/dist/vespa.spec index 109a108a859..f10d6cd3bc3 100644 --- a/dist/vespa.spec +++ b/dist/vespa.spec @@ -16,6 +16,7 @@ %define _create_vespa_user 1 %define _create_vespa_service 1 %define _defattr_is_vespa_vespa 0 +%define _command_cmake cmake3 Name: vespa Version: _VESPA_VERSION_ @@ -42,11 +43,11 @@ BuildRequires: maven %define _java_home /usr/lib/jvm/java-11-amazon-corretto.%{?_arch} BuildRequires: python3-pytest %else -BuildRequires: devtoolset-9-gcc-c++ -BuildRequires: devtoolset-9-libatomic-devel -BuildRequires: devtoolset-9-binutils +BuildRequires: devtoolset-10-gcc-c++ +BuildRequires: 
devtoolset-10-libatomic-devel +BuildRequires: devtoolset-10-binutils BuildRequires: rh-maven35 -%define _devtoolset_enable /opt/rh/devtoolset-9/enable +%define _devtoolset_enable /opt/rh/devtoolset-10/enable %define _rhmaven35_enable /opt/rh/rh-maven35/enable BuildRequires: python36-pytest %endif @@ -54,19 +55,9 @@ BuildRequires: vespa-pybind11-devel BuildRequires: python3-devel %endif %if 0%{?el8} -%if 0%{?centos} -%global _centos_stream %(grep -qs '^NAME="CentOS Stream"' /etc/os-release && echo 1 || echo 0) -%endif -%if 0%{?_centos_stream} BuildRequires: gcc-toolset-10-gcc-c++ BuildRequires: gcc-toolset-10-binutils %define _devtoolset_enable /opt/rh/gcc-toolset-10/enable -BuildRequires: vespa-boost-devel >= 1.75.0-1 -%else -BuildRequires: gcc-toolset-9-gcc-c++ -BuildRequires: gcc-toolset-9-binutils -%define _devtoolset_enable /opt/rh/gcc-toolset-9/enable -%endif BuildRequires: maven BuildRequires: pybind11-devel BuildRequires: python3-pytest @@ -82,7 +73,7 @@ BuildRequires: python3-devel %if 0%{?el7} BuildRequires: cmake3 BuildRequires: llvm7.0-devel -BuildRequires: vespa-boost-devel >= 1.59.0-6 +BuildRequires: vespa-boost-devel >= 1.76.0-1 BuildRequires: vespa-gtest >= 1.8.1-1 BuildRequires: vespa-icu-devel >= 65.1.0-1 BuildRequires: vespa-lz4-devel >= 1.9.2-2 @@ -101,12 +92,15 @@ BuildRequires: vespa-libzstd-devel >= 1.4.5-2 %endif %if 0%{?el8} BuildRequires: cmake >= 3.11.4-3 -%if 0%{?_centos_stream} -BuildRequires: llvm-devel >= 11.0.0 +%if 0%{?centos} +# Current cmake on CentOS 8 is broken and manually requires libarchive install +BuildRequires: libarchive +%define _command_cmake cmake +BuildRequires: (llvm-devel >= 11.0.0 and llvm-devel < 12) %else -BuildRequires: llvm-devel >= 10.0.1 +BuildRequires: (llvm-devel >= 10.0.1 and llvm-devel < 11) %endif -BuildRequires: boost-devel >= 1.66 +BuildRequires: vespa-boost-devel >= 1.76.0-1 BuildRequires: openssl-devel BuildRequires: vespa-gtest >= 1.8.1-1 BuildRequires: vespa-lz4-devel >= 1.9.2-2 @@ -152,7 +146,7 @@ 
BuildRequires: gmock-devel %endif %if 0%{?el7} && 0%{?amzn2} BuildRequires: vespa-xxhash-devel = 0.8.0 -BuildRequires: vespa-openblas-devel = 0.3.12 +BuildRequires: vespa-openblas-devel = 0.3.15 BuildRequires: vespa-re2-devel = 20190801 %else BuildRequires: xxhash-devel >= 0.8.0 @@ -225,7 +219,7 @@ Requires: vespa-valgrind >= 3.17.0-1 %endif %endif %if 0%{?el8} -%if 0%{?_centos_stream} +%if 0%{?centos} %define _vespa_llvm_version 11 %else %define _vespa_llvm_version 10 @@ -315,7 +309,7 @@ Requires: vespa-libzstd >= 1.4.5-2 Requires: openblas %else %if 0%{?amzn2} -Requires: vespa-openblas +Requires: vespa-openblas = 0.3.15 %else Requires: openblas-serial %endif @@ -353,10 +347,10 @@ Requires: libicu Requires: openssl-libs %endif %if 0%{?el8} -%if 0%{?_centos_stream} -Requires: llvm-libs >= 11.0.0 +%if 0%{?centos} +Requires: (llvm-libs >= 11.0.0 and llvm-libs < 12) %else -Requires: llvm-libs >= 10.0.1 +Requires: (llvm-libs >= 10.0.1 and llvm-libs < 11) %endif Requires: vespa-protobuf = 3.7.0-5.el8 %endif @@ -488,7 +482,7 @@ mvn --batch-mode -e -N io.takari:maven:wrapper -Dmaven=3.6.3 %endif %{?_use_mvn_wrapper:env VESPA_MAVEN_COMMAND=$(pwd)/mvnw }sh bootstrap.sh java %{?_use_mvn_wrapper:./mvnw}%{!?_use_mvn_wrapper:mvn} --batch-mode -nsu -T 1C install -Dmaven.test.skip=true -Dmaven.javadoc.skip=true -cmake3 -DCMAKE_INSTALL_PREFIX=%{_prefix} \ +%{_command_cmake} -DCMAKE_INSTALL_PREFIX=%{_prefix} \ -DJAVA_HOME=$JAVA_HOME \ -DCMAKE_PREFIX_PATH=%{_vespa_deps_prefix} \ -DEXTRA_LINK_DIRECTORY="%{_extra_link_directory}" \ diff --git a/document/src/main/java/com/yahoo/document/StructDataType.java b/document/src/main/java/com/yahoo/document/StructDataType.java index 73fe580308e..8a153856eff 100644 --- a/document/src/main/java/com/yahoo/document/StructDataType.java +++ b/document/src/main/java/com/yahoo/document/StructDataType.java @@ -22,7 +22,7 @@ public class StructDataType extends BaseStructDataType { super(name); } - public StructDataType(int id,String name) { + public 
StructDataType(int id, String name) { super(id, name); } diff --git a/document/src/main/java/com/yahoo/document/StructuredDataType.java b/document/src/main/java/com/yahoo/document/StructuredDataType.java index e4bb94a5465..8a5f344e79e 100644 --- a/document/src/main/java/com/yahoo/document/StructuredDataType.java +++ b/document/src/main/java/com/yahoo/document/StructuredDataType.java @@ -10,8 +10,6 @@ import java.util.Collection; import java.util.List; /** - * TODO: What is this and why - * * @author HÃ¥kon Humberset */ public abstract class StructuredDataType extends DataType { diff --git a/document/src/main/java/com/yahoo/document/TemporaryStructuredDataType.java b/document/src/main/java/com/yahoo/document/TemporaryStructuredDataType.java index 0449612da6f..f4139a597d2 100644 --- a/document/src/main/java/com/yahoo/document/TemporaryStructuredDataType.java +++ b/document/src/main/java/com/yahoo/document/TemporaryStructuredDataType.java @@ -2,7 +2,8 @@ package com.yahoo.document; /** - * Internal class, DO NOT USE!! Only public because it must be used from com.yahoo.searchdefinition.parser. + * Internal class, DO NOT USE!! + * Only public because it must be used from com.yahoo.searchdefinition.parser. 
* * @author Einar M R Rosenvinge */ diff --git a/eval/src/vespa/eval/eval/array_array_map.h b/eval/src/vespa/eval/eval/array_array_map.h index 89fa0c77819..f49e3e4edbb 100644 --- a/eval/src/vespa/eval/eval/array_array_map.h +++ b/eval/src/vespa/eval/eval/array_array_map.h @@ -116,7 +116,7 @@ private: _keys.push_back(k); } } - _values.resize(_values.size() + _values_per_entry, V{}); + _values.resize(_values.size() + _values_per_entry); auto [pos, was_inserted] = _map.insert(MyKey{{tag_id},hash}); assert(was_inserted); return Tag{tag_id}; diff --git a/flags/pom.xml b/flags/pom.xml index 4f1bdcb61e3..3774ab3bf5f 100644 --- a/flags/pom.xml +++ b/flags/pom.xml @@ -93,6 +93,11 @@ <artifactId>junit-jupiter</artifactId> <scope>test</scope> </dependency> + <dependency> + <groupId>org.junit.vintage</groupId> + <artifactId>junit-vintage-engine</artifactId> + <scope>test</scope> + </dependency> </dependencies> <build> <plugins> diff --git a/flags/src/main/java/com/yahoo/vespa/flags/JsonNodeRawFlag.java b/flags/src/main/java/com/yahoo/vespa/flags/JsonNodeRawFlag.java index 92b7b3bc04d..ec49c1b0eff 100644 --- a/flags/src/main/java/com/yahoo/vespa/flags/JsonNodeRawFlag.java +++ b/flags/src/main/java/com/yahoo/vespa/flags/JsonNodeRawFlag.java @@ -6,6 +6,7 @@ import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import java.util.Collection; +import java.util.concurrent.atomic.AtomicReference; import static com.yahoo.yolean.Exceptions.uncheck; @@ -15,7 +16,8 @@ import static com.yahoo.yolean.Exceptions.uncheck; * @author hakonhall */ public class JsonNodeRawFlag implements RawFlag { - private static final ObjectMapper mapper = new ObjectMapper(); + + private static final AtomicReference<ObjectMapper> mapper = new AtomicReference<>(); private final JsonNode jsonNode; @@ -24,7 +26,7 @@ public class JsonNodeRawFlag implements RawFlag { } public static JsonNodeRawFlag fromJson(String json) { - return new JsonNodeRawFlag(uncheck(() -> 
mapper.readTree(json))); + return new JsonNodeRawFlag(uncheck(() -> objectMapper().readTree(json))); } public static JsonNodeRawFlag fromJsonNode(JsonNode jsonNode) { @@ -32,20 +34,20 @@ public class JsonNodeRawFlag implements RawFlag { } public static <T> JsonNodeRawFlag fromJacksonClass(T value) { - return new JsonNodeRawFlag(uncheck(() -> mapper.valueToTree(value))); + return new JsonNodeRawFlag(uncheck(() -> objectMapper().valueToTree(value))); } public <T> T toJacksonClass(Class<T> jacksonClass) { - return uncheck(() -> mapper.treeToValue(jsonNode, jacksonClass)); + return uncheck(() -> objectMapper().treeToValue(jsonNode, jacksonClass)); } public <T> T toJacksonClass(JavaType jacksonClass) { - return uncheck(() -> mapper.readValue(jsonNode.toString(), jacksonClass)); + return uncheck(() -> objectMapper().readValue(jsonNode.toString(), jacksonClass)); } @SuppressWarnings("rawtypes") public static JavaType constructCollectionType(Class<? extends Collection> collectionClass, Class<?> elementClass) { - return mapper.getTypeFactory().constructCollectionType(collectionClass, elementClass); + return objectMapper().getTypeFactory().constructCollectionType(collectionClass, elementClass); } @Override @@ -57,4 +59,14 @@ public class JsonNodeRawFlag implements RawFlag { public String asJson() { return jsonNode.toString(); } + + /** Initialize object mapper lazily */ + private static ObjectMapper objectMapper() { + // ObjectMapper is a heavy-weight object so we construct it only when we need it + return mapper.updateAndGet((objectMapper) -> { + if (objectMapper != null) return objectMapper; + return new ObjectMapper(); + }); + } + } diff --git a/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java b/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java index 52b09a281d5..1b7f0c034a3 100644 --- a/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java +++ b/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java @@ -163,6 +163,13 @@ public 
class PermanentFlags { "Takes effect immediately, but any current excess rebuilds will not be cancelled" ); + public static final UnboundListFlag<String> EXTENDED_TRIAL_TENANTS = defineListFlag( + "extended-trial-tenants", List.of(), String.class, + "Tenants that will not be expired from their trial plan", + "Takes effect immediately, used by the CloudTrialExpirer maintainer", + TENANT_ID + ); + private PermanentFlags() {} private static UnboundBooleanFlag defineFeatureFlag( diff --git a/fnet/src/vespa/fnet/connection.cpp b/fnet/src/vespa/fnet/connection.cpp index 47d6a1e429a..4315e76f7ef 100644 --- a/fnet/src/vespa/fnet/connection.cpp +++ b/fnet/src/vespa/fnet/connection.cpp @@ -491,10 +491,10 @@ FNET_Connection::FNET_Connection(FNET_TransportThread *owner, _packetCHID(0), _writeWork(0), _currentID(1), // <-- NB - _input(FNET_READ_SIZE * 2), + _input(0), _queue(256), _myQueue(256), - _output(FNET_WRITE_SIZE * 2), + _output(0), _channels(), _callbackTarget(nullptr), _cleanup(nullptr) @@ -525,10 +525,10 @@ FNET_Connection::FNET_Connection(FNET_TransportThread *owner, _packetCHID(0), _writeWork(0), _currentID(0), - _input(FNET_READ_SIZE * 2), + _input(0), _queue(256), _myQueue(256), - _output(FNET_WRITE_SIZE * 2), + _output(0), _channels(), _callbackTarget(nullptr), _cleanup(nullptr) diff --git a/fnet/src/vespa/fnet/connection.h b/fnet/src/vespa/fnet/connection.h index 3da9b58f928..532bd7c6638 100644 --- a/fnet/src/vespa/fnet/connection.h +++ b/fnet/src/vespa/fnet/connection.h @@ -11,6 +11,7 @@ #include <vespa/vespalib/net/socket_handle.h> #include <vespa/vespalib/net/async_resolver.h> #include <vespa/vespalib/net/crypto_socket.h> +#include <vespa/vespalib/util/size_literals.h> #include <atomic> class FNET_IPacketStreamer; @@ -60,9 +61,9 @@ public: }; enum { - FNET_READ_SIZE = 32768, + FNET_READ_SIZE = 16_Ki, FNET_READ_REDO = 10, - FNET_WRITE_SIZE = 32768, + FNET_WRITE_SIZE = 16_Ki, FNET_WRITE_REDO = 10 }; diff --git 
a/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotator.java b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotator.java index 8b6ef83f05e..81a5305a778 100644 --- a/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotator.java +++ b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotator.java @@ -115,9 +115,7 @@ public class LinguisticsAnnotator { } return; } - if ( ! token.isIndexable()) { - return; - } + if ( ! token.isIndexable()) return; } String orig = token.getOrig(); int pos = (int)token.getOffset(); @@ -138,9 +136,6 @@ public class LinguisticsAnnotator { String lowercasedTerm = lowercasedOrig; String term = token.getTokenString(); if (term != null) { - term = tokenizer.getReplacementTerm(term); - } - if (term != null) { lowercasedTerm = toLowerCase(term); } if (! lowercasedOrig.equals(lowercasedTerm)) { @@ -155,12 +150,7 @@ public class LinguisticsAnnotator { } } else { String term = token.getTokenString(); - if (term != null) { - term = tokenizer.getReplacementTerm(term); - } - if (term == null || term.trim().isEmpty()) { - return; - } + if (term == null || term.trim().isEmpty()) return; if (termOccurrences.termCountBelowLimit(term)) { parent.span(pos, len).annotate(lowerCaseTermAnnotation(term, token.getOrig())); } diff --git a/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotatorTestCase.java b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotatorTestCase.java index afbcf597a46..5f436720990 100644 --- a/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotatorTestCase.java +++ b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotatorTestCase.java @@ -19,6 +19,7 @@ import org.junit.Test; import org.mockito.Mockito; import 
java.util.*; +import java.util.stream.Collectors; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -30,12 +31,6 @@ public class LinguisticsAnnotatorTestCase { private static final AnnotatorConfig CONFIG = new AnnotatorConfig(); - // -------------------------------------------------------------------------------- - // - // Tests - // - // -------------------------------------------------------------------------------- - @Test public void requireThatAnnotateFailsWithZeroTokens() { assertAnnotations(null, "foo"); @@ -145,7 +140,7 @@ public class LinguisticsAnnotatorTestCase { continue; } assertAnnotations(expected, "foo", - newLinguistics(Arrays.asList(newToken("foo", "foo", type, specialToken)), + newLinguistics(List.of(newToken("foo", "foo", type, specialToken)), Collections.singletonMap("foo", "bar"))); } } @@ -159,7 +154,7 @@ public class LinguisticsAnnotatorTestCase { StringFieldValue val = new StringFieldValue("foo"); val.setSpanTree(spanTree); - Linguistics linguistics = newLinguistics(Arrays.asList(newToken("foo", "bar", TokenType.ALPHABETIC, false)), + Linguistics linguistics = newLinguistics(List.of(newToken("foo", "bar", TokenType.ALPHABETIC, false)), Collections.<String, String>emptyMap()); new LinguisticsAnnotator(linguistics, CONFIG).annotate(val); @@ -253,11 +248,15 @@ public class LinguisticsAnnotatorTestCase { private static class MyTokenizer implements Tokenizer { final List<Token> tokens; - final Map<String, String> replacementTerms; public MyTokenizer(List<? 
extends Token> tokens, Map<String, String> replacementTerms) { - this.tokens = new ArrayList<>(tokens); - this.replacementTerms = replacementTerms; + this.tokens = tokens.stream().map(token -> replace(token, replacementTerms)).collect(Collectors.toList()); + } + + private Token replace(Token token, Map<String, String> replacementTerms) { + var simpleToken = (SimpleToken)token; + simpleToken.setTokenString(replacementTerms.getOrDefault(token.getTokenString(), token.getTokenString())); + return simpleToken; } @Override @@ -265,10 +264,6 @@ public class LinguisticsAnnotatorTestCase { return tokens; } - @Override - public String getReplacementTerm(String term) { - String replacement = replacementTerms.get(term); - return replacement != null ? replacement : term; - } } + } diff --git a/jrt/src/com/yahoo/jrt/TlsCryptoSocket.java b/jrt/src/com/yahoo/jrt/TlsCryptoSocket.java index 7ba83d6718e..09bb584c983 100644 --- a/jrt/src/com/yahoo/jrt/TlsCryptoSocket.java +++ b/jrt/src/com/yahoo/jrt/TlsCryptoSocket.java @@ -51,11 +51,13 @@ public class TlsCryptoSocket implements CryptoSocket { public TlsCryptoSocket(SocketChannel channel, SSLEngine sslEngine) { this.channel = channel; this.sslEngine = sslEngine; + this.wrapBuffer = new Buffer(0); + this.unwrapBuffer = new Buffer(0); SSLSession nullSession = sslEngine.getSession(); - this.wrapBuffer = new Buffer(Math.max(0x8000, nullSession.getPacketBufferSize())); - this.unwrapBuffer = new Buffer(Math.max(0x8000, nullSession.getPacketBufferSize())); + sessionApplicationBufferSize = nullSession.getApplicationBufferSize(); + sessionPacketBufferSize = nullSession.getPacketBufferSize(); // Note: Dummy buffer as unwrap requires a full size application buffer even though no application data is unwrapped - this.handshakeDummyBuffer = ByteBuffer.allocate(nullSession.getApplicationBufferSize()); + this.handshakeDummyBuffer = ByteBuffer.allocate(sessionApplicationBufferSize); this.handshakeState = HandshakeState.NOT_STARTED; log.fine(() -> 
"Initialized with " + sslEngine.toString()); } diff --git a/linguistics/src/main/java/com/yahoo/language/process/Token.java b/linguistics/src/main/java/com/yahoo/language/process/Token.java index 73c0ac857ab..70b78ef1a92 100644 --- a/linguistics/src/main/java/com/yahoo/language/process/Token.java +++ b/linguistics/src/main/java/com/yahoo/language/process/Token.java @@ -38,12 +38,12 @@ public interface Token { TokenScript getScript(); /** - * Returns token string in a form suitable for indexing: The - * most lowercased variant of the most processed token form available. + * Returns the token string in a form suitable for indexing: The + * most lowercased variant of the most processed token form available, * If called on a compound token this returns a lowercased form of the * entire word. - * - * @return token string value + * If this is a special token with a configured replacement, + * this will return the replacement token. */ String getTokenString(); diff --git a/linguistics/src/main/java/com/yahoo/language/process/Tokenizer.java b/linguistics/src/main/java/com/yahoo/language/process/Tokenizer.java index 7e61cd885a8..5be0a6fa635 100644 --- a/linguistics/src/main/java/com/yahoo/language/process/Tokenizer.java +++ b/linguistics/src/main/java/com/yahoo/language/process/Tokenizer.java @@ -23,16 +23,11 @@ public interface Tokenizer { Iterable<Token> tokenize(String input, Language language, StemMode stemMode, boolean removeAccents); /** - * Return a replacement for an input token string. - * This accepts strings returned by Token.getTokenString - * and returns a replacement which will be used as the index token. - * The input token string is returned if there is no replacement. - * <p> - * This default implementation always returns the input token string. + * Not used. 
* - * @param tokenString the token string of the term to lookup a replacement for - * @return the replacement, if any, or the argument token string if not + * @deprecated replacements are already applied in tokens returned by tokenize */ + @Deprecated // Remove on Vespa 8 default String getReplacementTerm(String tokenString) { return tokenString; } } diff --git a/linguistics/src/main/java/com/yahoo/language/simple/SimpleToken.java b/linguistics/src/main/java/com/yahoo/language/simple/SimpleToken.java index 122b9b6dff6..7b63650fa94 100644 --- a/linguistics/src/main/java/com/yahoo/language/simple/SimpleToken.java +++ b/linguistics/src/main/java/com/yahoo/language/simple/SimpleToken.java @@ -25,6 +25,10 @@ public class SimpleToken implements Token { this.orig = orig; } + public SimpleToken(String orig, String tokenString) { + this.orig = orig; + } + @Override public String getOrig() { return orig; diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeReports.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeReports.java index ad9ce84f590..d3ab6464822 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeReports.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeReports.java @@ -77,6 +77,17 @@ public class NodeReports { return new TreeMap<>(reports); } + /** Apply the override to this. null value means removing report. 
*/ + public void updateFromRawMap(Map<String, JsonNode> override) { + override.forEach((reportId, jsonNode) -> { + if (jsonNode == null) { + reports.remove(reportId); + } else { + reports.put(reportId, jsonNode); + } + }); + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java index 7408041462c..fa1f8528b31 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java @@ -604,7 +604,7 @@ public class NodeSpec { attributes.getCurrentOsVersion().ifPresent(this::currentOsVersion); attributes.getRebootGeneration().ifPresent(this::currentRebootGeneration); attributes.getRestartGeneration().ifPresent(this::currentRestartGeneration); - NodeReports.fromMap(attributes.getReports()); + this.reports.updateFromRawMap(attributes.getReports()); return this; } diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java index 4c384b09fad..ce8fed0aa70 100644 --- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java +++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java @@ -29,7 +29,10 @@ public class CoreCollector { private static final Pattern CORE_GENERATOR_PATH_PATTERN = Pattern.compile("^Core was generated by `(?<path>.*?)'.$"); private static final Pattern EXECFN_PATH_PATTERN = Pattern.compile("^.* execfn: '(?<path>.*?)'"); private static final Pattern FROM_PATH_PATTERN = Pattern.compile("^.* from '(?<path>.*?)'"); - static final String 
GDB_PATH = "/opt/rh/devtoolset-9/root/bin/gdb"; + static final String GDB_PATH_RHEL7_DT9 = "/opt/rh/devtoolset-9/root/bin/gdb"; + static final String GDB_PATH_RHEL7_DT10 = "/opt/rh/devtoolset-10/root/bin/gdb"; + static final String GDB_PATH_RHEL8 = "/opt/rh/gcc-toolset-10/root/bin/gdb"; + static final Map<String, Object> JAVA_HEAP_DUMP_METADATA = Map.of("bin_path", "java", "backtrace", List.of("Heap dump, no backtrace available")); @@ -39,8 +42,23 @@ public class CoreCollector { this.docker = docker; } + String getGdbPath(NodeAgentContext context) { + // TODO: Remove when we do not have any devtoolset-9 installs left + String[] command_rhel7_dt9 = {"stat", GDB_PATH_RHEL7_DT9}; + if (docker.executeCommandInContainerAsRoot(context, command_rhel7_dt9).getExitStatus() == 0) { + return GDB_PATH_RHEL7_DT9; + } + + String[] command_rhel7_dt10 = {"stat", GDB_PATH_RHEL7_DT10}; + if (docker.executeCommandInContainerAsRoot(context, command_rhel7_dt10).getExitStatus() == 0) { + return GDB_PATH_RHEL7_DT10; + } + + return GDB_PATH_RHEL8; + } + Path readBinPathFallback(NodeAgentContext context, Path coredumpPath) { - String command = GDB_PATH + " -n -batch -core " + coredumpPath + " | grep \'^Core was generated by\'"; + String command = getGdbPath(context) + " -n -batch -core " + coredumpPath + " | grep \'^Core was generated by\'"; String[] wrappedCommand = {"/bin/sh", "-c", command}; ProcessResult result = docker.executeCommandInContainerAsRoot(context, wrappedCommand); @@ -79,7 +97,7 @@ public class CoreCollector { List<String> readBacktrace(NodeAgentContext context, Path coredumpPath, Path binPath, boolean allThreads) { String threads = allThreads ? 
"thread apply all bt" : "bt"; - String[] command = {GDB_PATH, "-n", "-ex", threads, "-batch", binPath.toString(), coredumpPath.toString()}; + String[] command = {getGdbPath(context), "-n", "-ex", threads, "-batch", binPath.toString(), coredumpPath.toString()}; ProcessResult result = docker.executeCommandInContainerAsRoot(context, command); if (result.getExitStatus() != 0) diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollectorTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollectorTest.java index 2827e99c697..d61ab9e53b8 100644 --- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollectorTest.java +++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollectorTest.java @@ -12,7 +12,9 @@ import java.nio.file.Paths; import java.util.List; import java.util.Map; -import static com.yahoo.vespa.hosted.node.admin.maintenance.coredump.CoreCollector.GDB_PATH; +import static com.yahoo.vespa.hosted.node.admin.maintenance.coredump.CoreCollector.GDB_PATH_RHEL7_DT9; +import static com.yahoo.vespa.hosted.node.admin.maintenance.coredump.CoreCollector.GDB_PATH_RHEL7_DT10; +import static com.yahoo.vespa.hosted.node.admin.maintenance.coredump.CoreCollector.GDB_PATH_RHEL8; import static com.yahoo.vespa.hosted.node.admin.maintenance.coredump.CoreCollector.JAVA_HEAP_DUMP_METADATA; import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; @@ -60,9 +62,10 @@ public class CoreCollectorTest { "execfn: '/usr/bin/program', platform: 'x86_64"); assertEquals(TEST_BIN_PATH, coreCollector.readBinPath(context, TEST_CORE_PATH)); + mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT9}, "The stat output"); Path fallbackResponse = Paths.get("/response/from/fallback"); - mockExec(new String[]{"/bin/sh", "-c", GDB_PATH + " -n -batch -core /tmp/core.1234 | grep '^Core was generated by'"}, + mockExec(new 
String[]{"/bin/sh", "-c", GDB_PATH_RHEL7_DT9 + " -n -batch -core /tmp/core.1234 | grep '^Core was generated by'"}, "Core was generated by `/response/from/fallback'."); mockExec(cmd, "/tmp/core.1234: ELF 64-bit LSB core file x86-64, version 1 (SYSV), SVR4-style"); @@ -74,8 +77,11 @@ public class CoreCollectorTest { @Test public void extractsBinaryPathUsingGdbTest() { + mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT9}, "", "stat: No such file or directory"); + mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT10}, "The stat output"); + final String[] cmd = new String[]{"/bin/sh", "-c", - GDB_PATH + " -n -batch -core /tmp/core.1234 | grep '^Core was generated by'"}; + GDB_PATH_RHEL7_DT10 + " -n -batch -core /tmp/core.1234 | grep '^Core was generated by'"}; mockExec(cmd, "Core was generated by `/usr/bin/program-from-gdb --identity foo/search/cluster.content_'."); assertEquals(Paths.get("/usr/bin/program-from-gdb"), coreCollector.readBinPathFallback(context, TEST_CORE_PATH)); @@ -86,30 +92,34 @@ public class CoreCollectorTest { fail("Expected not to be able to get bin path"); } catch (RuntimeException e) { assertEquals("Failed to extract binary path from GDB, result: ProcessResult { exitStatus=1 output= errors=Error 123 }, command: " + - "[/bin/sh, -c, /opt/rh/devtoolset-9/root/bin/gdb -n -batch -core /tmp/core.1234 | grep '^Core was generated by']", e.getMessage()); + "[/bin/sh, -c, /opt/rh/devtoolset-10/root/bin/gdb -n -batch -core /tmp/core.1234 | grep '^Core was generated by']", e.getMessage()); } } @Test public void extractsBacktraceUsingGdb() { - mockExec(new String[]{GDB_PATH, "-n", "-ex", "bt", "-batch", "/usr/bin/program", "/tmp/core.1234"}, + mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT9}, "The stat output"); + + mockExec(new String[]{GDB_PATH_RHEL7_DT9, "-n", "-ex", "bt", "-batch", "/usr/bin/program", "/tmp/core.1234"}, String.join("\n", GDB_BACKTRACE)); assertEquals(GDB_BACKTRACE, coreCollector.readBacktrace(context, TEST_CORE_PATH, TEST_BIN_PATH, false)); 
- mockExec(new String[]{GDB_PATH, "-n", "-ex", "bt", "-batch", "/usr/bin/program", "/tmp/core.1234"}, + mockExec(new String[]{GDB_PATH_RHEL7_DT9, "-n", "-ex", "bt", "-batch", "/usr/bin/program", "/tmp/core.1234"}, "", "Failure"); try { coreCollector.readBacktrace(context, TEST_CORE_PATH, TEST_BIN_PATH, false); fail("Expected not to be able to read backtrace"); } catch (RuntimeException e) { assertEquals("Failed to read backtrace ProcessResult { exitStatus=1 output= errors=Failure }, Command: " + - "[/opt/rh/devtoolset-9/root/bin/gdb, -n, -ex, bt, -batch, /usr/bin/program, /tmp/core.1234]", e.getMessage()); + "[" + GDB_PATH_RHEL7_DT9 + ", -n, -ex, bt, -batch, /usr/bin/program, /tmp/core.1234]", e.getMessage()); } } @Test public void extractsBacktraceFromAllThreadsUsingGdb() { - mockExec(new String[]{GDB_PATH, "-n", "-ex", "thread apply all bt", "-batch", + mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT9}, "The stat output"); + + mockExec(new String[]{GDB_PATH_RHEL7_DT9, "-n", "-ex", "thread apply all bt", "-batch", "/usr/bin/program", "/tmp/core.1234"}, String.join("\n", GDB_BACKTRACE)); assertEquals(GDB_BACKTRACE, coreCollector.readBacktrace(context, TEST_CORE_PATH, TEST_BIN_PATH, true)); @@ -120,9 +130,11 @@ public class CoreCollectorTest { mockExec(new String[]{"file", TEST_CORE_PATH.toString()}, "/tmp/core.1234: ELF 64-bit LSB core file x86-64, version 1 (SYSV), SVR4-style, from " + "'/usr/bin/program'"); - mockExec(new String[]{GDB_PATH, "-n", "-ex", "bt", "-batch", "/usr/bin/program", "/tmp/core.1234"}, + mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT9}, "", "stat: No such file or directory"); + mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT10}, "", "stat: No such file or directory"); + mockExec(new String[]{GDB_PATH_RHEL8, "-n", "-ex", "bt", "-batch", "/usr/bin/program", "/tmp/core.1234"}, String.join("\n", GDB_BACKTRACE)); - mockExec(new String[]{GDB_PATH, "-n", "-ex", "thread apply all bt", "-batch", + mockExec(new String[]{GDB_PATH_RHEL8, "-n", "-ex", 
"thread apply all bt", "-batch", "/usr/bin/program", "/tmp/core.1234"}, String.join("\n", GDB_BACKTRACE)); @@ -138,7 +150,8 @@ public class CoreCollectorTest { mockExec(new String[]{"file", TEST_CORE_PATH.toString()}, "/tmp/core.1234: ELF 64-bit LSB core file x86-64, version 1 (SYSV), SVR4-style, from " + "'/usr/bin/program'"); - mockExec(new String[]{GDB_PATH + " -n -ex bt -batch /usr/bin/program /tmp/core.1234"}, + mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT9}, "The stat output"); + mockExec(new String[]{GDB_PATH_RHEL7_DT9 + " -n -ex bt -batch /usr/bin/program /tmp/core.1234"}, "", "Failure"); Map<String, Object> expectedData = Map.of("bin_path", TEST_BIN_PATH.toString()); @@ -149,7 +162,11 @@ public class CoreCollectorTest { public void reportsJstackInsteadOfGdbForJdkCores() { mockExec(new String[]{"file", TEST_CORE_PATH.toString()}, "dump.core.5954: ELF 64-bit LSB core file x86-64, version 1 (SYSV), too many program header sections (33172)"); - mockExec(new String[]{"/bin/sh", "-c", GDB_PATH + " -n -batch -core /tmp/core.1234 | grep '^Core was generated by'"}, + + mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT9}, "", "stat: No such file or directory"); + mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT10}, "", "stat: No such file or directory"); + + mockExec(new String[]{"/bin/sh", "-c", GDB_PATH_RHEL8 + " -n -batch -core /tmp/core.1234 | grep '^Core was generated by'"}, "Core was generated by `" + JDK_PATH + " -Dconfig.id=default/container.11 -XX:+Pre'."); String jstack = "jstack11"; diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java index 3b16ecbcaa9..892372f27e7 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java @@ 
-102,11 +102,13 @@ class MaintenanceDeployment implements Closeable { } private Optional<Mutex> tryLock(ApplicationId application, NodeRepository nodeRepository) { + Duration timeout = Duration.ofSeconds(3); try { // Use a short lock to avoid interfering with change deployments - return Optional.of(nodeRepository.nodes().lock(application, Duration.ofSeconds(1))); + return Optional.of(nodeRepository.nodes().lock(application, timeout)); } catch (ApplicationLockException e) { + log.log(Level.WARNING, () -> "Could not lock " + application + " for maintenance deployment within " + timeout); return Optional.empty(); } } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java index cdb5202603a..79d6fbfbdcd 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java @@ -67,7 +67,6 @@ public class NodeRepositoryMaintenance extends AbstractComponent { maintainers.add(new ScalingSuggestionsMaintainer(nodeRepository, defaults.scalingSuggestionsInterval, metric)); maintainers.add(new SwitchRebalancer(nodeRepository, defaults.switchRebalancerInterval, metric, deployer)); maintainers.add(new HostEncrypter(nodeRepository, defaults.hostEncrypterInterval, metric)); - maintainers.add(new ParkedExpirer(nodeRepository, defaults.parkedExpirerInterval, metric)); provisionServiceProvider.getLoadBalancerService(nodeRepository) .map(lbService -> new LoadBalancerExpirer(nodeRepository, defaults.loadBalancerExpirerInterval, lbService, metric)) @@ -120,7 +119,6 @@ public class NodeRepositoryMaintenance extends AbstractComponent { private final Duration scalingSuggestionsInterval; private final Duration switchRebalancerInterval; private final Duration 
hostEncrypterInterval; - private final Duration parkedExpirerInterval; private final NodeFailer.ThrottlePolicy throttlePolicy; @@ -129,7 +127,7 @@ public class NodeRepositoryMaintenance extends AbstractComponent { dynamicProvisionerInterval = Duration.ofMinutes(5); failedExpirerInterval = Duration.ofMinutes(10); failGrace = Duration.ofMinutes(30); - infrastructureProvisionInterval = Duration.ofMinutes(1); + infrastructureProvisionInterval = Duration.ofMinutes(3); loadBalancerExpirerInterval = Duration.ofMinutes(5); metricsInterval = Duration.ofMinutes(1); nodeFailerInterval = Duration.ofMinutes(15); @@ -151,11 +149,10 @@ public class NodeRepositoryMaintenance extends AbstractComponent { throttlePolicy = NodeFailer.ThrottlePolicy.hosted; inactiveConfigServerExpiry = Duration.ofMinutes(5); inactiveControllerExpiry = Duration.ofMinutes(5); - parkedExpirerInterval = Duration.ofMinutes(30); if (zone.environment().isProduction() && ! zone.system().isCd()) { inactiveExpiry = Duration.ofHours(4); // enough time for the application owner to discover and redeploy - retiredInterval = Duration.ofMinutes(30); + retiredInterval = Duration.ofMinutes(15); dirtyExpiry = Duration.ofHours(2); // enough time to clean the node retiredExpiry = Duration.ofDays(4); // give up migrating data after 4 days } else { diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ParkedExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ParkedExpirer.java deleted file mode 100644 index ec7826658e3..00000000000 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ParkedExpirer.java +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
-package com.yahoo.vespa.hosted.provision.maintenance; - -import com.yahoo.config.provision.NodeType; -import com.yahoo.jdisc.Metric; -import com.yahoo.vespa.hosted.provision.Node; -import com.yahoo.vespa.hosted.provision.NodeList; -import com.yahoo.vespa.hosted.provision.NodeRepository; -import com.yahoo.vespa.hosted.provision.node.Agent; -import com.yahoo.vespa.hosted.provision.node.History; - -import java.time.Duration; -import java.time.Instant; -import java.util.Comparator; -import java.util.logging.Logger; - -/** - * - * Expires parked nodes in dynamically provisioned zones. - * If number of parked hosts exceed MAX_ALLOWED_PARKED_HOSTS, recycle in a queue order - * - * @author olaa - */ -public class ParkedExpirer extends NodeRepositoryMaintainer { - - private static final int MAX_ALLOWED_PARKED_HOSTS = 20; - private static final Logger log = Logger.getLogger(ParkedExpirer.class.getName()); - - private final NodeRepository nodeRepository; - - ParkedExpirer(NodeRepository nodeRepository, Duration interval, Metric metric) { - super(nodeRepository, interval, metric); - this.nodeRepository = nodeRepository; - } - - @Override - protected double maintain() { - if (!nodeRepository.zone().getCloud().dynamicProvisioning()) - return 1.0; - - NodeList parkedHosts = nodeRepository.nodes() - .list(Node.State.parked) - .nodeType(NodeType.host) - .not().deprovisioning(); - int hostsToExpire = Math.max(0, parkedHosts.size() - MAX_ALLOWED_PARKED_HOSTS); - parkedHosts.sortedBy(Comparator.comparing(this::parkedAt)) - .first(hostsToExpire) - .forEach(host -> { - log.info("Allowed number of parked nodes exceeded. 
Recycling " + host.hostname()); - nodeRepository.nodes().deallocate(host, Agent.ParkedExpirer, "Expired by ParkedExpirer"); - }); - - return 1.0; - } - - private Instant parkedAt(Node node) { - return node.history().event(History.Event.Type.parked) - .map(History.Event::at) - .orElse(Instant.EPOCH); // Should not happen - } - -} diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirer.java index 856d534bbd2..76c8210338e 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirer.java +++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirer.java @@ -4,15 +4,18 @@ package com.yahoo.vespa.hosted.provision.maintenance; import com.yahoo.config.provision.NodeType; import com.yahoo.jdisc.Metric; import com.yahoo.vespa.hosted.provision.Node; +import com.yahoo.vespa.hosted.provision.NodeList; import com.yahoo.vespa.hosted.provision.NodeRepository; import com.yahoo.vespa.hosted.provision.node.Agent; import com.yahoo.vespa.hosted.provision.node.History; import java.time.Duration; +import java.time.Instant; import java.util.List; /** * This moves nodes of type {@link NodeType#host} from provisioned to parked if they have been in provisioned too long. + * Parked hosts are deprovisioned as well, if too many hosts are being expired. * * Only {@link NodeType#host} is moved because any number of nodes of that type can exist. 
Other node types such as * {@link NodeType#confighost} have a fixed number and thus cannot be replaced while the fixed number of nodes exist in @@ -22,17 +25,40 @@ import java.util.List; */ public class ProvisionedExpirer extends Expirer { + private final NodeRepository nodeRepository; + private static final int MAXIMUM_ALLOWED_EXPIRED_HOSTS = 20; + ProvisionedExpirer(NodeRepository nodeRepository, Duration dirtyTimeout, Metric metric) { super(Node.State.provisioned, History.Event.Type.provisioned, nodeRepository, dirtyTimeout, metric); + this.nodeRepository = nodeRepository; } @Override protected void expire(List<Node> expired) { + int previouslyExpired = numberOfPreviouslyExpired(); for (Node expiredNode : expired) { - if (expiredNode.type() == NodeType.host) { - nodeRepository().nodes().parkRecursively(expiredNode.hostname(), Agent.ProvisionedExpirer, "Node is stuck in provisioned"); + if (expiredNode.type() != NodeType.host) + continue; + nodeRepository().nodes().parkRecursively(expiredNode.hostname(), Agent.ProvisionedExpirer, "Node is stuck in provisioned"); + if (MAXIMUM_ALLOWED_EXPIRED_HOSTS < ++previouslyExpired) { + nodeRepository.nodes().deprovision(expiredNode.hostname(), Agent.ProvisionedExpirer, nodeRepository.clock().instant()); } } } + private int numberOfPreviouslyExpired() { + return nodeRepository.nodes() + .list(Node.State.parked) + .nodeType(NodeType.host) + .matching(this::parkedByProvisionedExpirer) + .not().deprovisioning() + .size(); + } + + private boolean parkedByProvisionedExpirer(Node node) { + return node.history().event(History.Event.Type.parked) + .map(History.Event::agent) + .map(Agent.ProvisionedExpirer::equals) + .orElse(false); + } } diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java index ec1bfba6996..4d67c83a179 100644 --- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java +++ 
b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java @@ -516,6 +516,8 @@ public class Nodes { public void forget(Node node) { if (node.state() != Node.State.deprovisioned) throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten"); + if (node.status().wantToRebuild()) + throw new IllegalArgumentException(node + " is rebuilding and cannot be forgotten"); NestedTransaction transaction = new NestedTransaction(); db.removeNodes(List.of(node), transaction); transaction.commit(); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ParkedExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ParkedExpirerTest.java deleted file mode 100644 index bc60801c1d6..00000000000 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ParkedExpirerTest.java +++ /dev/null @@ -1,71 +0,0 @@ -package com.yahoo.vespa.hosted.provision.maintenance; - -import com.yahoo.config.provision.Cloud; -import com.yahoo.config.provision.Environment; -import com.yahoo.config.provision.Flavor; -import com.yahoo.config.provision.NodeResources; -import com.yahoo.config.provision.NodeType; -import com.yahoo.config.provision.RegionName; -import com.yahoo.config.provision.SystemName; -import com.yahoo.config.provision.Zone; -import com.yahoo.vespa.hosted.provision.Node; -import com.yahoo.vespa.hosted.provision.node.Agent; -import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester; -import com.yahoo.vespa.hosted.provision.testutils.MockHostProvisioner; -import org.junit.Test; - -import java.time.Duration; -import java.util.List; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -import static org.junit.Assert.*; - -/** - * @author olaa - */ -public class ParkedExpirerTest { - - private ProvisioningTester tester; - - @Test - public void noop_if_not_dynamic_provisioning() { - tester = getTester(false); - 
populateNodeRepo(); - - var expirer = new ParkedExpirer(tester.nodeRepository(), Duration.ofMinutes(4), new TestMetric()); - expirer.maintain(); - - assertEquals(0, tester.nodeRepository().nodes().list(Node.State.dirty).size()); - assertEquals(25, tester.nodeRepository().nodes().list(Node.State.parked).size()); - } - - @Test - public void recycles_correct_subset_of_parked_hosts() { - tester = getTester(true); - populateNodeRepo(); - - var expirer = new ParkedExpirer(tester.nodeRepository(), Duration.ofMinutes(4), new TestMetric()); - expirer.maintain(); - - assertEquals(4, tester.nodeRepository().nodes().list(Node.State.dirty).size()); - assertEquals(21, tester.nodeRepository().nodes().list(Node.State.parked).size()); - - } - - private ProvisioningTester getTester(boolean dynamicProvisioning) { - var zone = new Zone(Cloud.builder().dynamicProvisioning(dynamicProvisioning).build(), SystemName.main, Environment.prod, RegionName.from("us-east")); - return new ProvisioningTester.Builder().zone(zone) - .hostProvisioner(dynamicProvisioning ? 
new MockHostProvisioner(List.of()) : null) - .build(); - } - - private void populateNodeRepo() { - var nodes = IntStream.range(0, 25) - .mapToObj(i -> Node.create("id-" + i, "host-" + i, new Flavor(NodeResources.unspecified()), Node.State.parked, NodeType.host).build()) - .collect(Collectors.toList()); - tester.nodeRepository().database().addNodesInState(nodes, Node.State.parked, Agent.system); - tester.nodeRepository().nodes().deprovision(nodes.get(0).hostname(), Agent.system, tester.clock().instant()); // Deprovisioning host is not recycled - } - -} diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirerTest.java new file mode 100644 index 00000000000..786faae24b4 --- /dev/null +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirerTest.java @@ -0,0 +1,50 @@ +package com.yahoo.vespa.hosted.provision.maintenance; + +import com.yahoo.config.provision.Cloud; +import com.yahoo.config.provision.Environment; +import com.yahoo.config.provision.Flavor; +import com.yahoo.config.provision.NodeResources; +import com.yahoo.config.provision.NodeType; +import com.yahoo.config.provision.RegionName; +import com.yahoo.config.provision.SystemName; +import com.yahoo.config.provision.Zone; +import com.yahoo.vespa.hosted.provision.Node; +import com.yahoo.vespa.hosted.provision.node.Agent; +import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester; +import com.yahoo.vespa.hosted.provision.testutils.MockHostProvisioner; +import org.junit.Test; + +import java.time.Duration; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.junit.Assert.*; + +/** + * @author olaa + */ +public class ProvisionedExpirerTest { + + private ProvisioningTester tester; + + @Test + public void deprovisions_hosts_if_excessive_expiry() { 
+ tester = new ProvisioningTester.Builder().build(); + populateNodeRepo(); + + tester.clock().advance(Duration.ofMinutes(5)); + new ProvisionedExpirer(tester.nodeRepository(), Duration.ofMinutes(4), new TestMetric()).maintain(); + + assertEquals(5, tester.nodeRepository().nodes().list().deprovisioning().size()); + assertEquals(20, tester.nodeRepository().nodes().list().not().deprovisioning().size()); + } + + private void populateNodeRepo() { + var nodes = IntStream.range(0, 25) + .mapToObj(i -> Node.create("id-" + i, "host-" + i, new Flavor(NodeResources.unspecified()), Node.State.provisioned, NodeType.host).build()) + .collect(Collectors.toList()); + tester.nodeRepository().database().addNodesInState(nodes, Node.State.provisioned, Agent.system); + } + +} diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java index a28c11d009f..dd16d4674ad 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java @@ -243,6 +243,13 @@ public class NodesV2ApiTest { new byte[0], Request.Method.DELETE), "{\"message\":\"Removed dockerhost1.yahoo.com\"}"); // ... 
and then forget it completely + tester.assertResponse(new Request("http://localhost:8080/nodes/v2/node/dockerhost1.yahoo.com", + new byte[0], Request.Method.DELETE), + 400, + "{\"error-code\":\"BAD_REQUEST\",\"message\":\"deprovisioned host dockerhost1.yahoo.com is rebuilding and cannot be forgotten\"}"); + assertResponse(new Request("http://localhost:8080/nodes/v2/node/dockerhost1.yahoo.com", + Utf8.toBytes("{\"wantToRebuild\": false}"), Request.Method.PATCH), + "{\"message\":\"Updated dockerhost1.yahoo.com\"}"); assertResponse(new Request("http://localhost:8080/nodes/v2/node/dockerhost1.yahoo.com", new byte[0], Request.Method.DELETE), "{\"message\":\"Permanently removed dockerhost1.yahoo.com\"}"); diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/maintenance.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/maintenance.json index 26d711945c6..72224ef3cba 100644 --- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/maintenance.json +++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/maintenance.json @@ -43,9 +43,6 @@ "name": "OsUpgradeActivator" }, { - "name": "ParkedExpirer" - }, - { "name": "PeriodicApplicationMaintainer" }, { diff --git a/screwdriver/build-vespa.sh b/screwdriver/build-vespa.sh index 4480b33e6f9..91375728ca9 100755 --- a/screwdriver/build-vespa.sh +++ b/screwdriver/build-vespa.sh @@ -6,7 +6,7 @@ set -e readonly SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd )" readonly NUM_THREADS=$(( $(nproc) + 2 )) -source /etc/profile.d/enable-devtoolset-9.sh +source /etc/profile.d/enable-devtoolset-10.sh source /etc/profile.d/enable-rh-maven35.sh export MALLOC_ARENA_MAX=1 @@ -52,9 +52,9 @@ esac if [[ $SHOULD_BUILD == systemtest ]]; then yum -y --setopt=skip_missing_names_on_install=False install \ zstd \ - devtoolset-9-gcc-c++ \ - devtoolset-9-libatomic-devel \ - devtoolset-9-binutils \ + devtoolset-10-gcc-c++ \ + devtoolset-10-libatomic-devel \ + devtoolset-10-binutils \ libxml2-devel \ rh-ruby27-rubygems-devel \ rh-ruby27-ruby-devel \ diff --git a/searchcore/src/tests/proton/documentdb/documentdb_test.cpp b/searchcore/src/tests/proton/documentdb/documentdb_test.cpp index 463a7b164e1..3013e8f38d1 100644 --- a/searchcore/src/tests/proton/documentdb/documentdb_test.cpp +++ b/searchcore/src/tests/proton/documentdb/documentdb_test.cpp @@ -19,6 +19,8 @@ #include <vespa/searchcore/proton/server/document_db_explorer.h> #include <vespa/searchcore/proton/server/documentdb.h> #include <vespa/searchcore/proton/server/documentdbconfigmanager.h> +#include <vespa/searchcore/proton/server/feedhandler.h> +#include <vespa/searchcore/proton/server/fileconfigmanager.h> #include <vespa/searchcore/proton/server/memoryconfigstore.h> #include <vespa/persistence/dummyimpl/dummy_bucket_executor.h> #include <vespa/searchcorespi/index/indexflushtarget.h> @@ -28,7 +30,10 @@ #include <vespa/vespalib/data/slime/slime.h> #include <vespa/vespalib/util/size_literals.h> #include <vespa/config-bucketspaces.h> +#include <vespa/vespalib/io/fileutil.h> +#include <vespa/vespalib/stllike/asciistream.h> #include <vespa/vespalib/testkit/test_kit.h> +#include <iostream> using namespace cloud::config::filedistribution; using namespace proton; @@ -39,6 +44,7 @@ using document::DocumentType; using document::DocumentTypeRepo; using document::DocumenttypesConfig; using document::test::makeBucketSpace; +using search::SerialNum; using search::TuneFileDocumentDB; using 
search::index::DummyFileHeaderContext; using search::index::Schema; @@ -51,6 +57,24 @@ using vespalib::Slime; namespace { +void +cleanup_dirs(bool file_config) +{ + vespalib::rmdir("typea", true); + vespalib::rmdir("tmp", true); + if (file_config) { + vespalib::rmdir("config", true); + } +} + +vespalib::string +config_subdir(SerialNum serialNum) +{ + vespalib::asciistream os; + os << "config/config-" << serialNum; + return os.str(); +} + struct MyDBOwner : public DummyDBOwner { std::shared_ptr<DocumentDBReferenceRegistry> _registry; @@ -67,7 +91,30 @@ MyDBOwner::MyDBOwner() {} MyDBOwner::~MyDBOwner() = default; -struct Fixture { +struct FixtureBase { + bool _cleanup; + bool _file_config; + FixtureBase(bool file_config); + ~FixtureBase(); + void disable_cleanup() { _cleanup = false; } +}; + +FixtureBase::FixtureBase(bool file_config) + : _cleanup(true), + _file_config(file_config) +{ + vespalib::mkdir("typea"); +} + + +FixtureBase::~FixtureBase() +{ + if (_cleanup) { + cleanup_dirs(_file_config); + } +} + +struct Fixture : public FixtureBase { DummyWireService _dummy; MyDBOwner _myDBOwner; vespalib::ThreadStackExecutor _summaryExecutor; @@ -79,12 +126,20 @@ struct Fixture { matching::QueryLimiter _queryLimiter; vespalib::Clock _clock; + std::unique_ptr<ConfigStore> make_config_store(); Fixture(); + Fixture(bool file_config); ~Fixture(); }; Fixture::Fixture() - : _dummy(), + : Fixture(false) +{ +} + +Fixture::Fixture(bool file_config) + : FixtureBase(file_config), + _dummy(), _myDBOwner(), _summaryExecutor(8, 128_Ki), _hwInfo(), @@ -111,13 +166,25 @@ Fixture::Fixture() _db = DocumentDB::create(".", mgr.getConfig(), "tcp/localhost:9014", _queryLimiter, _clock, DocTypeName("typea"), makeBucketSpace(), *b->getProtonConfigSP(), _myDBOwner, _summaryExecutor, _summaryExecutor, _bucketExecutor, _tls, _dummy, - _fileHeaderContext, std::make_unique<MemoryConfigStore>(), + _fileHeaderContext, make_config_store(), std::make_shared<vespalib::ThreadStackExecutor>(16, 128_Ki), 
_hwInfo); _db->start(); _db->waitForOnlineState(); } -Fixture::~Fixture() = default; +Fixture::~Fixture() +{ +} + +std::unique_ptr<ConfigStore> +Fixture::make_config_store() +{ + if (_file_config) { + return std::make_unique<FileConfigManager>("config", "", "typea"); + } else { + return std::make_unique<MemoryConfigStore>(); + } +} const IFlushTarget * extractRealFlushTarget(const IFlushTarget *target) @@ -249,11 +316,56 @@ TEST_F("require that document db registers reference", Fixture) EXPECT_EQUAL(search::attribute::BasicType::INT32, attrReadGuard->attribute()->getBasicType()); } +TEST("require that normal restart works") +{ + { + Fixture f(true); + f.disable_cleanup(); + } + { + Fixture f(true); + } +} + +TEST("require that resume after interrupted save config works") +{ + SerialNum serialNum = 0; + { + Fixture f(true); + f.disable_cleanup(); + serialNum = f._db->getFeedHandler().getSerialNum(); + } + { + /* + * Simulate interrupted save config by copying best config to + * serial number after end of transaction log + */ + std::cout << "Replay end serial num is " << serialNum << std::endl; + search::IndexMetaInfo info("config"); + ASSERT_TRUE(info.load()); + auto best_config_snapshot = info.getBestSnapshot(); + ASSERT_TRUE(best_config_snapshot.valid); + std::cout << "Best config serial is " << best_config_snapshot.syncToken << std::endl; + auto old_config_subdir = config_subdir(best_config_snapshot.syncToken); + auto new_config_subdir = config_subdir(serialNum + 1); + vespalib::mkdir(new_config_subdir); + auto config_files = vespalib::listDirectory(old_config_subdir); + for (auto &config_file : config_files) { + vespalib::copy(old_config_subdir + "/" + config_file, new_config_subdir + "/" + config_file, false, false); + } + info.addSnapshot({true, serialNum + 1, new_config_subdir.substr(new_config_subdir.rfind('/') + 1)}); + info.save(); + } + { + Fixture f(true); + } +} + } // namespace TEST_MAIN() { + cleanup_dirs(true); 
DummyFileHeaderContext::setCreator("documentdb_test"); - FastOS_File::MakeDirectory("typea"); TEST_RUN_ALL(); - FastOS_FileInterface::EmptyAndRemoveDirectory("typea"); + cleanup_dirs(true); } diff --git a/searchcore/src/tests/proton/server/memory_flush_config_updater/memory_flush_config_updater_test.cpp b/searchcore/src/tests/proton/server/memory_flush_config_updater/memory_flush_config_updater_test.cpp index cff44631c6c..f918ebe9179 100644 --- a/searchcore/src/tests/proton/server/memory_flush_config_updater/memory_flush_config_updater_test.cpp +++ b/searchcore/src/tests/proton/server/memory_flush_config_updater/memory_flush_config_updater_test.cpp @@ -159,7 +159,7 @@ TEST_F("require that more disk bloat is allowed while node state is retired", Fi f.notifyDiskMemUsage(ResourceUsageState(0.7, 0.3), belowLimit()); TEST_DO(f.assertStrategyDiskConfig(0.2, 0.2)); f.setNodeRetired(true); - TEST_DO(f.assertStrategyDiskConfig((0.8 - 0.3 / 0.7) * 0.8, 1.0)); + TEST_DO(f.assertStrategyDiskConfig((0.8 - ((0.3/0.7)*(1 - 0.2))) / 0.8, 1.0)); f.notifyDiskMemUsage(belowLimit(), belowLimit()); TEST_DO(f.assertStrategyDiskConfig(0.2, 0.2)); } diff --git a/searchcore/src/vespa/searchcore/grouping/groupingcontext.cpp b/searchcore/src/vespa/searchcore/grouping/groupingcontext.cpp index 55e9ce16f70..01dd069b03c 100644 --- a/searchcore/src/vespa/searchcore/grouping/groupingcontext.cpp +++ b/searchcore/src/vespa/searchcore/grouping/groupingcontext.cpp @@ -4,17 +4,15 @@ #include <vespa/searchlib/aggregation/predicates.h> #include <vespa/searchlib/aggregation/modifiers.h> -namespace search { +namespace search::grouping { using aggregation::CountFS4Hits; using aggregation::FS4HitSetDistributionKey; -namespace grouping { - void GroupingContext::deserialize(const char *groupSpec, uint32_t groupSpecLen) { - if ((groupSpec != NULL) && (groupSpecLen > 4)) { + if ((groupSpec != nullptr) && (groupSpecLen > 4)) { vespalib::nbostream is(groupSpec, groupSpecLen); vespalib::NBOSerializer nis(is); 
uint32_t numGroupings = 0; @@ -102,6 +100,4 @@ GroupingContext::needRanking() const return true; } - -} // namespace search::grouping -} // namespace search +} diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp b/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp index aa633536419..e53e817af8d 100644 --- a/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp @@ -632,8 +632,9 @@ DocumentDB::saveInitialConfig(const DocumentDBConfig &configSnapshot) // Only called from ctor lock_guard guard(_configMutex); - if (_config_store->getBestSerialNum() != 0) + if (_config_store->getBestSerialNum() != 0) { return; // Initial config already present + } SerialNum confSerial = _feedHandler->inc_replay_end_serial_num(); _feedHandler->setSerialNum(confSerial); @@ -658,16 +659,17 @@ void DocumentDB::resumeSaveConfig() { SerialNum bestSerial = _config_store->getBestSerialNum(); - if (bestSerial == 0) - return; - if (bestSerial != _feedHandler->get_replay_end_serial_num() + 1) + assert(bestSerial != 0); + if (bestSerial != _feedHandler->get_replay_end_serial_num() + 1) { return; + } + LOG(warning, "DocumentDB(%s): resumeSaveConfig() resuming save config for serial %" PRIu64, + _docTypeName.toString().c_str(), bestSerial); // proton was interrupted when saving later config. SerialNum confSerial = _feedHandler->inc_replay_end_serial_num(); - _feedHandler->setSerialNum(confSerial); + assert(confSerial == bestSerial); // resume operation, i.e. 
save config entry in transaction log NewConfigOperation op(confSerial, *_config_store); - op.setSerialNum(_feedHandler->inc_replay_end_serial_num()); (void) _feedHandler->storeOperationSync(op); sync(op.getSerialNum()); } diff --git a/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.cpp b/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.cpp index 4b862b40896..04aea64fbd4 100644 --- a/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.cpp @@ -4,8 +4,6 @@ #include "bootstrapconfig.h" #include <vespa/searchcore/proton/common/hw_info_sampler.h> #include <vespa/config/print/fileconfigwriter.h> -#include <vespa/config/print/fileconfigsnapshotreader.h> -#include <vespa/config/print/fileconfigsnapshotwriter.h> #include <vespa/config-bucketspaces.h> #include <vespa/document/repo/document_type_repo_factory.h> #include <vespa/searchcommon/common/schemaconfigurer.h> @@ -42,7 +40,8 @@ using vespa::config::search::summary::JuniperrcConfig; using vespa::config::content::core::BucketspacesConfig; using vespalib::nbostream; -typedef IndexMetaInfo::SnapshotList SnapshotList; +using SnapshotList = IndexMetaInfo::SnapshotList; +using Snapshot = IndexMetaInfo::Snapshot; using namespace std::chrono_literals; namespace proton { @@ -74,9 +73,7 @@ fsyncFile(const vespalib::string &fileName) template <class Config> void -saveHelper(const vespalib::string &snapDir, - const vespalib::string &name, - const Config &config) +saveHelper(const vespalib::string &snapDir, const vespalib::string &name, const Config &config) { vespalib::string fileName(snapDir + "/" + name + ".cfg"); config::FileConfigWriter writer(fileName); @@ -105,8 +102,7 @@ public: ConfigFile(); ~ConfigFile(); - ConfigFile(const vespalib::string &name, - const vespalib::string &fullName); + ConfigFile(const vespalib::string &name, const vespalib::string &fullName); nbostream &serialize(nbostream &stream) const; 
nbostream &deserialize(nbostream &stream); @@ -122,8 +118,7 @@ ConfigFile::ConfigFile() ConfigFile::~ConfigFile() = default; -ConfigFile::ConfigFile(const vespalib::string &name, - const vespalib::string &fullName) +ConfigFile::ConfigFile(const vespalib::string &name, const vespalib::string &fullName) : _name(name), _modTime(0), _content() @@ -142,7 +137,7 @@ ConfigFile::ConfigFile(const vespalib::string &name, nbostream & ConfigFile::serialize(nbostream &stream) const { - assert(strchr(_name.c_str(), '/') == NULL); + assert(strchr(_name.c_str(), '/') == nullptr); stream << _name; stream << static_cast<int64_t>(_modTime);; uint32_t sz = _content.size(); @@ -155,7 +150,7 @@ nbostream & ConfigFile::deserialize(nbostream &stream) { stream >> _name; - assert(strchr(_name.c_str(), '/') == NULL); + assert(strchr(_name.c_str(), '/') == nullptr); int64_t modTime; stream >> modTime; _modTime = modTime; @@ -255,8 +250,7 @@ FileConfigManager::getOldestSerialNum() const } void -FileConfigManager::saveConfig(const DocumentDBConfig &snapshot, - SerialNum serialNum) +FileConfigManager::saveConfig(const DocumentDBConfig &snapshot, SerialNum serialNum) { if (getBestSerialNum() >= serialNum) { LOG(warning, "Config for serial >= %" PRIu64 " already saved", @@ -318,8 +312,7 @@ void addEmptyFile(vespalib::string snapDir, vespalib::string fileName) } void -FileConfigManager::loadConfig(const DocumentDBConfig ¤tSnapshot, - search::SerialNum serialNum, +FileConfigManager::loadConfig(const DocumentDBConfig ¤tSnapshot, search::SerialNum serialNum, DocumentDBConfig::SP &loadedSnapshot) { vespalib::string snapDirBaseName(makeSnapDirBaseName(serialNum)); @@ -333,13 +326,14 @@ FileConfigManager::loadConfig(const DocumentDBConfig ¤tSnapshot, DocumentDBConfigHelper dbc(spec, _docTypeName); - typedef DocumenttypesConfig DTC; - typedef DocumentDBConfig::DocumenttypesConfigSP DTCSP; - DTCSP docTypesCfg(config::ConfigGetter<DTC>::getConfig("", spec).release()); + using DTC = DocumenttypesConfig; + 
using DTCSP = DocumentDBConfig::DocumenttypesConfigSP; + DTCSP docTypesCfg = config::ConfigGetter<DTC>::getConfig("", spec); std::shared_ptr<const DocumentTypeRepo> repo; if (currentSnapshot.getDocumenttypesConfigSP() && currentSnapshot.getDocumentTypeRepoSP() && - currentSnapshot.getDocumenttypesConfig() == *docTypesCfg) { + (currentSnapshot.getDocumenttypesConfig() == *docTypesCfg)) + { docTypesCfg = currentSnapshot.getDocumenttypesConfigSP(); repo = currentSnapshot.getDocumentTypeRepoSP(); } else { @@ -462,8 +456,7 @@ FileConfigManager::serializeConfig(SerialNum serialNum, nbostream &stream) uint32_t numConfigs = configs.size(); stream << numConfigs; for (const auto &config : configs) { - ConfigFile file(config, - snapDir + "/" + config); + ConfigFile file(config, snapDir + "/" + config); stream << file; } } diff --git a/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.h b/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.h index 1c477ffd3c8..d58d7920c67 100644 --- a/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.h +++ b/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.h @@ -10,17 +10,12 @@ namespace proton { class FileConfigManager : public ConfigStore { -public: - typedef std::unique_ptr<FileConfigManager> UP; - typedef std::shared_ptr<FileConfigManager> SP; - typedef search::IndexMetaInfo::Snapshot Snapshot; - private: - vespalib::string _baseDir; - vespalib::string _configId; - vespalib::string _docTypeName; + vespalib::string _baseDir; + vespalib::string _configId; + vespalib::string _docTypeName; search::IndexMetaInfo _info; - ProtonConfigSP _protonConfig; + ProtonConfigSP _protonConfig; public: /** @@ -33,14 +28,12 @@ public: const vespalib::string &configId, const vespalib::string &docTypeName); - virtual - ~FileConfigManager(); + ~FileConfigManager() override; - virtual SerialNum getBestSerialNum() const override; - virtual SerialNum getOldestSerialNum() const override; + SerialNum 
getBestSerialNum() const override; + SerialNum getOldestSerialNum() const override; - virtual void saveConfig(const DocumentDBConfig &snapshot, - SerialNum serialNum) override; + void saveConfig(const DocumentDBConfig &snapshot, SerialNum serialNum) override; /** * Load a config snapshot from disk corresponding to the given @@ -53,23 +46,21 @@ public: * @param loadedSnapshot the shared pointer in which to store the * resulting config snapshot. */ - virtual void loadConfig(const DocumentDBConfig ¤tSnapshot, - SerialNum serialNum, - DocumentDBConfig::SP &loadedSnapshot) override; + void loadConfig(const DocumentDBConfig ¤tSnapshot, SerialNum serialNum, + DocumentDBConfig::SP &loadedSnapshot) override; - virtual void removeInvalid() override; - virtual void prune(SerialNum serialNum) override; - virtual bool hasValidSerial(SerialNum serialNum) const override; + void removeInvalid() override; + void prune(SerialNum serialNum) override; + bool hasValidSerial(SerialNum serialNum) const override; - virtual SerialNum getPrevValidSerial(SerialNum serialNum) const override; + SerialNum getPrevValidSerial(SerialNum serialNum) const override; /** * Serialize config files. * * Used for serializing config into transaction log. */ - virtual void - serializeConfig(SerialNum serialNum, vespalib::nbostream &stream) override; + void serializeConfig(SerialNum serialNum, vespalib::nbostream &stream) override; /** @@ -80,10 +71,9 @@ public: * takes precedence over the serialized config files in the * transaction log. 
*/ - virtual void - deserializeConfig(SerialNum serialNum, vespalib::nbostream &stream) override; + void deserializeConfig(SerialNum serialNum, vespalib::nbostream &stream) override; - virtual void setProtonConfig(const ProtonConfigSP &protonConfig) override; + void setProtonConfig(const ProtonConfigSP &protonConfig) override; }; } // namespace proton diff --git a/searchcore/src/vespa/searchcore/proton/server/memory_flush_config_updater.cpp b/searchcore/src/vespa/searchcore/proton/server/memory_flush_config_updater.cpp index 88e2096aa63..cf51c7be518 100644 --- a/searchcore/src/vespa/searchcore/proton/server/memory_flush_config_updater.cpp +++ b/searchcore/src/vespa/searchcore/proton/server/memory_flush_config_updater.cpp @@ -13,7 +13,8 @@ namespace { bool shouldUseConservativeMode(const ResourceUsageState &resourceState, bool currentlyUseConservativeMode, - double lowWatermarkFactor) { + double lowWatermarkFactor) +{ return resourceState.aboveLimit() || (currentlyUseConservativeMode && resourceState.aboveLimit(lowWatermarkFactor)); } @@ -21,8 +22,7 @@ shouldUseConservativeMode(const ResourceUsageState &resourceState, } void -MemoryFlushConfigUpdater::considerUseConservativeDiskMode(const LockGuard &guard, - MemoryFlush::Config &newConfig) +MemoryFlushConfigUpdater::considerUseConservativeDiskMode(const LockGuard &guard, MemoryFlush::Config &newConfig) { if (shouldUseConservativeMode(_currState.diskState(), _useConservativeDiskMode, _currConfig.conservative.lowwatermarkfactor)) @@ -38,8 +38,7 @@ MemoryFlushConfigUpdater::considerUseConservativeDiskMode(const LockGuard &guard } void -MemoryFlushConfigUpdater::considerUseConservativeMemoryMode(const LockGuard &, - MemoryFlush::Config &newConfig) +MemoryFlushConfigUpdater::considerUseConservativeMemoryMode(const LockGuard &, MemoryFlush::Config &newConfig) { if (shouldUseConservativeMode(_currState.memoryState(), _useConservativeMemoryMode, _currConfig.conservative.lowwatermarkfactor)) @@ -59,18 +58,29 @@ 
MemoryFlushConfigUpdater::considerUseRelaxedDiskMode(const LockGuard &, MemoryFl double bloatMargin = _currConfig.conservative.lowwatermarkfactor - utilization; if (bloatMargin > 0.0) { // Node retired and disk utiliation is below low mater mark factor. + // Compute how much of disk is occupied by live data, give that bloat is maxed, + // which is normally the case in a system that has been running for a while. + double spaceUtilization = utilization * (1 - _currConfig.diskbloatfactor); + // Then compute how much bloat can allowed given the current space usage and still stay below low watermark + double targetBloat = (_currConfig.conservative.lowwatermarkfactor - spaceUtilization) / _currConfig.conservative.lowwatermarkfactor; newConfig.diskBloatFactor = 1.0; - newConfig.globalDiskBloatFactor = std::max(bloatMargin * 0.8, _currConfig.diskbloatfactor); + newConfig.globalDiskBloatFactor = std::max(targetBloat, _currConfig.diskbloatfactor); } } void -MemoryFlushConfigUpdater::updateFlushStrategy(const LockGuard &guard) +MemoryFlushConfigUpdater::updateFlushStrategy(const LockGuard &guard, const char * why) { MemoryFlush::Config newConfig = convertConfig(_currConfig, _memory); considerUseConservativeDiskMode(guard, newConfig); considerUseConservativeMemoryMode(guard, newConfig); _flushStrategy->setConfig(newConfig); + LOG(info, "Due to %s (conservative-disk=%d, conservative-memory=%d, retired=%d) flush config updated to " + "global-disk-bloat(%1.2f), max-tls-size(%" PRIu64 ")," + "max-global-memory(%" PRIu64 "), max-memory-gain(%" PRIu64 ")", + why, _useConservativeDiskMode, _useConservativeMemoryMode, _nodeRetired, + newConfig.globalDiskBloatFactor, newConfig.maxGlobalTlsSize, + newConfig.maxGlobalMemory, newConfig.maxMemoryGain); } MemoryFlushConfigUpdater::MemoryFlushConfigUpdater(const MemoryFlush::SP &flushStrategy, @@ -92,7 +102,7 @@ MemoryFlushConfigUpdater::setConfig(const ProtonConfig::Flush::Memory &newConfig { LockGuard guard(_mutex); _currConfig = 
newConfig; - updateFlushStrategy(guard); + updateFlushStrategy(guard, "new config"); } void @@ -100,7 +110,7 @@ MemoryFlushConfigUpdater::notifyDiskMemUsage(DiskMemUsageState newState) { LockGuard guard(_mutex); _currState = newState; - updateFlushStrategy(guard); + updateFlushStrategy(guard, "disk-mem-usage update"); } void @@ -108,7 +118,7 @@ MemoryFlushConfigUpdater::setNodeRetired(bool nodeRetired) { LockGuard guard(_mutex); _nodeRetired = nodeRetired; - updateFlushStrategy(guard); + updateFlushStrategy(guard, nodeRetired ? "node retired" : "node unretired"); } namespace { @@ -122,8 +132,7 @@ getHardMemoryLimit(const HwInfo::Memory &memory) } MemoryFlush::Config -MemoryFlushConfigUpdater::convertConfig(const ProtonConfig::Flush::Memory &config, - const HwInfo::Memory &memory) +MemoryFlushConfigUpdater::convertConfig(const ProtonConfig::Flush::Memory &config, const HwInfo::Memory &memory) { const size_t hardMemoryLimit = getHardMemoryLimit(memory); size_t totalMaxMemory = config.maxmemory; diff --git a/searchcore/src/vespa/searchcore/proton/server/memory_flush_config_updater.h b/searchcore/src/vespa/searchcore/proton/server/memory_flush_config_updater.h index 28ee330689d..c19074c288f 100644 --- a/searchcore/src/vespa/searchcore/proton/server/memory_flush_config_updater.h +++ b/searchcore/src/vespa/searchcore/proton/server/memory_flush_config_updater.h @@ -21,23 +21,20 @@ private: using LockGuard = std::lock_guard<Mutex>; using ProtonConfig = vespa::config::search::core::ProtonConfig; - Mutex _mutex; - MemoryFlush::SP _flushStrategy; + Mutex _mutex; + MemoryFlush::SP _flushStrategy; ProtonConfig::Flush::Memory _currConfig; - HwInfo::Memory _memory; - DiskMemUsageState _currState; - bool _useConservativeDiskMode; - bool _useConservativeMemoryMode; - bool _nodeRetired; + HwInfo::Memory _memory; + DiskMemUsageState _currState; + bool _useConservativeDiskMode; + bool _useConservativeMemoryMode; + bool _nodeRetired; - void considerUseConservativeDiskMode(const 
LockGuard &guard, - MemoryFlush::Config &newConfig); - void considerUseConservativeMemoryMode(const LockGuard &guard, - MemoryFlush::Config &newConfig); - void considerUseRelaxedDiskMode(const LockGuard &guard, - MemoryFlush::Config &newConfig); - void updateFlushStrategy(const LockGuard &guard); + void considerUseConservativeDiskMode(const LockGuard &guard, MemoryFlush::Config &newConfig); + void considerUseConservativeMemoryMode(const LockGuard &guard, MemoryFlush::Config &newConfig); + void considerUseRelaxedDiskMode(const LockGuard &guard, MemoryFlush::Config &newConfig); + void updateFlushStrategy(const LockGuard &guard, const char * why); public: using UP = std::unique_ptr<MemoryFlushConfigUpdater>; @@ -47,7 +44,7 @@ public: const HwInfo::Memory &memory); void setConfig(const ProtonConfig::Flush::Memory &newConfig); void setNodeRetired(bool nodeRetired); - virtual void notifyDiskMemUsage(DiskMemUsageState newState) override; + void notifyDiskMemUsage(DiskMemUsageState newState) override; static MemoryFlush::Config convertConfig(const ProtonConfig::Flush::Memory &config, const HwInfo::Memory &memory); diff --git a/searchlib/src/main/java/com/yahoo/searchlib/aggregation/Group.java b/searchlib/src/main/java/com/yahoo/searchlib/aggregation/Group.java index c508296d739..c7080ec28d8 100644 --- a/searchlib/src/main/java/com/yahoo/searchlib/aggregation/Group.java +++ b/searchlib/src/main/java/com/yahoo/searchlib/aggregation/Group.java @@ -4,9 +4,17 @@ package com.yahoo.searchlib.aggregation; import com.yahoo.searchlib.expression.AggregationRefNode; import com.yahoo.searchlib.expression.ExpressionNode; import com.yahoo.searchlib.expression.ResultNode; -import com.yahoo.vespa.objects.*; - -import java.util.*; +import com.yahoo.vespa.objects.Deserializer; +import com.yahoo.vespa.objects.Identifiable; +import com.yahoo.vespa.objects.ObjectOperation; +import com.yahoo.vespa.objects.ObjectPredicate; +import com.yahoo.vespa.objects.ObjectVisitor; +import 
com.yahoo.vespa.objects.Serializer; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; public class Group extends Identifiable { @@ -132,11 +140,7 @@ public class Group extends Identifiable { if (sortType == SortType.BYID) { return; } - Collections.sort(children, new Comparator<Group>() { - public int compare(Group lhs, Group rhs) { - return lhs.compareId(rhs); - } - }); + Collections.sort(children, (Group lhs, Group rhs) -> lhs.compareId(rhs)); sortType = SortType.BYID; } @@ -145,11 +149,8 @@ public class Group extends Identifiable { if (sortType == SortType.BYRANK) { return; } - Collections.sort(children, new Comparator<Group>() { - public int compare(Group lhs, Group rhs) { - return lhs.compareRank(rhs); - } - }); + Collections.sort(children, (Group lhs, Group rhs) -> lhs.compareRank(rhs) ); + sortType = SortType.BYRANK; } @@ -403,22 +404,19 @@ public class Group extends Identifiable { if (id != null) { obj.id = (ResultNode)id.clone(); } - obj.aggregationResults = new ArrayList<AggregationResult>(); + obj.aggregationResults = new ArrayList<>(); for (AggregationResult result : aggregationResults) { obj.aggregationResults.add(result.clone()); } - obj.orderByIdx = new ArrayList<Integer>(); - for (Integer idx : orderByIdx) { - obj.orderByIdx.add(idx); - } - obj.orderByExp = new ArrayList<ExpressionNode>(); + obj.orderByIdx = new ArrayList<>(orderByIdx); + obj.orderByExp = new ArrayList<>(); RefResolver resolver = new RefResolver(obj); for (ExpressionNode exp : orderByExp) { exp = exp.clone(); exp.select(REF_LOCATOR, resolver); obj.orderByExp.add(exp); } - obj.children = new ArrayList<Group>(); + obj.children = new ArrayList<>(); for (Group child : children) { obj.children.add(child.clone()); } @@ -447,7 +445,7 @@ public class Group extends Identifiable { } } - private static enum SortType { + private enum SortType { UNSORTED, BYRANK, BYID diff --git 
a/searchlib/src/test/java/com/yahoo/searchlib/aggregation/GroupingTestCase.java b/searchlib/src/test/java/com/yahoo/searchlib/aggregation/GroupingTestCase.java index ec379e5f8af..e6143a17523 100644 --- a/searchlib/src/test/java/com/yahoo/searchlib/aggregation/GroupingTestCase.java +++ b/searchlib/src/test/java/com/yahoo/searchlib/aggregation/GroupingTestCase.java @@ -1,6 +1,7 @@ // Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package com.yahoo.searchlib.aggregation; +import com.yahoo.searchlib.expression.FloatResultNode; import com.yahoo.searchlib.expression.NullResultNode; import com.yahoo.searchlib.expression.StringBucketResultNode; import com.yahoo.vespa.objects.BufferSerializer; @@ -186,6 +187,7 @@ public class GroupingTestCase { public void requireThatNeedDeepResultCollectionWorks() { assertFalse(new Grouping().addLevel(new GroupingLevel().setGroupPrototype(new Group())).needDeepResultCollection()); assertTrue(new Grouping().addLevel(new GroupingLevel().setGroupPrototype(new Group().addOrderBy(new CountAggregationResult(9), true))).needDeepResultCollection()); + assertTrue(new Grouping().addLevel(new GroupingLevel().setGroupPrototype(new Group().addOrderBy(new AverageAggregationResult(), true))).needDeepResultCollection()); } @Test diff --git a/searchlib/src/tests/features/tensor_from_labels/tensor_from_labels_test.cpp b/searchlib/src/tests/features/tensor_from_labels/tensor_from_labels_test.cpp index 2e83d2acbf2..8d202100699 100644 --- a/searchlib/src/tests/features/tensor_from_labels/tensor_from_labels_test.cpp +++ b/searchlib/src/tests/features/tensor_from_labels/tensor_from_labels_test.cpp @@ -95,6 +95,7 @@ struct ExecFixture attrs.push_back(AttributeFactory::createAttribute("astr", AVC(AVBT::STRING, AVCT::ARRAY))); attrs.push_back(AttributeFactory::createAttribute("aint", AVC(AVBT::INT32, AVCT::ARRAY))); attrs.push_back(AttributeFactory::createAttribute("wsstr", AVC(AVBT::STRING, 
AVCT::WSET))); + attrs.push_back(AttributeFactory::createAttribute("sint", AVC(AVBT::INT32, AVCT::SINGLE))); for (const auto &attr : attrs) { attr->addReservedDoc(); @@ -112,6 +113,9 @@ struct ExecFixture aint->append(1, 3, 0); aint->append(1, 5, 0); aint->append(1, 7, 0); + + IntegerAttribute *sint = static_cast<IntegerAttribute *>(attrs[3].get()); + sint->update(1, 5); for (const auto &attr : attrs) { attr->commit(); @@ -167,6 +171,20 @@ TEST_F("require that array attribute can be converted to tensor (explicit dimens .add({{"dim", "5"}}, 1)), f.execute()); } +TEST_F("require that single-value integer attribute can be converted to tensor (default dimension)", + ExecFixture("tensorFromLabels(attribute(sint))")) +{ + EXPECT_EQUAL(*make_tensor(TensorSpec("tensor(sint{})") + .add({{"sint", "5"}}, 1)), f.execute()); +} + +TEST_F("require that single-value integer attribute can be converted to tensor (explicit dimension)", + ExecFixture("tensorFromLabels(attribute(sint),foobar)")) +{ + EXPECT_EQUAL(*make_tensor(TensorSpec("tensor(foobar{})") + .add({{"foobar", "5"}}, 1)), f.execute()); +} + TEST_F("require that empty tensor is created if attribute does not exists", ExecFixture("tensorFromLabels(attribute(null))")) { diff --git a/searchlib/src/vespa/searchlib/aggregation/group.h b/searchlib/src/vespa/searchlib/aggregation/group.h index 5b425de24e6..681cda43afa 100644 --- a/searchlib/src/vespa/searchlib/aggregation/group.h +++ b/searchlib/src/vespa/searchlib/aggregation/group.h @@ -232,7 +232,7 @@ public: /** * Recursively checks if any itself or any children needs a full resort. - * Then all hits must be processed and should be doen before any hit sorting. + * Then all hits must be processed and should be done before any hit sorting. 
*/ bool needResort() const { return _aggr.needResort(); } diff --git a/searchlib/src/vespa/searchlib/common/indexmetainfo.cpp b/searchlib/src/vespa/searchlib/common/indexmetainfo.cpp index 837c38eb340..25bc754a86f 100644 --- a/searchlib/src/vespa/searchlib/common/indexmetainfo.cpp +++ b/searchlib/src/vespa/searchlib/common/indexmetainfo.cpp @@ -5,7 +5,6 @@ #include <vespa/vespalib/util/stringfmt.h> #include <vespa/vespalib/util/guard.h> #include <cassert> -#include <algorithm> #include <vespa/log/log.h> LOG_SETUP(".indexmetainfo"); @@ -14,13 +13,13 @@ namespace { class Parser { private: - vespalib::string _name; + vespalib::string _name; vespalib::FilePointer _file; uint32_t _line; char _buf[2048]; bool _error; - vespalib::string _lastKey; - vespalib::string _lastValue; + vespalib::string _lastKey; + vespalib::string _lastValue; uint32_t _lastIdx; bool _matched; @@ -44,8 +43,7 @@ public: return false; } bool illegalLine() { - LOG(warning, "%s:%d: illegal line: %s", - _name.c_str(), _line, _buf); + LOG(warning, "%s:%d: illegal line: %s", _name.c_str(), _line, _buf); _error = true; return false; } @@ -57,8 +55,7 @@ public: } bool illegalValue() { LOG(warning, "%s:%d: illegal value for '%s': %s", - _name.c_str(), _line, _lastKey.c_str(), - _lastValue.c_str()); + _name.c_str(), _line, _lastKey.c_str(), _lastValue.c_str()); _error = true; return false; } @@ -79,7 +76,7 @@ public: if (!_file.valid()) { return openFailed(); } - if (fgets(_buf, sizeof(_buf), _file) == NULL) { + if (fgets(_buf, sizeof(_buf), _file) == nullptr) { return false; // EOF } ++_line; @@ -88,7 +85,7 @@ public: _buf[--len] = '\0'; } char *split = strchr(_buf, '='); - if (split == NULL || (split - _buf) == 0) { + if (split == nullptr || (split - _buf) == 0) { return illegalLine(); } _lastKey = vespalib::string(_buf, split - _buf); @@ -119,9 +116,9 @@ public: void parseInt64(const vespalib::string &k, uint64_t &v) { if (!_matched && !_error && _lastKey == k) { _matched = true; - char *end = NULL; + 
char *end = nullptr; uint64_t val = strtoull(_lastValue.c_str(), &end, 10); - if (end == NULL || *end != '\0' || + if (end == nullptr || *end != '\0' || val == static_cast<uint64_t>(-1)) { illegalValue(); return; @@ -141,10 +138,10 @@ public: if (dot2 == vespalib::string::npos) { return illegalArrayKey(); } - char *end = NULL; + char *end = nullptr; const char *pt = _lastKey.c_str() + name.length() + 1; uint32_t val = strtoul(pt, &end, 10); - if (end == NULL || end == pt || *end != '.' + if (end == nullptr || end == pt || *end != '.' || val > size || size > val + 1) { return illegalArrayKey(); @@ -200,7 +197,7 @@ IndexMetaInfo::IndexMetaInfo(const vespalib::string &path) { } -IndexMetaInfo::~IndexMetaInfo() {} +IndexMetaInfo::~IndexMetaInfo() = default; IndexMetaInfo::Snapshot IndexMetaInfo::getBestSnapshot() const @@ -209,11 +206,7 @@ IndexMetaInfo::getBestSnapshot() const while (idx >= 0 && !_snapshots[idx].valid) { --idx; } - if (idx >= 0) { - return _snapshots[idx]; - } else { - return Snapshot(); - } + return (idx >= 0) ? 
_snapshots[idx] : Snapshot(); } @@ -233,7 +226,7 @@ bool IndexMetaInfo::addSnapshot(const Snapshot &snap) { if (snap.dirName.empty() - || findSnapshot(snap.syncToken) != _snapshots.end()) + || (findSnapshot(snap.syncToken) != _snapshots.end())) { return false; } @@ -324,32 +317,23 @@ IndexMetaInfo::save(const vespalib::string &baseName) fprintf(f, "snapshot.%d.dirName=%s\n", i, snap.dirName.c_str()); } if (ferror(f) != 0) { - LOG(error, - "Could not write to file %s", - newName.c_str()); + LOG(error, "Could not write to file %s", newName.c_str()); return false; } if (fflush(f) != 0) { - LOG(error, - "Could not flush file %s", - newName.c_str()); + LOG(error, "Could not flush file %s", newName.c_str()); return false; } if (fsync(fileno(f)) != 0) { - LOG(error, - "Could not fsync file %s", - newName.c_str()); + LOG(error, "Could not fsync file %s", newName.c_str()); return false; } if (fclose(f.release()) != 0) { - LOG(error, - "Could not close file %s", - newName.c_str()); + LOG(error, "Could not close file %s", newName.c_str()); return false; } if (rename(newName.c_str(), fileName.c_str()) != 0) { - LOG(warning, "could not rename: %s->%s", - newName.c_str(), fileName.c_str()); + LOG(warning, "could not rename: %s->%s", newName.c_str(), fileName.c_str()); return false; } vespalib::File::sync(vespalib::dirname(fileName)); diff --git a/searchlib/src/vespa/searchlib/features/tensor_from_labels_feature.cpp b/searchlib/src/vespa/searchlib/features/tensor_from_labels_feature.cpp index 76a6e908fcb..24e06cfe639 100644 --- a/searchlib/src/vespa/searchlib/features/tensor_from_labels_feature.cpp +++ b/searchlib/src/vespa/searchlib/features/tensor_from_labels_feature.cpp @@ -45,7 +45,7 @@ TensorFromLabelsBlueprint::setup(const search::fef::IIndexEnvironment &env, _dimension = _sourceParam; } describeOutput("tensor", - "The tensor created from the given array source (attribute field or query parameter)", + "The tensor created from the given source (attribute field or query 
parameter)", FeatureType::object(ValueType::make_type(CellType::DOUBLE, {{_dimension}}))); return validSource; } @@ -63,10 +63,14 @@ createAttributeExecutor(const search::fef::IQueryEnvironment &env, " Returning empty tensor.", attrName.c_str()); return ConstantTensorExecutor::createEmpty(ValueType::make_type(CellType::DOUBLE, {{dimension}}), stash); } - if (attribute->getCollectionType() != search::attribute::CollectionType::ARRAY || - attribute->isFloatingPointType()) { - LOG(warning, "The attribute vector '%s' is NOT of type array of string or integer." - " Returning empty tensor.", attrName.c_str()); + if (attribute->isFloatingPointType()) { + LOG(warning, "The attribute vector '%s' must have basic type string or integer." + " Returning empty tensor.", attrName.c_str()); + return ConstantTensorExecutor::createEmpty(ValueType::make_type(CellType::DOUBLE, {{dimension}}), stash); + } + if (attribute->getCollectionType() == search::attribute::CollectionType::WSET) { + LOG(warning, "The attribute vector '%s' is a weighted set - use tensorFromWeightedSet instead." + " Returning empty tensor.", attrName.c_str()); return ConstantTensorExecutor::createEmpty(ValueType::make_type(CellType::DOUBLE, {{dimension}}), stash); } // Note that for array attribute vectors the default weight is 1.0 for all values. 
diff --git a/storage/src/tests/distributor/CMakeLists.txt b/storage/src/tests/distributor/CMakeLists.txt index f43280a5b44..fad8ca0bb25 100644 --- a/storage/src/tests/distributor/CMakeLists.txt +++ b/storage/src/tests/distributor/CMakeLists.txt @@ -7,14 +7,12 @@ vespa_add_executable(storage_distributor_gtest_runner_app TEST bucket_db_prune_elision_test.cpp bucketdatabasetest.cpp bucketdbmetricupdatertest.cpp - bucketdbupdatertest.cpp bucketgctimecalculatortest.cpp bucketstateoperationtest.cpp distributor_bucket_space_test.cpp distributor_host_info_reporter_test.cpp distributor_message_sender_stub.cpp distributor_stripe_pool_test.cpp - distributortest.cpp distributortestutil.cpp externaloperationhandlertest.cpp garbagecollectiontest.cpp @@ -22,6 +20,8 @@ vespa_add_executable(storage_distributor_gtest_runner_app TEST gtest_runner.cpp idealstatemanagertest.cpp joinbuckettest.cpp + legacy_bucket_db_updater_test.cpp + legacy_distributor_test.cpp maintenanceschedulertest.cpp mergelimitertest.cpp mergeoperationtest.cpp diff --git a/storage/src/tests/distributor/bucketdbupdatertest.cpp b/storage/src/tests/distributor/legacy_bucket_db_updater_test.cpp index 7e8fec3b83a..e353e976081 100644 --- a/storage/src/tests/distributor/bucketdbupdatertest.cpp +++ b/storage/src/tests/distributor/legacy_bucket_db_updater_test.cpp @@ -57,12 +57,14 @@ getRequestBucketInfoStrings(uint32_t count) } -class BucketDBUpdaterTest : public Test, - public DistributorTestUtil +// TODO STRIPE: Add variant of this test for the new stripe mode. +// TODO STRIPE: Remove this test when legacy mode is gone. 
+class LegacyBucketDBUpdaterTest : public Test, + public DistributorTestUtil { public: - BucketDBUpdaterTest(); - ~BucketDBUpdaterTest() override; + LegacyBucketDBUpdaterTest(); + ~LegacyBucketDBUpdaterTest() override; auto &defaultDistributorBucketSpace() { return getBucketSpaceRepo().get(makeBucketSpace()); } @@ -501,7 +503,7 @@ public: std::unique_ptr<PendingClusterState> state; PendingClusterStateFixture( - BucketDBUpdaterTest& owner, + LegacyBucketDBUpdaterTest& owner, const std::string& oldClusterState, const std::string& newClusterState) { @@ -520,7 +522,7 @@ public: } PendingClusterStateFixture( - BucketDBUpdaterTest& owner, + LegacyBucketDBUpdaterTest& owner, const std::string& oldClusterState) { ClusterInformation::CSP clusterInfo( @@ -551,15 +553,15 @@ public: } }; -BucketDBUpdaterTest::BucketDBUpdaterTest() +LegacyBucketDBUpdaterTest::LegacyBucketDBUpdaterTest() : DistributorTestUtil(), _bucketSpaces() { } -BucketDBUpdaterTest::~BucketDBUpdaterTest() = default; +LegacyBucketDBUpdaterTest::~LegacyBucketDBUpdaterTest() = default; -TEST_F(BucketDBUpdaterTest, normal_usage) { +TEST_F(LegacyBucketDBUpdaterTest, normal_usage) { setSystemState(lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3")); ASSERT_EQ(messageCount(3), _sender.commands().size()); @@ -590,7 +592,7 @@ TEST_F(BucketDBUpdaterTest, normal_usage) { ASSERT_NO_FATAL_FAILURE(assertCorrectBuckets(10, "distributor:2 storage:3")); } -TEST_F(BucketDBUpdaterTest, distributor_change) { +TEST_F(LegacyBucketDBUpdaterTest, distributor_change) { int numBuckets = 100; // First sends request @@ -620,7 +622,7 @@ TEST_F(BucketDBUpdaterTest, distributor_change) { ASSERT_NO_FATAL_FAILURE(assertCorrectBuckets(numBuckets, "distributor:2 storage:3")); } -TEST_F(BucketDBUpdaterTest, distributor_change_with_grouping) { +TEST_F(LegacyBucketDBUpdaterTest, distributor_change_with_grouping) { std::string distConfig(getDistConfig6Nodes2Groups()); setDistribution(distConfig); int numBuckets = 100; @@ -651,7 +653,7 @@ 
TEST_F(BucketDBUpdaterTest, distributor_change_with_grouping) { ASSERT_EQ(messageCount(6), _sender.commands().size()); } -TEST_F(BucketDBUpdaterTest, normal_usage_initializing) { +TEST_F(LegacyBucketDBUpdaterTest, normal_usage_initializing) { setSystemState(lib::ClusterState("distributor:1 .0.s:i storage:1 .0.s:i")); ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size()); @@ -688,7 +690,7 @@ TEST_F(BucketDBUpdaterTest, normal_usage_initializing) { ASSERT_NO_FATAL_FAILURE(assertCorrectBuckets(20, "distributor:1 storage:1")); } -TEST_F(BucketDBUpdaterTest, failed_request_bucket_info) { +TEST_F(LegacyBucketDBUpdaterTest, failed_request_bucket_info) { setSystemState(lib::ClusterState("distributor:1 .0.s:i storage:1")); // 2 messages sent up: 1 to the nodes, and one reply to the setsystemstate. @@ -730,7 +732,7 @@ TEST_F(BucketDBUpdaterTest, failed_request_bucket_info) { EXPECT_EQ(std::string("Set system state"), _senderDown.getCommands()); } -TEST_F(BucketDBUpdaterTest, down_while_init) { +TEST_F(LegacyBucketDBUpdaterTest, down_while_init) { ASSERT_NO_FATAL_FAILURE(setStorageNodes(3)); ASSERT_NO_FATAL_FAILURE(fakeBucketReply(lib::ClusterState("distributor:1 storage:3"), @@ -746,7 +748,7 @@ TEST_F(BucketDBUpdaterTest, down_while_init) { } bool -BucketDBUpdaterTest::bucketExistsThatHasNode(int bucketCount, uint16_t node) const +LegacyBucketDBUpdaterTest::bucketExistsThatHasNode(int bucketCount, uint16_t node) const { for (int i=1; i<bucketCount; i++) { if (bucketHasNode(document::BucketId(16, i), node)) { @@ -758,7 +760,7 @@ BucketDBUpdaterTest::bucketExistsThatHasNode(int bucketCount, uint16_t node) con } std::string -BucketDBUpdaterTest::getNodeList(std::vector<uint16_t> nodes, size_t count) +LegacyBucketDBUpdaterTest::getNodeList(std::vector<uint16_t> nodes, size_t count) { std::ostringstream ost; bool first = true; @@ -775,13 +777,13 @@ BucketDBUpdaterTest::getNodeList(std::vector<uint16_t> nodes, size_t count) } std::string 
-BucketDBUpdaterTest::getNodeList(std::vector<uint16_t> nodes) +LegacyBucketDBUpdaterTest::getNodeList(std::vector<uint16_t> nodes) { return getNodeList(std::move(nodes), _bucketSpaces.size()); } std::vector<uint16_t> -BucketDBUpdaterTest::expandNodeVec(const std::vector<uint16_t> &nodes) +LegacyBucketDBUpdaterTest::expandNodeVec(const std::vector<uint16_t> &nodes) { std::vector<uint16_t> res; size_t count = _bucketSpaces.size(); @@ -793,7 +795,7 @@ BucketDBUpdaterTest::expandNodeVec(const std::vector<uint16_t> &nodes) return res; } -TEST_F(BucketDBUpdaterTest, node_down) { +TEST_F(LegacyBucketDBUpdaterTest, node_down) { ASSERT_NO_FATAL_FAILURE(setStorageNodes(3)); enableDistributorClusterState("distributor:1 storage:3"); @@ -808,7 +810,7 @@ TEST_F(BucketDBUpdaterTest, node_down) { EXPECT_FALSE(bucketExistsThatHasNode(100, 1)); } -TEST_F(BucketDBUpdaterTest, storage_node_in_maintenance_clears_buckets_for_node) { +TEST_F(LegacyBucketDBUpdaterTest, storage_node_in_maintenance_clears_buckets_for_node) { ASSERT_NO_FATAL_FAILURE(setStorageNodes(3)); enableDistributorClusterState("distributor:1 storage:3"); @@ -823,7 +825,7 @@ TEST_F(BucketDBUpdaterTest, storage_node_in_maintenance_clears_buckets_for_node) EXPECT_FALSE(bucketExistsThatHasNode(100, 1)); } -TEST_F(BucketDBUpdaterTest, node_down_copies_get_in_sync) { +TEST_F(LegacyBucketDBUpdaterTest, node_down_copies_get_in_sync) { ASSERT_NO_FATAL_FAILURE(setStorageNodes(3)); lib::ClusterState systemState("distributor:1 storage:3"); @@ -840,7 +842,7 @@ TEST_F(BucketDBUpdaterTest, node_down_copies_get_in_sync) { dumpBucket(bid)); } -TEST_F(BucketDBUpdaterTest, initializing_while_recheck) { +TEST_F(LegacyBucketDBUpdaterTest, initializing_while_recheck) { lib::ClusterState systemState("distributor:1 storage:2 .0.s:i .0.i:0.1"); setSystemState(systemState); @@ -858,7 +860,7 @@ TEST_F(BucketDBUpdaterTest, initializing_while_recheck) { EXPECT_EQ(MessageType::SETSYSTEMSTATE, _senderDown.command(0)->getType()); } 
-TEST_F(BucketDBUpdaterTest, bit_change) { +TEST_F(LegacyBucketDBUpdaterTest, bit_change) { std::vector<document::BucketId> bucketlist; { @@ -957,7 +959,7 @@ TEST_F(BucketDBUpdaterTest, bit_change) { } }; -TEST_F(BucketDBUpdaterTest, recheck_node_with_failure) { +TEST_F(LegacyBucketDBUpdaterTest, recheck_node_with_failure) { ASSERT_NO_FATAL_FAILURE(initializeNodesAndBuckets(3, 5)); _sender.clear(); @@ -1000,7 +1002,7 @@ TEST_F(BucketDBUpdaterTest, recheck_node_with_failure) { EXPECT_EQ(size_t(2), _sender.commands().size()); } -TEST_F(BucketDBUpdaterTest, recheck_node) { +TEST_F(LegacyBucketDBUpdaterTest, recheck_node) { ASSERT_NO_FATAL_FAILURE(initializeNodesAndBuckets(3, 5)); _sender.clear(); @@ -1038,7 +1040,7 @@ TEST_F(BucketDBUpdaterTest, recheck_node) { EXPECT_EQ(api::BucketInfo(20,10,12, 50, 60, true, true), copy->getBucketInfo()); } -TEST_F(BucketDBUpdaterTest, notify_bucket_change) { +TEST_F(LegacyBucketDBUpdaterTest, notify_bucket_change) { enableDistributorClusterState("distributor:1 storage:1"); addNodesToBucketDB(document::BucketId(16, 1), "0=1234"); @@ -1101,7 +1103,7 @@ TEST_F(BucketDBUpdaterTest, notify_bucket_change) { dumpBucket(document::BucketId(16, 2))); } -TEST_F(BucketDBUpdaterTest, notify_bucket_change_from_node_down) { +TEST_F(LegacyBucketDBUpdaterTest, notify_bucket_change_from_node_down) { enableDistributorClusterState("distributor:1 storage:2"); addNodesToBucketDB(document::BucketId(16, 1), "1=1234"); @@ -1155,7 +1157,7 @@ TEST_F(BucketDBUpdaterTest, notify_bucket_change_from_node_down) { * distributor in the pending state but not by the current state would be * discarded when attempted inserted into the bucket database. 
*/ -TEST_F(BucketDBUpdaterTest, notify_change_with_pending_state_queues_bucket_info_requests) { +TEST_F(LegacyBucketDBUpdaterTest, notify_change_with_pending_state_queues_bucket_info_requests) { setSystemState(lib::ClusterState("distributor:1 storage:1")); ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size()); @@ -1194,7 +1196,7 @@ TEST_F(BucketDBUpdaterTest, notify_change_with_pending_state_queues_bucket_info_ } } -TEST_F(BucketDBUpdaterTest, merge_reply) { +TEST_F(LegacyBucketDBUpdaterTest, merge_reply) { enableDistributorClusterState("distributor:1 storage:3"); addNodesToBucketDB(document::BucketId(16, 1234), @@ -1236,7 +1238,7 @@ TEST_F(BucketDBUpdaterTest, merge_reply) { dumpBucket(document::BucketId(16, 1234))); }; -TEST_F(BucketDBUpdaterTest, merge_reply_node_down) { +TEST_F(LegacyBucketDBUpdaterTest, merge_reply_node_down) { enableDistributorClusterState("distributor:1 storage:3"); std::vector<api::MergeBucketCommand::Node> nodes; @@ -1278,7 +1280,7 @@ TEST_F(BucketDBUpdaterTest, merge_reply_node_down) { dumpBucket(document::BucketId(16, 1234))); }; -TEST_F(BucketDBUpdaterTest, merge_reply_node_down_after_request_sent) { +TEST_F(LegacyBucketDBUpdaterTest, merge_reply_node_down_after_request_sent) { enableDistributorClusterState("distributor:1 storage:3"); std::vector<api::MergeBucketCommand::Node> nodes; @@ -1321,7 +1323,7 @@ TEST_F(BucketDBUpdaterTest, merge_reply_node_down_after_request_sent) { }; -TEST_F(BucketDBUpdaterTest, flush) { +TEST_F(LegacyBucketDBUpdaterTest, flush) { enableDistributorClusterState("distributor:1 storage:3"); _sender.clear(); @@ -1348,7 +1350,7 @@ TEST_F(BucketDBUpdaterTest, flush) { } std::string -BucketDBUpdaterTest::getSentNodes( +LegacyBucketDBUpdaterTest::getSentNodes( const std::string& oldClusterState, const std::string& newClusterState) { @@ -1372,7 +1374,7 @@ BucketDBUpdaterTest::getSentNodes( } std::string -BucketDBUpdaterTest::getSentNodesDistributionChanged( 
+LegacyBucketDBUpdaterTest::getSentNodesDistributionChanged( const std::string& oldClusterState) { DistributorMessageSenderStub sender; @@ -1399,7 +1401,7 @@ BucketDBUpdaterTest::getSentNodesDistributionChanged( return ost.str(); } -TEST_F(BucketDBUpdaterTest, pending_cluster_state_send_messages) { +TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_send_messages) { EXPECT_EQ(getNodeList({0, 1, 2}), getSentNodes("cluster:d", "distributor:1 storage:3")); @@ -1496,7 +1498,7 @@ TEST_F(BucketDBUpdaterTest, pending_cluster_state_send_messages) { "distributor:3 storage:3 .1.s:m")); }; -TEST_F(BucketDBUpdaterTest, pending_cluster_state_receive) { +TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_receive) { DistributorMessageSenderStub sender; auto cmd(std::make_shared<api::SetSystemStateCommand>( @@ -1534,7 +1536,7 @@ TEST_F(BucketDBUpdaterTest, pending_cluster_state_receive) { EXPECT_EQ(3, (int)pendingTransition.results().size()); } -TEST_F(BucketDBUpdaterTest, pending_cluster_state_with_group_down) { +TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_with_group_down) { std::string config(getDistConfig6Nodes4Groups()); config += "distributor_auto_ownership_transfer_on_whole_group_down true\n"; setDistribution(config); @@ -1553,7 +1555,7 @@ TEST_F(BucketDBUpdaterTest, pending_cluster_state_with_group_down) { "distributor:6 .2.s:d storage:6")); } -TEST_F(BucketDBUpdaterTest, pending_cluster_state_with_group_down_and_no_handover) { +TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_with_group_down_and_no_handover) { std::string config(getDistConfig6Nodes4Groups()); config += "distributor_auto_ownership_transfer_on_whole_group_down false\n"; setDistribution(config); @@ -1639,7 +1641,7 @@ struct BucketDumper : public BucketDatabase::EntryProcessor }; std::string -BucketDBUpdaterTest::mergeBucketLists( +LegacyBucketDBUpdaterTest::mergeBucketLists( const lib::ClusterState& oldState, const std::string& existingData, const lib::ClusterState& newState, @@ 
-1694,7 +1696,7 @@ BucketDBUpdaterTest::mergeBucketLists( } std::string -BucketDBUpdaterTest::mergeBucketLists(const std::string& existingData, +LegacyBucketDBUpdaterTest::mergeBucketLists(const std::string& existingData, const std::string& newData, bool includeBucketInfo) { @@ -1706,7 +1708,7 @@ BucketDBUpdaterTest::mergeBucketLists(const std::string& existingData, includeBucketInfo); } -TEST_F(BucketDBUpdaterTest, pending_cluster_state_merge) { +TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_merge) { // Simple initializing case - ask all nodes for info EXPECT_EQ( // Result is on the form: [bucket w/o count bits]:[node indexes]|.. @@ -1745,7 +1747,7 @@ TEST_F(BucketDBUpdaterTest, pending_cluster_state_merge) { mergeBucketLists("", "0:5/0/0/0|1:5/2/3/4", true)); } -TEST_F(BucketDBUpdaterTest, pending_cluster_state_merge_replica_changed) { +TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_merge_replica_changed) { // Node went from initializing to up and non-invalid bucket changed. 
EXPECT_EQ( std::string("2:0/2/3/4/t|3:0/2/4/6/t|"), @@ -1757,7 +1759,7 @@ TEST_F(BucketDBUpdaterTest, pending_cluster_state_merge_replica_changed) { true)); } -TEST_F(BucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_current_state) { +TEST_F(LegacyBucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_current_state) { document::BucketId bucket(16, 3); lib::ClusterState stateBefore("distributor:1 storage:1"); { @@ -1786,7 +1788,7 @@ TEST_F(BucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_current_s EXPECT_EQ(std::string("NONEXISTING"), dumpBucket(bucket)); } -TEST_F(BucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_pending_state) { +TEST_F(LegacyBucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_pending_state) { document::BucketId bucket(16, 3); lib::ClusterState stateBefore("distributor:1 storage:1"); { @@ -1820,7 +1822,7 @@ TEST_F(BucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_pending_s * will with a high likelihood end up not getting the complete view of the buckets in * the cluster. 
*/ -TEST_F(BucketDBUpdaterTest, cluster_state_always_sends_full_fetch_when_distribution_change_pending) { +TEST_F(LegacyBucketDBUpdaterTest, cluster_state_always_sends_full_fetch_when_distribution_change_pending) { lib::ClusterState stateBefore("distributor:6 storage:6"); { uint32_t expectedMsgs = messageCount(6), dummyBucketsToReturn = 1; @@ -1862,7 +1864,7 @@ TEST_F(BucketDBUpdaterTest, cluster_state_always_sends_full_fetch_when_distribut EXPECT_EQ(size_t(0), _sender.commands().size()); } -TEST_F(BucketDBUpdaterTest, changed_distribution_config_triggers_recovery_mode) { +TEST_F(LegacyBucketDBUpdaterTest, changed_distribution_config_triggers_recovery_mode) { ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(lib::ClusterState("distributor:6 storage:6"), messageCount(6), 20)); _sender.clear(); EXPECT_TRUE(distributor_is_in_recovery_mode()); @@ -1911,7 +1913,7 @@ std::unique_ptr<BucketDatabase::EntryProcessor> func_processor(Func&& f) { } -TEST_F(BucketDBUpdaterTest, changed_distribution_config_does_not_elide_bucket_db_pruning) { +TEST_F(LegacyBucketDBUpdaterTest, changed_distribution_config_does_not_elide_bucket_db_pruning) { setDistribution(getDistConfig3Nodes1Group()); constexpr uint32_t n_buckets = 100; @@ -1930,7 +1932,7 @@ TEST_F(BucketDBUpdaterTest, changed_distribution_config_does_not_elide_bucket_db })); } -TEST_F(BucketDBUpdaterTest, newly_added_buckets_have_current_time_as_gc_timestamp) { +TEST_F(LegacyBucketDBUpdaterTest, newly_added_buckets_have_current_time_as_gc_timestamp) { getClock().setAbsoluteTimeInSeconds(101234); lib::ClusterState stateBefore("distributor:1 storage:1"); { @@ -1945,7 +1947,7 @@ TEST_F(BucketDBUpdaterTest, newly_added_buckets_have_current_time_as_gc_timestam EXPECT_EQ(uint32_t(101234), e->getLastGarbageCollectionTime()); } -TEST_F(BucketDBUpdaterTest, newer_mutations_not_overwritten_by_earlier_bucket_fetch) { +TEST_F(LegacyBucketDBUpdaterTest, newer_mutations_not_overwritten_by_earlier_bucket_fetch) { { lib::ClusterState 
stateBefore("distributor:1 storage:1 .0.s:i"); uint32_t expectedMsgs = _bucketSpaces.size(), dummyBucketsToReturn = 0; @@ -1992,7 +1994,7 @@ TEST_F(BucketDBUpdaterTest, newer_mutations_not_overwritten_by_earlier_bucket_fe } std::vector<uint16_t> -BucketDBUpdaterTest::getSendSet() const +LegacyBucketDBUpdaterTest::getSendSet() const { std::vector<uint16_t> nodes; std::transform(_sender.commands().begin(), @@ -2007,7 +2009,7 @@ BucketDBUpdaterTest::getSendSet() const } std::vector<uint16_t> -BucketDBUpdaterTest::getSentNodesWithPreemption( +LegacyBucketDBUpdaterTest::getSentNodesWithPreemption( const std::string& oldClusterState, uint32_t expectedOldStateMessages, const std::string& preemptedClusterState, @@ -2040,7 +2042,7 @@ using nodeVec = std::vector<uint16_t>; * database modifications caused by intermediate states will not be * accounted for (basically the ABA problem in a distributed setting). */ -TEST_F(BucketDBUpdaterTest, preempted_distributor_change_carries_node_set_over_to_next_state_fetch) { +TEST_F(LegacyBucketDBUpdaterTest, preempted_distributor_change_carries_node_set_over_to_next_state_fetch) { EXPECT_EQ( expandNodeVec({0, 1, 2, 3, 4, 5}), getSentNodesWithPreemption("version:1 distributor:6 storage:6", @@ -2049,7 +2051,7 @@ TEST_F(BucketDBUpdaterTest, preempted_distributor_change_carries_node_set_over_t "version:3 distributor:6 storage:6")); } -TEST_F(BucketDBUpdaterTest, preempted_storage_change_carries_node_set_over_to_next_state_fetch) { +TEST_F(LegacyBucketDBUpdaterTest, preempted_storage_change_carries_node_set_over_to_next_state_fetch) { EXPECT_EQ( expandNodeVec({2, 3}), getSentNodesWithPreemption( @@ -2059,7 +2061,7 @@ TEST_F(BucketDBUpdaterTest, preempted_storage_change_carries_node_set_over_to_ne "version:3 distributor:6 storage:6")); } -TEST_F(BucketDBUpdaterTest, preempted_storage_node_down_must_be_re_fetched) { +TEST_F(LegacyBucketDBUpdaterTest, preempted_storage_node_down_must_be_re_fetched) { EXPECT_EQ( expandNodeVec({2}), 
getSentNodesWithPreemption( @@ -2069,7 +2071,7 @@ TEST_F(BucketDBUpdaterTest, preempted_storage_node_down_must_be_re_fetched) { "version:3 distributor:6 storage:6")); } -TEST_F(BucketDBUpdaterTest, do_not_send_to_preempted_node_now_in_down_state) { +TEST_F(LegacyBucketDBUpdaterTest, do_not_send_to_preempted_node_now_in_down_state) { EXPECT_EQ( nodeVec{}, getSentNodesWithPreemption( @@ -2079,7 +2081,7 @@ TEST_F(BucketDBUpdaterTest, do_not_send_to_preempted_node_now_in_down_state) { "version:3 distributor:6 storage:6 .2.s:d")); // 2 down again. } -TEST_F(BucketDBUpdaterTest, doNotSendToPreemptedNodeNotPartOfNewState) { +TEST_F(LegacyBucketDBUpdaterTest, doNotSendToPreemptedNodeNotPartOfNewState) { // Even though 100 nodes are preempted, not all of these should be part // of the request afterwards when only 6 are part of the state. EXPECT_EQ( @@ -2091,7 +2093,7 @@ TEST_F(BucketDBUpdaterTest, doNotSendToPreemptedNodeNotPartOfNewState) { "version:3 distributor:6 storage:6")); } -TEST_F(BucketDBUpdaterTest, outdated_node_set_cleared_after_successful_state_completion) { +TEST_F(LegacyBucketDBUpdaterTest, outdated_node_set_cleared_after_successful_state_completion) { lib::ClusterState stateBefore( "version:1 distributor:6 storage:6 .1.t:1234"); uint32_t expectedMsgs = messageCount(6), dummyBucketsToReturn = 10; @@ -2111,7 +2113,7 @@ TEST_F(BucketDBUpdaterTest, outdated_node_set_cleared_after_successful_state_com // distribution config will follow very shortly after the config has been // applied to the node. The new cluster state will then send out requests to // the correct node set. 
-TEST_F(BucketDBUpdaterTest, DISABLED_cluster_config_downsize_only_sends_to_available_nodes) { +TEST_F(LegacyBucketDBUpdaterTest, DISABLED_cluster_config_downsize_only_sends_to_available_nodes) { uint32_t expectedMsgs = 6, dummyBucketsToReturn = 20; ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(lib::ClusterState("distributor:6 storage:6"), expectedMsgs, dummyBucketsToReturn)); @@ -2134,7 +2136,7 @@ TEST_F(BucketDBUpdaterTest, DISABLED_cluster_config_downsize_only_sends_to_avail * * See VESPA-790 for details. */ -TEST_F(BucketDBUpdaterTest, node_missing_from_config_is_treated_as_needing_ownership_transfer) { +TEST_F(LegacyBucketDBUpdaterTest, node_missing_from_config_is_treated_as_needing_ownership_transfer) { uint32_t expectedMsgs = messageCount(3), dummyBucketsToReturn = 1; ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(lib::ClusterState("distributor:3 storage:3"), expectedMsgs, dummyBucketsToReturn)); @@ -2170,7 +2172,7 @@ TEST_F(BucketDBUpdaterTest, node_missing_from_config_is_treated_as_needing_owner EXPECT_EQ(expandNodeVec({0, 1}), getSendSet()); } -TEST_F(BucketDBUpdaterTest, changed_distributor_set_implies_ownership_transfer) { +TEST_F(LegacyBucketDBUpdaterTest, changed_distributor_set_implies_ownership_transfer) { auto fixture = createPendingStateFixtureForStateChange( "distributor:2 storage:2", "distributor:1 storage:2"); EXPECT_TRUE(fixture->state->hasBucketOwnershipTransfer()); @@ -2180,7 +2182,7 @@ TEST_F(BucketDBUpdaterTest, changed_distributor_set_implies_ownership_transfer) EXPECT_TRUE(fixture->state->hasBucketOwnershipTransfer()); } -TEST_F(BucketDBUpdaterTest, unchanged_distributor_set_implies_no_ownership_transfer) { +TEST_F(LegacyBucketDBUpdaterTest, unchanged_distributor_set_implies_no_ownership_transfer) { auto fixture = createPendingStateFixtureForStateChange( "distributor:2 storage:2", "distributor:2 storage:1"); EXPECT_FALSE(fixture->state->hasBucketOwnershipTransfer()); @@ -2190,26 +2192,26 @@ TEST_F(BucketDBUpdaterTest, 
unchanged_distributor_set_implies_no_ownership_trans EXPECT_FALSE(fixture->state->hasBucketOwnershipTransfer()); } -TEST_F(BucketDBUpdaterTest, changed_distribution_config_implies_ownership_transfer) { +TEST_F(LegacyBucketDBUpdaterTest, changed_distribution_config_implies_ownership_transfer) { auto fixture = createPendingStateFixtureForDistributionChange( "distributor:2 storage:2"); EXPECT_TRUE(fixture->state->hasBucketOwnershipTransfer()); } -TEST_F(BucketDBUpdaterTest, transition_time_tracked_for_single_state_change) { +TEST_F(LegacyBucketDBUpdaterTest, transition_time_tracked_for_single_state_change) { ASSERT_NO_FATAL_FAILURE(completeStateTransitionInSeconds("distributor:2 storage:2", 5, messageCount(2))); EXPECT_EQ(uint64_t(5000), lastTransitionTimeInMillis()); } -TEST_F(BucketDBUpdaterTest, transition_time_reset_across_non_preempting_state_changes) { +TEST_F(LegacyBucketDBUpdaterTest, transition_time_reset_across_non_preempting_state_changes) { ASSERT_NO_FATAL_FAILURE(completeStateTransitionInSeconds("distributor:2 storage:2", 5, messageCount(2))); ASSERT_NO_FATAL_FAILURE(completeStateTransitionInSeconds("distributor:2 storage:3", 3, messageCount(1))); EXPECT_EQ(uint64_t(3000), lastTransitionTimeInMillis()); } -TEST_F(BucketDBUpdaterTest, transition_time_tracked_for_distribution_config_change) { +TEST_F(LegacyBucketDBUpdaterTest, transition_time_tracked_for_distribution_config_change) { lib::ClusterState state("distributor:2 storage:2"); ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(state, messageCount(2), 1)); @@ -2221,7 +2223,7 @@ TEST_F(BucketDBUpdaterTest, transition_time_tracked_for_distribution_config_chan EXPECT_EQ(uint64_t(4000), lastTransitionTimeInMillis()); } -TEST_F(BucketDBUpdaterTest, transition_time_tracked_across_preempted_transitions) { +TEST_F(LegacyBucketDBUpdaterTest, transition_time_tracked_across_preempted_transitions) { _sender.clear(); lib::ClusterState state("distributor:2 storage:2"); setSystemState(state); @@ -2245,7 +2247,7 @@ 
TEST_F(BucketDBUpdaterTest, transition_time_tracked_across_preempted_transitions * Yes, the order of node<->bucket id is reversed between the two, perhaps to make sure you're awake. */ -TEST_F(BucketDBUpdaterTest, batch_update_of_existing_diverging_replicas_does_not_mark_any_as_trusted) { +TEST_F(LegacyBucketDBUpdaterTest, batch_update_of_existing_diverging_replicas_does_not_mark_any_as_trusted) { // Replacing bucket information for content node 0 should not mark existing // untrusted replica as trusted as a side effect. EXPECT_EQ( @@ -2257,32 +2259,32 @@ TEST_F(BucketDBUpdaterTest, batch_update_of_existing_diverging_replicas_does_not "0:5/1/2/3|1:5/7/8/9", true)); } -TEST_F(BucketDBUpdaterTest, batch_add_of_new_diverging_replicas_does_not_mark_any_as_trusted) { +TEST_F(LegacyBucketDBUpdaterTest, batch_add_of_new_diverging_replicas_does_not_mark_any_as_trusted) { EXPECT_EQ(std::string("5:1/7/8/9/u,0/1/2/3/u|"), mergeBucketLists("", "0:5/1/2/3|1:5/7/8/9", true)); } -TEST_F(BucketDBUpdaterTest, batch_add_with_single_resulting_replica_implicitly_marks_as_trusted) { +TEST_F(LegacyBucketDBUpdaterTest, batch_add_with_single_resulting_replica_implicitly_marks_as_trusted) { EXPECT_EQ(std::string("5:0/1/2/3/t|"), mergeBucketLists("", "0:5/1/2/3", true)); } -TEST_F(BucketDBUpdaterTest, identity_update_of_single_replica_does_not_clear_trusted) { +TEST_F(LegacyBucketDBUpdaterTest, identity_update_of_single_replica_does_not_clear_trusted) { EXPECT_EQ(std::string("5:0/1/2/3/t|"), mergeBucketLists("0:5/1/2/3", "0:5/1/2/3", true)); } -TEST_F(BucketDBUpdaterTest, identity_update_of_diverging_untrusted_replicas_does_not_mark_any_as_trusted) { +TEST_F(LegacyBucketDBUpdaterTest, identity_update_of_diverging_untrusted_replicas_does_not_mark_any_as_trusted) { EXPECT_EQ(std::string("5:1/7/8/9/u,0/1/2/3/u|"), mergeBucketLists("0:5/1/2/3|1:5/7/8/9", "0:5/1/2/3|1:5/7/8/9", true)); } -TEST_F(BucketDBUpdaterTest, adding_diverging_replica_to_existing_trusted_does_not_remove_trusted) { 
+TEST_F(LegacyBucketDBUpdaterTest, adding_diverging_replica_to_existing_trusted_does_not_remove_trusted) { EXPECT_EQ(std::string("5:1/2/3/4/u,0/1/2/3/t|"), mergeBucketLists("0:5/1/2/3", "0:5/1/2/3|1:5/2/3/4", true)); } -TEST_F(BucketDBUpdaterTest, batch_update_from_distributor_change_does_not_mark_diverging_replicas_as_trusted) { +TEST_F(LegacyBucketDBUpdaterTest, batch_update_from_distributor_change_does_not_mark_diverging_replicas_as_trusted) { // This differs from batch_update_of_existing_diverging_replicas_does_not_mark_any_as_trusted // in that _all_ content nodes are considered outdated when distributor changes take place, // and therefore a slightly different code path is taken. In particular, bucket info for @@ -2298,7 +2300,7 @@ TEST_F(BucketDBUpdaterTest, batch_update_from_distributor_change_does_not_mark_d } // TODO remove on Vespa 8 - this is a workaround for https://github.com/vespa-engine/vespa/issues/8475 -TEST_F(BucketDBUpdaterTest, global_distribution_hash_falls_back_to_legacy_format_upon_request_rejection) { +TEST_F(LegacyBucketDBUpdaterTest, global_distribution_hash_falls_back_to_legacy_format_upon_request_rejection) { std::string distConfig(getDistConfig6Nodes2Groups()); setDistribution(distConfig); @@ -2366,7 +2368,7 @@ void for_each_bucket(const DistributorBucketSpaceRepo& repo, Func&& f) { } -TEST_F(BucketDBUpdaterTest, non_owned_buckets_moved_to_read_only_db_on_ownership_change) { +TEST_F(LegacyBucketDBUpdaterTest, non_owned_buckets_moved_to_read_only_db_on_ownership_change) { getBucketDBUpdater().set_stale_reads_enabled(true); lib::ClusterState initial_state("distributor:1 storage:4"); // All buckets owned by us by definition @@ -2407,7 +2409,7 @@ TEST_F(BucketDBUpdaterTest, non_owned_buckets_moved_to_read_only_db_on_ownership }); } -TEST_F(BucketDBUpdaterTest, buckets_no_longer_available_are_not_moved_to_read_only_database) { +TEST_F(LegacyBucketDBUpdaterTest, buckets_no_longer_available_are_not_moved_to_read_only_database) { constexpr 
uint32_t n_buckets = 10; // No ownership change, just node down. Test redundancy is 2, so removing 2 nodes will // cause some buckets to be entirely unavailable. @@ -2418,7 +2420,7 @@ TEST_F(BucketDBUpdaterTest, buckets_no_longer_available_are_not_moved_to_read_on EXPECT_EQ(size_t(0), read_only_global_db().size()); } -TEST_F(BucketDBUpdaterTest, non_owned_buckets_purged_when_read_only_support_is_config_disabled) { +TEST_F(LegacyBucketDBUpdaterTest, non_owned_buckets_purged_when_read_only_support_is_config_disabled) { getBucketDBUpdater().set_stale_reads_enabled(false); lib::ClusterState initial_state("distributor:1 storage:4"); // All buckets owned by us by definition @@ -2440,7 +2442,7 @@ TEST_F(BucketDBUpdaterTest, non_owned_buckets_purged_when_read_only_support_is_c EXPECT_EQ(size_t(0), read_only_global_db().size()); } -void BucketDBUpdaterTest::trigger_completed_but_not_yet_activated_transition( +void LegacyBucketDBUpdaterTest::trigger_completed_but_not_yet_activated_transition( vespalib::stringref initial_state_str, uint32_t initial_buckets, uint32_t initial_expected_msgs, @@ -2463,7 +2465,7 @@ void BucketDBUpdaterTest::trigger_completed_but_not_yet_activated_transition( _sender.clear(); } -TEST_F(BucketDBUpdaterTest, deferred_activated_state_does_not_enable_state_until_activation_received) { +TEST_F(LegacyBucketDBUpdaterTest, deferred_activated_state_does_not_enable_state_until_activation_received) { getBucketDBUpdater().set_stale_reads_enabled(true); constexpr uint32_t n_buckets = 10; ASSERT_NO_FATAL_FAILURE( @@ -2483,7 +2485,7 @@ TEST_F(BucketDBUpdaterTest, deferred_activated_state_does_not_enable_state_until EXPECT_EQ(uint64_t(n_buckets), mutable_global_db().size()); } -TEST_F(BucketDBUpdaterTest, read_only_db_cleared_once_pending_state_is_activated) { +TEST_F(LegacyBucketDBUpdaterTest, read_only_db_cleared_once_pending_state_is_activated) { getBucketDBUpdater().set_stale_reads_enabled(true); constexpr uint32_t n_buckets = 10; ASSERT_NO_FATAL_FAILURE( @@ 
-2495,7 +2497,7 @@ TEST_F(BucketDBUpdaterTest, read_only_db_cleared_once_pending_state_is_activated EXPECT_EQ(uint64_t(0), read_only_global_db().size()); } -TEST_F(BucketDBUpdaterTest, read_only_db_is_populated_even_when_self_is_marked_down) { +TEST_F(LegacyBucketDBUpdaterTest, read_only_db_is_populated_even_when_self_is_marked_down) { getBucketDBUpdater().set_stale_reads_enabled(true); constexpr uint32_t n_buckets = 10; ASSERT_NO_FATAL_FAILURE( @@ -2509,7 +2511,7 @@ TEST_F(BucketDBUpdaterTest, read_only_db_is_populated_even_when_self_is_marked_d EXPECT_EQ(uint64_t(n_buckets), read_only_global_db().size()); } -TEST_F(BucketDBUpdaterTest, activate_cluster_state_request_with_mismatching_version_returns_actual_version) { +TEST_F(LegacyBucketDBUpdaterTest, activate_cluster_state_request_with_mismatching_version_returns_actual_version) { getBucketDBUpdater().set_stale_reads_enabled(true); constexpr uint32_t n_buckets = 10; ASSERT_NO_FATAL_FAILURE( @@ -2523,7 +2525,7 @@ TEST_F(BucketDBUpdaterTest, activate_cluster_state_request_with_mismatching_vers ASSERT_NO_FATAL_FAILURE(assert_has_activate_cluster_state_reply_with_actual_version(5)); } -TEST_F(BucketDBUpdaterTest, activate_cluster_state_request_without_pending_transition_passes_message_through) { +TEST_F(LegacyBucketDBUpdaterTest, activate_cluster_state_request_without_pending_transition_passes_message_through) { getBucketDBUpdater().set_stale_reads_enabled(true); constexpr uint32_t n_buckets = 10; ASSERT_NO_FATAL_FAILURE( @@ -2539,7 +2541,7 @@ TEST_F(BucketDBUpdaterTest, activate_cluster_state_request_without_pending_trans EXPECT_EQ(size_t(0), _sender.replies().size()); } -TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_bulk_loading_into_empty_db) { +TEST_F(LegacyBucketDBUpdaterTest, DISABLED_benchmark_bulk_loading_into_empty_db) { // Need to trigger an initial edge to complete first bucket scan ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(lib::ClusterState("distributor:2 storage:1"), messageCount(1), 0)); @@ 
-2586,7 +2588,7 @@ TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_bulk_loading_into_empty_db) { EXPECT_EQ(size_t(0), mutable_global_db().size()); } -uint32_t BucketDBUpdaterTest::populate_bucket_db_via_request_bucket_info_for_benchmarking() { +uint32_t LegacyBucketDBUpdaterTest::populate_bucket_db_via_request_bucket_info_for_benchmarking() { // Need to trigger an initial edge to complete first bucket scan setAndEnableClusterState(lib::ClusterState("distributor:2 storage:1"), messageCount(1), 0); _sender.clear(); @@ -2622,7 +2624,7 @@ uint32_t BucketDBUpdaterTest::populate_bucket_db_via_request_bucket_info_for_ben return n_buckets; } -TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_removing_buckets_for_unavailable_storage_nodes) { +TEST_F(LegacyBucketDBUpdaterTest, DISABLED_benchmark_removing_buckets_for_unavailable_storage_nodes) { const uint32_t n_buckets = populate_bucket_db_via_request_bucket_info_for_benchmarking(); lib::ClusterState no_op_state("distributor:1 storage:1 .0.s:m"); // Removing all buckets via ownership @@ -2633,7 +2635,7 @@ TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_removing_buckets_for_unavailable_ fprintf(stderr, "Took %g seconds to scan and remove %u buckets\n", timer.min_time(), n_buckets); } -TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_no_buckets_removed_during_node_remover_db_pass) { +TEST_F(LegacyBucketDBUpdaterTest, DISABLED_benchmark_no_buckets_removed_during_node_remover_db_pass) { const uint32_t n_buckets = populate_bucket_db_via_request_bucket_info_for_benchmarking(); // TODO this benchmark is void if we further restrict the pruning elision logic to allow @@ -2646,7 +2648,7 @@ TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_no_buckets_removed_during_node_re fprintf(stderr, "Took %g seconds to scan %u buckets with no-op action\n", timer.min_time(), n_buckets); } -TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_all_buckets_removed_during_node_remover_db_pass) { +TEST_F(LegacyBucketDBUpdaterTest, 
DISABLED_benchmark_all_buckets_removed_during_node_remover_db_pass) { const uint32_t n_buckets = populate_bucket_db_via_request_bucket_info_for_benchmarking(); lib::ClusterState no_op_state("distributor:1 storage:1 .0.s:m"); // Removing all buckets via all replicas gone @@ -2657,7 +2659,7 @@ TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_all_buckets_removed_during_node_r fprintf(stderr, "Took %g seconds to scan and remove %u buckets\n", timer.min_time(), n_buckets); } -TEST_F(BucketDBUpdaterTest, pending_cluster_state_getter_is_non_null_only_when_state_is_pending) { +TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_getter_is_non_null_only_when_state_is_pending) { auto initial_baseline = std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:d"); auto initial_default = std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:m"); @@ -2682,7 +2684,7 @@ TEST_F(BucketDBUpdaterTest, pending_cluster_state_getter_is_non_null_only_when_s EXPECT_TRUE(state == nullptr); } -struct BucketDBUpdaterSnapshotTest : BucketDBUpdaterTest { +struct BucketDBUpdaterSnapshotTest : LegacyBucketDBUpdaterTest { lib::ClusterState empty_state; std::shared_ptr<lib::ClusterState> initial_baseline; std::shared_ptr<lib::ClusterState> initial_default; @@ -2691,7 +2693,7 @@ struct BucketDBUpdaterSnapshotTest : BucketDBUpdaterTest { Bucket global_bucket; BucketDBUpdaterSnapshotTest() - : BucketDBUpdaterTest(), + : LegacyBucketDBUpdaterTest(), empty_state(), initial_baseline(std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:d")), initial_default(std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:m")), @@ -2704,7 +2706,7 @@ struct BucketDBUpdaterSnapshotTest : BucketDBUpdaterTest { ~BucketDBUpdaterSnapshotTest() override; void SetUp() override { - BucketDBUpdaterTest::SetUp(); + LegacyBucketDBUpdaterTest::SetUp(); getBucketDBUpdater().set_stale_reads_enabled(true); }; diff --git a/storage/src/tests/distributor/distributortest.cpp 
b/storage/src/tests/distributor/legacy_distributor_test.cpp index 9c3686d3614..3123b7fc91c 100644 --- a/storage/src/tests/distributor/distributortest.cpp +++ b/storage/src/tests/distributor/legacy_distributor_test.cpp @@ -33,9 +33,11 @@ using namespace ::testing; namespace storage::distributor { -struct DistributorTest : Test, DistributorTestUtil { - DistributorTest(); - ~DistributorTest() override; +// TODO STRIPE: Add variant of this test for the new stripe mode. +// TODO STRIPE: Remove this test when legacy mode is gone. +struct LegacyDistributorTest : Test, DistributorTestUtil { + LegacyDistributorTest(); + ~LegacyDistributorTest() override; // TODO handle edge case for window between getnodestate reply already // sent and new request not yet received @@ -233,17 +235,17 @@ struct DistributorTest : Test, DistributorTestUtil { void set_up_and_start_get_op_with_stale_reads_enabled(bool enabled); }; -DistributorTest::DistributorTest() +LegacyDistributorTest::LegacyDistributorTest() : Test(), DistributorTestUtil(), _bucketSpaces() { } -DistributorTest::~DistributorTest() = default; +LegacyDistributorTest::~LegacyDistributorTest() = default; // TODO -> stripe test -TEST_F(DistributorTest, operation_generation) { +TEST_F(LegacyDistributorTest, operation_generation) { setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1"); document::BucketId bid; @@ -262,7 +264,7 @@ TEST_F(DistributorTest, operation_generation) { } // TODO -> stripe test -TEST_F(DistributorTest, operations_generated_and_started_without_duplicates) { +TEST_F(LegacyDistributorTest, operations_generated_and_started_without_duplicates) { setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1"); for (uint32_t i = 0; i < 6; ++i) { @@ -278,7 +280,7 @@ TEST_F(DistributorTest, operations_generated_and_started_without_duplicates) { // TODO -> stripe test // TODO also need to impl/test cross-stripe cluster state changes -TEST_F(DistributorTest, 
recovery_mode_on_cluster_state_change) { +TEST_F(LegacyDistributorTest, recovery_mode_on_cluster_state_change) { setupDistributor(Redundancy(1), NodeCount(2), "storage:1 .0.s:d distributor:1"); enableDistributorClusterState("storage:1 distributor:1"); @@ -300,7 +302,7 @@ TEST_F(DistributorTest, recovery_mode_on_cluster_state_change) { // TODO -> stripe test // TODO how to throttle across stripes? -TEST_F(DistributorTest, operations_are_throttled) { +TEST_F(LegacyDistributorTest, operations_are_throttled) { setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1"); getConfig().setMinPendingMaintenanceOps(1); getConfig().setMaxPendingMaintenanceOps(1); @@ -313,7 +315,7 @@ TEST_F(DistributorTest, operations_are_throttled) { } // TODO -> stripe test -TEST_F(DistributorTest, handle_unknown_maintenance_reply) { +TEST_F(LegacyDistributorTest, handle_unknown_maintenance_reply) { setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1"); { @@ -333,7 +335,7 @@ TEST_F(DistributorTest, handle_unknown_maintenance_reply) { } // TODO -> generic, non distr/stripe test -TEST_F(DistributorTest, contains_time_statement) { +TEST_F(LegacyDistributorTest, contains_time_statement) { setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1"); EXPECT_FALSE(getConfig().containsTimeStatement("")); @@ -345,7 +347,7 @@ TEST_F(DistributorTest, contains_time_statement) { } // TODO -> stripe test -TEST_F(DistributorTest, update_bucket_database) { +TEST_F(LegacyDistributorTest, update_bucket_database) { enableDistributorClusterState("distributor:1 storage:3"); EXPECT_EQ("BucketId(0x4000000000000001) : " @@ -416,7 +418,7 @@ public: // TODO -> stripe test // TODO need to impl/test cross-stripe status requests -TEST_F(DistributorTest, tick_processes_status_requests) { +TEST_F(LegacyDistributorTest, tick_processes_status_requests) { setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1"); addNodesToBucketDB(document::BucketId(16, 1), 
"0=1/1/1/t"); @@ -446,7 +448,7 @@ TEST_F(DistributorTest, tick_processes_status_requests) { // TODO -> distributor test since it owns metric hook // TODO need to impl/test cross-stripe metrics aggregation -TEST_F(DistributorTest, metric_update_hook_updates_pending_maintenance_metrics) { +TEST_F(LegacyDistributorTest, metric_update_hook_updates_pending_maintenance_metrics) { setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1"); // To ensure we count all operations, not just those fitting within the // pending window. @@ -493,7 +495,7 @@ TEST_F(DistributorTest, metric_update_hook_updates_pending_maintenance_metrics) } // TODO -> stripe test -TEST_F(DistributorTest, bucket_db_memory_usage_metrics_only_updated_at_fixed_time_intervals) { +TEST_F(LegacyDistributorTest, bucket_db_memory_usage_metrics_only_updated_at_fixed_time_intervals) { getClock().setAbsoluteTimeInSeconds(1000); setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1"); @@ -532,7 +534,7 @@ TEST_F(DistributorTest, bucket_db_memory_usage_metrics_only_updated_at_fixed_tim // TODO -> stripe test // TODO need to impl/test cross-stripe config propagation -TEST_F(DistributorTest, priority_config_is_propagated_to_distributor_configuration) { +TEST_F(LegacyDistributorTest, priority_config_is_propagated_to_distributor_configuration) { using namespace vespa::config::content::core; setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1"); @@ -569,7 +571,7 @@ TEST_F(DistributorTest, priority_config_is_propagated_to_distributor_configurati } // TODO -> stripe test -TEST_F(DistributorTest, no_db_resurrection_for_bucket_not_owned_in_pending_state) { +TEST_F(LegacyDistributorTest, no_db_resurrection_for_bucket_not_owned_in_pending_state) { setupDistributor(Redundancy(1), NodeCount(10), "storage:2 distributor:2"); lib::ClusterState newState("storage:10 distributor:10"); auto stateCmd = std::make_shared<api::SetSystemStateCommand>(newState); @@ -591,7 +593,7 @@ 
TEST_F(DistributorTest, no_db_resurrection_for_bucket_not_owned_in_pending_state } // TODO -> stripe test -TEST_F(DistributorTest, added_db_buckets_without_gc_timestamp_implicitly_get_current_time) { +TEST_F(LegacyDistributorTest, added_db_buckets_without_gc_timestamp_implicitly_get_current_time) { setupDistributor(Redundancy(1), NodeCount(10), "storage:2 distributor:2"); getClock().setAbsoluteTimeInSeconds(101234); document::BucketId bucket(16, 7654); @@ -605,7 +607,7 @@ TEST_F(DistributorTest, added_db_buckets_without_gc_timestamp_implicitly_get_cur } // TODO -> stripe test -TEST_F(DistributorTest, merge_stats_are_accumulated_during_database_iteration) { +TEST_F(LegacyDistributorTest, merge_stats_are_accumulated_during_database_iteration) { setupDistributor(Redundancy(2), NodeCount(3), "storage:3 distributor:1"); // Copies out of sync. Not possible for distributor to _reliably_ tell // which direction(s) data will flow, so for simplicity assume that we @@ -656,9 +658,9 @@ TEST_F(DistributorTest, merge_stats_are_accumulated_during_database_iteration) { } void -DistributorTest::assertBucketSpaceStats(size_t expBucketPending, size_t expBucketTotal, uint16_t node, - const vespalib::string& bucketSpace, - const BucketSpacesStatsProvider::PerNodeBucketSpacesStats& stats) +LegacyDistributorTest::assertBucketSpaceStats(size_t expBucketPending, size_t expBucketTotal, uint16_t node, + const vespalib::string& bucketSpace, + const BucketSpacesStatsProvider::PerNodeBucketSpacesStats& stats) { auto nodeItr = stats.find(node); ASSERT_TRUE(nodeItr != stats.end()); @@ -677,7 +679,7 @@ DistributorTest::assertBucketSpaceStats(size_t expBucketPending, size_t expBucke * operations for the bucket. 
*/ // TODO -> stripe test -TEST_F(DistributorTest, stats_generated_for_preempted_operations) { +TEST_F(LegacyDistributorTest, stats_generated_for_preempted_operations) { setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1"); // For this test it suffices to have a single bucket with multiple aspects // wrong about it. In this case, let a bucket be both out of sync _and_ @@ -702,7 +704,7 @@ TEST_F(DistributorTest, stats_generated_for_preempted_operations) { } // TODO -> distributor test -TEST_F(DistributorTest, host_info_reporter_config_is_propagated_to_reporter) { +TEST_F(LegacyDistributorTest, host_info_reporter_config_is_propagated_to_reporter) { setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1"); // Default is enabled=true. @@ -716,13 +718,13 @@ TEST_F(DistributorTest, host_info_reporter_config_is_propagated_to_reporter) { } // TODO -> stripe test (though config is a bit of a special case...) -TEST_F(DistributorTest, replica_counting_mode_is_configured_to_trusted_by_default) { +TEST_F(LegacyDistributorTest, replica_counting_mode_is_configured_to_trusted_by_default) { setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1"); EXPECT_EQ(ConfigBuilder::MinimumReplicaCountingMode::TRUSTED, currentReplicaCountingMode()); } // TODO -> stripe test -TEST_F(DistributorTest, replica_counting_mode_config_is_propagated_to_metric_updater) { +TEST_F(LegacyDistributorTest, replica_counting_mode_config_is_propagated_to_metric_updater) { setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1"); ConfigBuilder builder; builder.minimumReplicaCountingMode = ConfigBuilder::MinimumReplicaCountingMode::ANY; @@ -731,7 +733,7 @@ TEST_F(DistributorTest, replica_counting_mode_config_is_propagated_to_metric_upd } // TODO -> stripe test -TEST_F(DistributorTest, max_consecutively_inhibited_maintenance_ticks_config_is_propagated_to_internal_config) { +TEST_F(LegacyDistributorTest, 
max_consecutively_inhibited_maintenance_ticks_config_is_propagated_to_internal_config) { setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1"); ConfigBuilder builder; builder.maxConsecutivelyInhibitedMaintenanceTicks = 123; @@ -740,13 +742,13 @@ TEST_F(DistributorTest, max_consecutively_inhibited_maintenance_ticks_config_is_ } // TODO -> stripe test -TEST_F(DistributorTest, bucket_activation_is_enabled_by_default) { +TEST_F(LegacyDistributorTest, bucket_activation_is_enabled_by_default) { setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1"); EXPECT_FALSE(getConfig().isBucketActivationDisabled()); } // TODO -> stripe test -TEST_F(DistributorTest, bucket_activation_config_is_propagated_to_distributor_configuration) { +TEST_F(LegacyDistributorTest, bucket_activation_config_is_propagated_to_distributor_configuration) { using namespace vespa::config::content::core; setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1"); @@ -759,7 +761,7 @@ TEST_F(DistributorTest, bucket_activation_config_is_propagated_to_distributor_co } void -DistributorTest::configureMaxClusterClockSkew(int seconds) { +LegacyDistributorTest::configureMaxClusterClockSkew(int seconds) { using namespace vespa::config::content::core; ConfigBuilder builder; @@ -769,7 +771,7 @@ DistributorTest::configureMaxClusterClockSkew(int seconds) { } // TODO -> stripe test -TEST_F(DistributorTest, max_clock_skew_config_is_propagated_to_distributor_config) { +TEST_F(LegacyDistributorTest, max_clock_skew_config_is_propagated_to_distributor_config) { setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1"); configureMaxClusterClockSkew(5); @@ -794,7 +796,7 @@ auto make_dummy_get_command_for_bucket_1() { } -void DistributorTest::replyToSingleRequestBucketInfoCommandWith1Bucket() { +void LegacyDistributorTest::replyToSingleRequestBucketInfoCommandWith1Bucket() { ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size()); for (uint32_t i = 0; i < 
_sender.commands().size(); ++i) { ASSERT_EQ(api::MessageType::REQUESTBUCKETINFO, _sender.command(i)->getType()); @@ -814,11 +816,11 @@ void DistributorTest::replyToSingleRequestBucketInfoCommandWith1Bucket() { _sender.commands().clear(); } -void DistributorTest::sendDownDummyRemoveCommand() { +void LegacyDistributorTest::sendDownDummyRemoveCommand() { _distributor->handleMessage(makeDummyRemoveCommand()); } -void DistributorTest::assertSingleBouncedRemoveReplyPresent() { +void LegacyDistributorTest::assertSingleBouncedRemoveReplyPresent() { ASSERT_EQ(1, _sender.replies().size()); // Rejected remove ASSERT_EQ(api::MessageType::REMOVE_REPLY, _sender.reply(0)->getType()); auto& reply(static_cast<api::RemoveReply&>(*_sender.reply(0))); @@ -826,7 +828,7 @@ void DistributorTest::assertSingleBouncedRemoveReplyPresent() { _sender.replies().clear(); } -void DistributorTest::assertNoMessageBounced() { +void LegacyDistributorTest::assertNoMessageBounced() { ASSERT_EQ(0, _sender.replies().size()); } @@ -834,7 +836,7 @@ void DistributorTest::assertNoMessageBounced() { // reply once we have the "highest timestamp across all owned buckets" feature // in place. // TODO where does this truly belong? 
-TEST_F(DistributorTest, configured_safe_time_point_rejection_works_end_to_end) { +TEST_F(LegacyDistributorTest, configured_safe_time_point_rejection_works_end_to_end) { setupDistributor(Redundancy(2), NodeCount(2), "bits:1 storage:1 distributor:2"); getClock().setAbsoluteTimeInSeconds(1000); @@ -854,7 +856,7 @@ TEST_F(DistributorTest, configured_safe_time_point_rejection_works_end_to_end) { ASSERT_NO_FATAL_FAILURE(assertNoMessageBounced()); } -void DistributorTest::configure_mutation_sequencing(bool enabled) { +void LegacyDistributorTest::configure_mutation_sequencing(bool enabled) { using namespace vespa::config::content::core; ConfigBuilder builder; @@ -864,7 +866,7 @@ void DistributorTest::configure_mutation_sequencing(bool enabled) { } // TODO -> stripe test -TEST_F(DistributorTest, sequencing_config_is_propagated_to_distributor_config) { +TEST_F(LegacyDistributorTest, sequencing_config_is_propagated_to_distributor_config) { setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1"); // Should be enabled by default @@ -880,7 +882,7 @@ TEST_F(DistributorTest, sequencing_config_is_propagated_to_distributor_config) { } void -DistributorTest::configure_merge_busy_inhibit_duration(int seconds) { +LegacyDistributorTest::configure_merge_busy_inhibit_duration(int seconds) { using namespace vespa::config::content::core; ConfigBuilder builder; @@ -890,7 +892,7 @@ DistributorTest::configure_merge_busy_inhibit_duration(int seconds) { } // TODO -> stripe test -TEST_F(DistributorTest, merge_busy_inhibit_duration_config_is_propagated_to_distributor_config) { +TEST_F(LegacyDistributorTest, merge_busy_inhibit_duration_config_is_propagated_to_distributor_config) { setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1"); configure_merge_busy_inhibit_duration(7); @@ -898,7 +900,7 @@ TEST_F(DistributorTest, merge_busy_inhibit_duration_config_is_propagated_to_dist } // TODO -> stripe test -TEST_F(DistributorTest, 
merge_busy_inhibit_duration_is_propagated_to_pending_message_tracker) { +TEST_F(LegacyDistributorTest, merge_busy_inhibit_duration_is_propagated_to_pending_message_tracker) { setupDistributor(Redundancy(2), NodeCount(2), "storage:1 distributor:1"); addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t"); @@ -924,7 +926,7 @@ TEST_F(DistributorTest, merge_busy_inhibit_duration_is_propagated_to_pending_mes } // TODO -> stripe test -TEST_F(DistributorTest, external_client_requests_are_handled_individually_in_priority_order) { +TEST_F(LegacyDistributorTest, external_client_requests_are_handled_individually_in_priority_order) { setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1"); addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t/a"); @@ -953,7 +955,7 @@ TEST_F(DistributorTest, external_client_requests_are_handled_individually_in_pri } // TODO -> stripe test -TEST_F(DistributorTest, internal_messages_are_started_in_fifo_order_batch) { +TEST_F(LegacyDistributorTest, internal_messages_are_started_in_fifo_order_batch) { // To test internal request ordering, we use NotifyBucketChangeCommand // for the reason that it explicitly updates the bucket database for // each individual invocation. @@ -983,7 +985,7 @@ TEST_F(DistributorTest, internal_messages_are_started_in_fifo_order_batch) { // TODO -> stripe test // TODO also test that closing distributor closes stripes -TEST_F(DistributorTest, closing_aborts_priority_queued_client_requests) { +TEST_F(LegacyDistributorTest, closing_aborts_priority_queued_client_requests) { setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1"); document::BucketId bucket(16, 1); addNodesToBucketDB(bucket, "0=1/1/1/t"); @@ -1024,7 +1026,7 @@ void assert_invalid_stats_for_all_spaces( // TODO -> stripe test // TODO must impl/test cross-stripe bucket space stats // TODO cross-stripe recovery mode handling how? 
-TEST_F(DistributorTest, entering_recovery_mode_resets_bucket_space_stats) { +TEST_F(LegacyDistributorTest, entering_recovery_mode_resets_bucket_space_stats) { // Set up a cluster state + DB contents which implies merge maintenance ops setupDistributor(Redundancy(2), NodeCount(2), "version:1 distributor:1 storage:2"); addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t/a"); @@ -1046,7 +1048,7 @@ TEST_F(DistributorTest, entering_recovery_mode_resets_bucket_space_stats) { } // TODO figure out interaction between stripes and distributors on this one -TEST_F(DistributorTest, leaving_recovery_mode_immediately_sends_getnodestate_replies) { +TEST_F(LegacyDistributorTest, leaving_recovery_mode_immediately_sends_getnodestate_replies) { setupDistributor(Redundancy(2), NodeCount(2), "version:1 distributor:1 storage:2"); // Should not send explicit replies during init stage ASSERT_EQ(0, explicit_node_state_reply_send_invocations()); @@ -1067,7 +1069,7 @@ TEST_F(DistributorTest, leaving_recovery_mode_immediately_sends_getnodestate_rep EXPECT_EQ(1, explicit_node_state_reply_send_invocations()); } -void DistributorTest::do_test_pending_merge_getnodestate_reply_edge(BucketSpace space) { +void LegacyDistributorTest::do_test_pending_merge_getnodestate_reply_edge(BucketSpace space) { setupDistributor(Redundancy(2), NodeCount(2), "version:1 distributor:1 storage:2"); EXPECT_TRUE(distributor_is_in_recovery_mode()); // 2 buckets with missing replicas triggering merge pending stats @@ -1103,15 +1105,15 @@ void DistributorTest::do_test_pending_merge_getnodestate_reply_edge(BucketSpace EXPECT_EQ(2, explicit_node_state_reply_send_invocations()); } -TEST_F(DistributorTest, pending_to_no_pending_default_merges_edge_immediately_sends_getnodestate_replies) { +TEST_F(LegacyDistributorTest, pending_to_no_pending_default_merges_edge_immediately_sends_getnodestate_replies) { do_test_pending_merge_getnodestate_reply_edge(FixedBucketSpaces::default_space()); } -TEST_F(DistributorTest, 
pending_to_no_pending_global_merges_edge_immediately_sends_getnodestate_replies) { +TEST_F(LegacyDistributorTest, pending_to_no_pending_global_merges_edge_immediately_sends_getnodestate_replies) { do_test_pending_merge_getnodestate_reply_edge(FixedBucketSpaces::global_space()); } -TEST_F(DistributorTest, stale_reads_config_is_propagated_to_external_operation_handler) { +TEST_F(LegacyDistributorTest, stale_reads_config_is_propagated_to_external_operation_handler) { createLinks(); setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1"); @@ -1122,7 +1124,7 @@ TEST_F(DistributorTest, stale_reads_config_is_propagated_to_external_operation_h EXPECT_FALSE(getExternalOperationHandler().concurrent_gets_enabled()); } -TEST_F(DistributorTest, fast_path_on_consistent_gets_config_is_propagated_to_internal_config) { +TEST_F(LegacyDistributorTest, fast_path_on_consistent_gets_config_is_propagated_to_internal_config) { createLinks(); setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1"); @@ -1133,7 +1135,7 @@ TEST_F(DistributorTest, fast_path_on_consistent_gets_config_is_propagated_to_int EXPECT_FALSE(getConfig().update_fast_path_restart_enabled()); } -TEST_F(DistributorTest, merge_disabling_config_is_propagated_to_internal_config) { +TEST_F(LegacyDistributorTest, merge_disabling_config_is_propagated_to_internal_config) { createLinks(); setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1"); @@ -1144,7 +1146,7 @@ TEST_F(DistributorTest, merge_disabling_config_is_propagated_to_internal_config) EXPECT_FALSE(getConfig().merge_operations_disabled()); } -TEST_F(DistributorTest, metadata_update_phase_config_is_propagated_to_internal_config) { +TEST_F(LegacyDistributorTest, metadata_update_phase_config_is_propagated_to_internal_config) { createLinks(); setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1"); @@ -1155,7 +1157,7 @@ TEST_F(DistributorTest, metadata_update_phase_config_is_propagated_to_internal_c 
EXPECT_FALSE(getConfig().enable_metadata_only_fetch_phase_for_inconsistent_updates()); } -TEST_F(DistributorTest, weak_internal_read_consistency_config_is_propagated_to_internal_configs) { +TEST_F(LegacyDistributorTest, weak_internal_read_consistency_config_is_propagated_to_internal_configs) { createLinks(); setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1"); @@ -1168,7 +1170,7 @@ TEST_F(DistributorTest, weak_internal_read_consistency_config_is_propagated_to_i EXPECT_FALSE(getExternalOperationHandler().use_weak_internal_read_consistency_for_gets()); } -void DistributorTest::set_up_and_start_get_op_with_stale_reads_enabled(bool enabled) { +void LegacyDistributorTest::set_up_and_start_get_op_with_stale_reads_enabled(bool enabled) { createLinks(); setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1"); configure_stale_reads_enabled(enabled); @@ -1178,7 +1180,7 @@ void DistributorTest::set_up_and_start_get_op_with_stale_reads_enabled(bool enab _distributor->onDown(make_dummy_get_command_for_bucket_1()); } -TEST_F(DistributorTest, gets_are_started_outside_main_distributor_logic_if_stale_reads_enabled) { +TEST_F(LegacyDistributorTest, gets_are_started_outside_main_distributor_logic_if_stale_reads_enabled) { set_up_and_start_get_op_with_stale_reads_enabled(true); ASSERT_THAT(_sender.commands(), SizeIs(1)); EXPECT_THAT(_sender.replies(), SizeIs(0)); @@ -1190,7 +1192,7 @@ TEST_F(DistributorTest, gets_are_started_outside_main_distributor_logic_if_stale EXPECT_THAT(_sender.replies(), SizeIs(1)); } -TEST_F(DistributorTest, gets_are_not_started_outside_main_distributor_logic_if_stale_reads_disabled) { +TEST_F(LegacyDistributorTest, gets_are_not_started_outside_main_distributor_logic_if_stale_reads_disabled) { set_up_and_start_get_op_with_stale_reads_enabled(false); // Get has been placed into distributor queue, so no external messages are produced. 
EXPECT_THAT(_sender.commands(), SizeIs(0)); @@ -1200,21 +1202,21 @@ TEST_F(DistributorTest, gets_are_not_started_outside_main_distributor_logic_if_s // There's no need or desire to track "lockfree" Gets in the main pending message tracker, // as we only have to track mutations to inhibit maintenance ops safely. Furthermore, // the message tracker is a multi-index and therefore has some runtime cost. -TEST_F(DistributorTest, gets_started_outside_main_thread_are_not_tracked_by_main_pending_message_tracker) { +TEST_F(LegacyDistributorTest, gets_started_outside_main_thread_are_not_tracked_by_main_pending_message_tracker) { set_up_and_start_get_op_with_stale_reads_enabled(true); Bucket bucket(FixedBucketSpaces::default_space(), BucketId(16, 1)); EXPECT_FALSE(pending_message_tracker().hasPendingMessage( 0, bucket, api::MessageType::GET_ID)); } -TEST_F(DistributorTest, closing_aborts_gets_started_outside_main_distributor_thread) { +TEST_F(LegacyDistributorTest, closing_aborts_gets_started_outside_main_distributor_thread) { set_up_and_start_get_op_with_stale_reads_enabled(true); _distributor->close(); ASSERT_EQ(1, _sender.replies().size()); EXPECT_EQ(api::ReturnCode::ABORTED, _sender.reply(0)->getResult().getResult()); } -TEST_F(DistributorTest, prioritize_global_bucket_merges_config_is_propagated_to_internal_config) { +TEST_F(LegacyDistributorTest, prioritize_global_bucket_merges_config_is_propagated_to_internal_config) { createLinks(); setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1"); @@ -1225,7 +1227,7 @@ TEST_F(DistributorTest, prioritize_global_bucket_merges_config_is_propagated_to_ EXPECT_FALSE(getConfig().prioritize_global_bucket_merges()); } -TEST_F(DistributorTest, max_activation_inhibited_out_of_sync_groups_config_is_propagated_to_internal_config) { +TEST_F(LegacyDistributorTest, max_activation_inhibited_out_of_sync_groups_config_is_propagated_to_internal_config) { createLinks(); setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 
storage:1"); @@ -1236,7 +1238,7 @@ TEST_F(DistributorTest, max_activation_inhibited_out_of_sync_groups_config_is_pr EXPECT_EQ(getConfig().max_activation_inhibited_out_of_sync_groups(), 0); } -TEST_F(DistributorTest, wanted_split_bit_count_is_lower_bounded) { +TEST_F(LegacyDistributorTest, wanted_split_bit_count_is_lower_bounded) { createLinks(); setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1"); @@ -1247,7 +1249,7 @@ TEST_F(DistributorTest, wanted_split_bit_count_is_lower_bounded) { EXPECT_EQ(getConfig().getMinimalBucketSplit(), 8); } -TEST_F(DistributorTest, host_info_sent_immediately_once_all_stripes_first_reported) { +TEST_F(LegacyDistributorTest, host_info_sent_immediately_once_all_stripes_first_reported) { set_num_distributor_stripes(4); createLinks(); getClock().setAbsoluteTimeInSeconds(1000); @@ -1276,7 +1278,7 @@ TEST_F(DistributorTest, host_info_sent_immediately_once_all_stripes_first_report } // TODO STRIPE make delay configurable instead of hardcoded -TEST_F(DistributorTest, non_bootstrap_host_info_send_request_delays_sending) { +TEST_F(LegacyDistributorTest, non_bootstrap_host_info_send_request_delays_sending) { set_num_distributor_stripes(4); createLinks(); getClock().setAbsoluteTimeInSeconds(1000); diff --git a/storage/src/vespa/storage/config/distributorconfiguration.h b/storage/src/vespa/storage/config/distributorconfiguration.h index 479298ff082..7aa10893b80 100644 --- a/storage/src/vespa/storage/config/distributorconfiguration.h +++ b/storage/src/vespa/storage/config/distributorconfiguration.h @@ -9,7 +9,7 @@ namespace storage { -namespace distributor { struct DistributorTest; } +namespace distributor { struct LegacyDistributorTest; } class DistributorConfiguration { public: @@ -323,7 +323,7 @@ private: DistrConfig::MinimumReplicaCountingMode _minimumReplicaCountingMode; - friend struct distributor::DistributorTest; + friend struct distributor::LegacyDistributorTest; void configureMaintenancePriorities( const 
vespa::config::content::core::StorDistributormanagerConfig&); }; diff --git a/storage/src/vespa/storage/distributor/distributor.cpp b/storage/src/vespa/storage/distributor/distributor.cpp index 4e6ae90718c..6f9cbf3b0f2 100644 --- a/storage/src/vespa/storage/distributor/distributor.cpp +++ b/storage/src/vespa/storage/distributor/distributor.cpp @@ -353,15 +353,7 @@ get_bucket_id_for_striping(const api::StorageMessage& msg, const DistributorNode case api::MessageType::REMOVE_ID: return node_ctx.bucket_id_factory().getBucketId(dynamic_cast<const api::TestAndSetCommand&>(msg).getDocumentId()); case api::MessageType::REQUESTBUCKETINFO_REPLY_ID: - { - const auto& reply = dynamic_cast<const api::RequestBucketInfoReply&>(msg); - if (!reply.getBucketInfo().empty()) { - // Note: All bucket ids in this reply belong to the same distributor stripe, so we just use the first entry. - return reply.getBucketInfo()[0]._bucketId; - } else { - return reply.getBucketId(); - } - } + return dynamic_cast<const api::RequestBucketInfoReply&>(msg).super_bucket_id(); case api::MessageType::GET_ID: return node_ctx.bucket_id_factory().getBucketId(dynamic_cast<const api::GetCommand&>(msg).getDocumentId()); case api::MessageType::VISITOR_CREATE_ID: @@ -389,7 +381,7 @@ Distributor::stripe_of_bucket_id(const document::BucketId& bucket_id, const api: { if (!bucket_id.isSet()) { LOG(error, "Message (%s) has a bucket id (%s) that is not set. 
Cannot route to stripe", - msg.getSummary().c_str(), bucket_id.toString().c_str()); + msg.toString(true).c_str(), bucket_id.toString().c_str()); } assert(bucket_id.isSet()); if (bucket_id.getUsedBits() < spi::BucketLimits::MinUsedBits) { diff --git a/storage/src/vespa/storage/distributor/distributor.h b/storage/src/vespa/storage/distributor/distributor.h index 6f0808ad63d..41d88f5dba1 100644 --- a/storage/src/vespa/storage/distributor/distributor.h +++ b/storage/src/vespa/storage/distributor/distributor.h @@ -125,10 +125,10 @@ public: }; private: - friend struct DistributorTest; - friend class BucketDBUpdaterTest; friend class DistributorTestUtil; + friend class LegacyBucketDBUpdaterTest; friend class MetricUpdateHook; + friend struct LegacyDistributorTest; void setNodeStateUp(); bool handleMessage(const std::shared_ptr<api::StorageMessage>& msg); diff --git a/storage/src/vespa/storage/distributor/distributor_stripe.h b/storage/src/vespa/storage/distributor/distributor_stripe.h index e9dcb3e65fc..8f3de38aec7 100644 --- a/storage/src/vespa/storage/distributor/distributor_stripe.h +++ b/storage/src/vespa/storage/distributor/distributor_stripe.h @@ -194,13 +194,13 @@ public: bool tick() override; private: - // TODO reduce number of friends. DistributorStripe too popular for its own good. - friend struct DistributorTest; - friend class BucketDBUpdaterTest; + // TODO STRIPE: reduce number of friends. DistributorStripe too popular for its own good. 
+ friend class Distributor; friend class DistributorTestUtil; + friend class LegacyBucketDBUpdaterTest; friend class MetricUpdateHook; - friend class Distributor; friend class MultiThreadedStripeAccessGuard; + friend struct LegacyDistributorTest; bool handleMessage(const std::shared_ptr<api::StorageMessage>& msg); bool isMaintenanceReply(const api::StorageReply& reply) const; diff --git a/storageapi/src/vespa/storageapi/message/bucket.cpp b/storageapi/src/vespa/storageapi/message/bucket.cpp index 2e2ca82079d..2323a1ab0a4 100644 --- a/storageapi/src/vespa/storageapi/message/bucket.cpp +++ b/storageapi/src/vespa/storageapi/message/bucket.cpp @@ -476,6 +476,12 @@ RequestBucketInfoCommand::getBucket() const return document::Bucket(_bucketSpace, document::BucketId()); } +document::BucketId +RequestBucketInfoCommand::super_bucket_id() const +{ + return _buckets.empty() ? document::BucketId() : _buckets[0]; +} + void RequestBucketInfoCommand::print(std::ostream& out, bool verbose, const std::string& indent) const @@ -510,7 +516,8 @@ std::ostream& operator<<(std::ostream& out, const RequestBucketInfoReply::Entry& RequestBucketInfoReply::RequestBucketInfoReply(const RequestBucketInfoCommand& cmd) : StorageReply(cmd), _buckets(), - _full_bucket_fetch(cmd.hasSystemState()) + _full_bucket_fetch(cmd.hasSystemState()), + _super_bucket_id(cmd.super_bucket_id()) { } RequestBucketInfoReply::~RequestBucketInfoReply() = default; diff --git a/storageapi/src/vespa/storageapi/message/bucket.h b/storageapi/src/vespa/storageapi/message/bucket.h index 61766fb1f11..98445745753 100644 --- a/storageapi/src/vespa/storageapi/message/bucket.h +++ b/storageapi/src/vespa/storageapi/message/bucket.h @@ -358,6 +358,7 @@ public: const vespalib::string& getDistributionHash() const { return _distributionHash; } document::BucketSpace getBucketSpace() const { return _bucketSpace; } document::Bucket getBucket() const override; + document::BucketId super_bucket_id() const; void print(std::ostream& out, 
bool verbose, const std::string& indent) const override; @@ -388,6 +389,7 @@ public: private: EntryVector _buckets; bool _full_bucket_fetch; + document::BucketId _super_bucket_id; public: @@ -396,6 +398,7 @@ public: const EntryVector & getBucketInfo() const { return _buckets; } EntryVector & getBucketInfo() { return _buckets; } [[nodiscard]] bool full_bucket_fetch() const noexcept { return _full_bucket_fetch; } + const document::BucketId& super_bucket_id() const { return _super_bucket_id; } void print(std::ostream& out, bool verbose, const std::string& indent) const override; DECLARE_STORAGEREPLY(RequestBucketInfoReply, onRequestBucketInfoReply) }; diff --git a/vespa-feed-client/abi-spec.json b/vespa-feed-client/abi-spec.json index db9c1ff1a02..ecac167cd8e 100644 --- a/vespa-feed-client/abi-spec.json +++ b/vespa-feed-client/abi-spec.json @@ -158,8 +158,12 @@ ], "methods": [ "public void <init>(java.lang.String)", + "public void <init>(ai.vespa.feed.client.DocumentId, java.lang.String)", "public void <init>(java.lang.String, java.lang.Throwable)", - "public void <init>(java.lang.Throwable)" + "public void <init>(java.lang.Throwable)", + "public void <init>(ai.vespa.feed.client.DocumentId, java.lang.Throwable)", + "public void <init>(ai.vespa.feed.client.DocumentId, java.lang.String, java.lang.Throwable)", + "public java.util.Optional documentId()" ], "fields": [] }, @@ -202,8 +206,8 @@ "abstract" ], "methods": [ - "public void onNextResult(ai.vespa.feed.client.Result, java.lang.Throwable)", - "public void onError(java.lang.Throwable)", + "public void onNextResult(ai.vespa.feed.client.Result, ai.vespa.feed.client.FeedException)", + "public void onError(ai.vespa.feed.client.FeedException)", "public void onComplete()" ], "fields": [] @@ -225,18 +229,6 @@ ], "fields": [] }, - "ai.vespa.feed.client.JsonParseException": { - "superClass": "ai.vespa.feed.client.FeedException", - "interfaces": [], - "attributes": [ - "public" - ], - "methods": [ - "public void 
<init>(java.lang.String)", - "public void <init>(java.lang.String, java.lang.Throwable)" - ], - "fields": [] - }, "ai.vespa.feed.client.OperationParameters": { "superClass": "java.lang.Object", "interfaces": [], @@ -261,6 +253,18 @@ ], "fields": [] }, + "ai.vespa.feed.client.OperationParseException": { + "superClass": "ai.vespa.feed.client.FeedException", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public void <init>(java.lang.String)", + "public void <init>(java.lang.String, java.lang.Throwable)" + ], + "fields": [] + }, "ai.vespa.feed.client.OperationStats": { "superClass": "java.lang.Object", "interfaces": [], @@ -315,5 +319,17 @@ "public java.util.Optional traceMessage()" ], "fields": [] + }, + "ai.vespa.feed.client.ResultParseException": { + "superClass": "ai.vespa.feed.client.FeedException", + "interfaces": [], + "attributes": [ + "public" + ], + "methods": [ + "public void <init>(ai.vespa.feed.client.DocumentId, java.lang.String)", + "public void <init>(ai.vespa.feed.client.DocumentId, java.lang.Throwable)" + ], + "fields": [] } }
\ No newline at end of file diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java index 250809a48b9..952edfb5464 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java @@ -12,13 +12,21 @@ import java.util.concurrent.CompletableFuture; */ public interface FeedClient extends Closeable { - /** Send a document put with the given parameters, returning a future with the result of the operation. */ + /** + * Send a document put with the given parameters, returning a future with the result of the operation. + * Exceptional completion will use be an instance of {@link FeedException} or one of its sub-classes. + * */ CompletableFuture<Result> put(DocumentId documentId, String documentJson, OperationParameters params); - /** Send a document update with the given parameters, returning a future with the result of the operation. */ + /** + * Send a document update with the given parameters, returning a future with the result of the operation. + * Exceptional completion will use be an instance of {@link FeedException} or one of its sub-classes. + * */ CompletableFuture<Result> update(DocumentId documentId, String updateJson, OperationParameters params); - /** Send a document remove with the given parameters, returning a future with the result of the operation. */ + /** Send a document remove with the given parameters, returning a future with the result of the operation. + * Exceptional completion will use be an instance of {@link FeedException} or one of its sub-classes. + * */ CompletableFuture<Result> remove(DocumentId documentId, OperationParameters params); /** Returns a snapshot of the stats for this feed client, such as requests made, and responses by status. 
*/ diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java index e1c6c733e9c..54e11d3a185 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java @@ -1,6 +1,8 @@ // Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. package ai.vespa.feed.client; +import java.util.Optional; + /** * Signals that an error occurred during feeding * @@ -8,10 +10,38 @@ package ai.vespa.feed.client; */ public class FeedException extends RuntimeException { - public FeedException(String message) { super(message); } + private final DocumentId documentId; + + public FeedException(String message) { + super(message); + this.documentId = null; + } + + public FeedException(DocumentId documentId, String message) { + super(message); + this.documentId = documentId; + } + + public FeedException(String message, Throwable cause) { + super(message, cause); + this.documentId = null; + } + + public FeedException(Throwable cause) { + super(cause); + this.documentId = null; + } + + public FeedException(DocumentId documentId, Throwable cause) { + super(cause); + this.documentId = documentId; + } - public FeedException(String message, Throwable cause) { super(message, cause); } + public FeedException(DocumentId documentId, String message, Throwable cause) { + super(message, cause); + this.documentId = documentId; + } - public FeedException(Throwable cause) { super(cause); } + public Optional<DocumentId> documentId() { return Optional.ofNullable(documentId); } } diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java index b160cced4b9..2269c56cde4 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java +++ 
b/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java @@ -6,7 +6,6 @@ import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.JsonToken; import java.io.IOException; -import java.io.UncheckedIOException; import java.io.UnsupportedEncodingException; import java.net.URLEncoder; import java.util.HashMap; @@ -102,7 +101,10 @@ class HttpFeedClient implements FeedClient { try { JsonParser parser = factory.createParser(response.body()); if (parser.nextToken() != JsonToken.START_OBJECT) - throw new IllegalArgumentException("Expected '" + JsonToken.START_OBJECT + "', but found '" + parser.currentToken() + "' in: " + new String(response.body(), UTF_8)); + throw new ResultParseException( + documentId, + "Expected '" + JsonToken.START_OBJECT + "', but found '" + parser.currentToken() + "' in: " + + new String(response.body(), UTF_8)); String name; while ((name = parser.nextFieldName()) != null) { @@ -114,15 +116,20 @@ class HttpFeedClient implements FeedClient { } if (parser.currentToken() != JsonToken.END_OBJECT) - throw new IllegalArgumentException("Expected '" + JsonToken.END_OBJECT + "', but found '" + parser.currentToken() + "' in: " + new String(response.body(), UTF_8)); + throw new ResultParseException( + documentId, + "Expected '" + JsonToken.END_OBJECT + "', but found '" + parser.currentToken() + "' in: " + + new String(response.body(), UTF_8)); } catch (IOException e) { - throw new UncheckedIOException(e); + throw new ResultParseException(documentId, e); } if (type == null) // Not a Vespa response, but a failure in the HTTP layer. - throw new FeedException("Status " + response.code() + " executing '" + request + - "': " + (message == null ? new String(response.body(), UTF_8) : message)); + throw new ResultParseException( + documentId, + "Status " + response.code() + " executing '" + request + "': " + + (message == null ? 
new String(response.body(), UTF_8) : message)); return new Result(type, documentId, message, trace); } diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java index 6b2aec5d8b3..98ff3a5d921 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java @@ -228,6 +228,14 @@ class HttpRequestStrategy implements RequestStrategy { releaseSlot(); }); + result.handle((response, error) -> { + if (error != null) { + if (error instanceof FeedException) throw (FeedException)error; + throw new FeedException(documentId, error); + } + return response; + }); + return result; } diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonFeeder.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonFeeder.java index b3a7aca1808..0ba373eef18 100644 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonFeeder.java +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonFeeder.java @@ -10,7 +10,6 @@ import java.io.Closeable; import java.io.IOException; import java.io.InputStream; import java.io.InterruptedIOException; -import java.io.UncheckedIOException; import java.time.Duration; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutorService; @@ -58,13 +57,13 @@ public class JsonFeeder implements Closeable { * @param result Non-null if operation completed successfully * @param error Non-null if operation failed */ - default void onNextResult(Result result, Throwable error) { } + default void onNextResult(Result result, FeedException error) { } /** * Invoked if an unrecoverable error occurred during feed processing, * after which no other {@link ResultCallback} methods are invoked. 
*/ - default void onError(Throwable error) { } + default void onError(FeedException error) { } /** * Invoked when all feed operations are either completed successfully or failed. @@ -81,6 +80,7 @@ public class JsonFeeder implements Closeable { * "fields": { ... document fields ... } * } * </pre> + * Exceptional completion will use be an instance of {@link FeedException} or one of its sub-classes. */ public CompletableFuture<Result> feedSingle(String json) { CompletableFuture<Result> result = new CompletableFuture<>(); @@ -94,7 +94,7 @@ public class JsonFeeder implements Closeable { } }, resultExecutor); } catch (Exception e) { - resultExecutor.execute(() -> result.completeExceptionally(e)); + resultExecutor.execute(() -> result.completeExceptionally(wrapException(e))); } return result; } @@ -123,27 +123,32 @@ public class JsonFeeder implements Closeable { * ] * </pre> * Note that {@code "id"} is an alias for the document put operation. + * Exceptional completion will use be an instance of {@link FeedException} or one of its sub-classes. */ public CompletableFuture<Void> feedMany(InputStream jsonStream, ResultCallback resultCallback) { return feedMany(jsonStream, 1 << 26, resultCallback); } + /** + * Same as {@link #feedMany(InputStream, ResultCallback)}, but without a provided {@link ResultCallback} instance. + * @see JsonFeeder#feedMany(InputStream, ResultCallback) for details. 
+ */ public CompletableFuture<Void> feedMany(InputStream jsonStream) { return feedMany(jsonStream, new ResultCallback() { }); } CompletableFuture<Void> feedMany(InputStream jsonStream, int size, ResultCallback resultCallback) { - RingBufferStream buffer = new RingBufferStream(jsonStream, size); CompletableFuture<Void> overallResult = new CompletableFuture<>(); CompletableFuture<Result> result; AtomicInteger pending = new AtomicInteger(1); // The below dispatch loop itself is counted as a single pending operation AtomicBoolean finalCallbackInvoked = new AtomicBoolean(); try { + RingBufferStream buffer = new RingBufferStream(jsonStream, size); while ((result = buffer.next()) != null) { pending.incrementAndGet(); result.whenCompleteAsync((r, t) -> { if (!finalCallbackInvoked.get()) { - resultCallback.onNextResult(r, t); + resultCallback.onNextResult(r, (FeedException) t); } if (pending.decrementAndGet() == 0 && finalCallbackInvoked.compareAndSet(false, true)) { resultCallback.onComplete(); @@ -160,8 +165,9 @@ public class JsonFeeder implements Closeable { } catch (Exception e) { if (finalCallbackInvoked.compareAndSet(false, true)) { resultExecutor.execute(() -> { - resultCallback.onError(e); - overallResult.completeExceptionally(e); + FeedException wrapped = wrapException(e); + resultCallback.onError(wrapped); + overallResult.completeExceptionally(wrapped); }); } } @@ -182,6 +188,14 @@ public class JsonFeeder implements Closeable { } } + private FeedException wrapException(Exception e) { + if (e instanceof FeedException) return (FeedException) e; + if (e instanceof IOException) { + return new OperationParseException("Failed to parse document JSON: " + e.getMessage(), e); + } + return new FeedException(e); + } + private class RingBufferStream extends InputStream { private final byte[] b = new byte[1]; @@ -189,22 +203,21 @@ public class JsonFeeder implements Closeable { private final byte[] data; private final int size; private final Object lock = new Object(); - 
private Throwable thrown = null; + private IOException thrown = null; private long tail = 0; private long pos = 0; private long head = 0; private boolean done = false; private final OperationParserAndExecutor parserAndExecutor; - RingBufferStream(InputStream in, int size) { + RingBufferStream(InputStream in, int size) throws IOException { this.in = in; this.data = new byte[size]; this.size = size; new Thread(this::fill, "feed-reader").start(); - try { this.parserAndExecutor = new RingBufferBackedOperationParserAndExecutor(factory.createParser(this)); } - catch (IOException e) { throw new UncheckedIOException(e); } + this.parserAndExecutor = new RingBufferBackedOperationParserAndExecutor(factory.createParser(this)); } @Override @@ -220,7 +233,7 @@ public class JsonFeeder implements Closeable { while ((ready = (int) (head - pos)) == 0 && ! done) lock.wait(); } - if (thrown != null) throw new RuntimeException("Error reading input", thrown); + if (thrown != null) throw thrown; if (ready == 0) return -1; ready = min(ready, len); @@ -273,7 +286,7 @@ public class JsonFeeder implements Closeable { while (true) { int free; synchronized (lock) { - while ((free = (int) (tail + size - head)) <= 0 && ! 
done) + while ((free = (int) (tail + size - head)) <= 0 && !done) lock.wait(); } if (done) break; @@ -288,18 +301,22 @@ public class JsonFeeder implements Closeable { lock.notify(); } } - } - catch (Throwable t) { + } catch (InterruptedException e) { synchronized (lock) { done = true; - thrown = t; + thrown = new InterruptedIOException("Interrupted reading data: " + e.getMessage()); + } + } catch (IOException e) { + synchronized (lock) { + done = true; + thrown = e; } } } private class RingBufferBackedOperationParserAndExecutor extends OperationParserAndExecutor { - RingBufferBackedOperationParserAndExecutor(JsonParser parser) throws IOException { super(parser, true); } + RingBufferBackedOperationParserAndExecutor(JsonParser parser) { super(parser, true); } @Override String getDocumentJson(long start, long end) { @@ -334,7 +351,7 @@ public class JsonFeeder implements Closeable { private final boolean multipleOperations; private boolean arrayPrefixParsed; - protected OperationParserAndExecutor(JsonParser parser, boolean multipleOperations) throws IOException { + protected OperationParserAndExecutor(JsonParser parser, boolean multipleOperations) { this.parser = parser; this.multipleOperations = multipleOperations; } @@ -342,82 +359,78 @@ public class JsonFeeder implements Closeable { abstract String getDocumentJson(long start, long end); CompletableFuture<Result> next() throws IOException { - try { - if (multipleOperations && !arrayPrefixParsed){ - expect(START_ARRAY); - arrayPrefixParsed = true; - } + if (multipleOperations && !arrayPrefixParsed){ + expect(START_ARRAY); + arrayPrefixParsed = true; + } - JsonToken token = parser.nextToken(); - if (token == END_ARRAY && multipleOperations) return null; - else if (token == null && !multipleOperations) return null; - else if (token == START_OBJECT); - else throw new JsonParseException("Unexpected token '" + parser.currentToken() + "' at offset " + parser.getTokenLocation().getByteOffset()); - long start = 0, end = -1; - 
OperationType type = null; - DocumentId id = null; - OperationParameters parameters = protoParameters; - loop: while (true) { - switch (parser.nextToken()) { - case FIELD_NAME: - switch (parser.getText()) { - case "id": - case "put": type = PUT; id = readId(); break; - case "update": type = UPDATE; id = readId(); break; - case "remove": type = REMOVE; id = readId(); break; - case "condition": parameters = parameters.testAndSetCondition(readString()); break; - case "create": parameters = parameters.createIfNonExistent(readBoolean()); break; - case "fields": { - expect(START_OBJECT); - start = parser.getTokenLocation().getByteOffset(); - int depth = 1; - while (depth > 0) switch (parser.nextToken()) { - case START_OBJECT: ++depth; break; - case END_OBJECT: --depth; break; - } - end = parser.getTokenLocation().getByteOffset() + 1; - break; + JsonToken token = parser.nextToken(); + if (token == END_ARRAY && multipleOperations) return null; + else if (token == null && !multipleOperations) return null; + else if (token == START_OBJECT); + else throw new OperationParseException("Unexpected token '" + parser.currentToken() + "' at offset " + parser.getTokenLocation().getByteOffset()); + long start = 0, end = -1; + OperationType type = null; + DocumentId id = null; + OperationParameters parameters = protoParameters; + loop: while (true) { + switch (parser.nextToken()) { + case FIELD_NAME: + switch (parser.getText()) { + case "id": + case "put": type = PUT; id = readId(); break; + case "update": type = UPDATE; id = readId(); break; + case "remove": type = REMOVE; id = readId(); break; + case "condition": parameters = parameters.testAndSetCondition(readString()); break; + case "create": parameters = parameters.createIfNonExistent(readBoolean()); break; + case "fields": { + expect(START_OBJECT); + start = parser.getTokenLocation().getByteOffset(); + int depth = 1; + while (depth > 0) switch (parser.nextToken()) { + case START_OBJECT: ++depth; break; + case END_OBJECT: --depth; 
break; } - default: throw new JsonParseException("Unexpected field name '" + parser.getText() + "' at offset " + - parser.getTokenLocation().getByteOffset()); + end = parser.getTokenLocation().getByteOffset() + 1; + break; } - break; + default: throw new OperationParseException("Unexpected field name '" + parser.getText() + "' at offset " + + parser.getTokenLocation().getByteOffset()); + } + break; - case END_OBJECT: - break loop; + case END_OBJECT: + break loop; - default: - throw new JsonParseException("Unexpected token '" + parser.currentToken() + "' at offset " + - parser.getTokenLocation().getByteOffset()); - } - } - if (id == null) - throw new JsonParseException("No document id for document at offset " + start); - - if (end < start) - throw new JsonParseException("No 'fields' object for document at offset " + parser.getTokenLocation().getByteOffset()); - String payload = getDocumentJson(start, end); - switch (type) { - case PUT: return client.put (id, payload, parameters); - case UPDATE: return client.update(id, payload, parameters); - case REMOVE: return client.remove(id, parameters); - default: throw new JsonParseException("Unexpected operation type '" + type + "'"); + default: + throw new OperationParseException("Unexpected token '" + parser.currentToken() + "' at offset " + + parser.getTokenLocation().getByteOffset()); } - } catch (com.fasterxml.jackson.core.JacksonException e) { - throw new JsonParseException("Failed to parse JSON", e); + } + if (id == null) + throw new OperationParseException("No document id for document at offset " + start); + + if (end < start) + throw new OperationParseException("No 'fields' object for document at offset " + parser.getTokenLocation().getByteOffset()); + String payload = getDocumentJson(start, end); + switch (type) { + case PUT: return client.put (id, payload, parameters); + case UPDATE: return client.update(id, payload, parameters); + case REMOVE: return client.remove(id, parameters); + default: throw new 
OperationParseException("Unexpected operation type '" + type + "'"); } } private void expect(JsonToken token) throws IOException { if (parser.nextToken() != token) - throw new JsonParseException("Expected '" + token + "' at offset " + parser.getTokenLocation().getByteOffset() + + throw new OperationParseException("Expected '" + token + "' at offset " + parser.getTokenLocation().getByteOffset() + ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")"); } private String readString() throws IOException { String value = parser.nextTextValue(); if (value == null) - throw new JsonParseException("Expected '" + VALUE_STRING + "' at offset " + parser.getTokenLocation().getByteOffset() + + throw new OperationParseException("Expected '" + VALUE_STRING + "' at offset " + parser.getTokenLocation().getByteOffset() + ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")"); return value; @@ -426,7 +439,7 @@ public class JsonFeeder implements Closeable { private boolean readBoolean() throws IOException { Boolean value = parser.nextBooleanValue(); if (value == null) - throw new JsonParseException("Expected '" + VALUE_FALSE + "' or '" + VALUE_TRUE + "' at offset " + parser.getTokenLocation().getByteOffset() + + throw new OperationParseException("Expected '" + VALUE_FALSE + "' or '" + VALUE_TRUE + "' at offset " + parser.getTokenLocation().getByteOffset() + ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")"); return value; @@ -439,7 +452,6 @@ public class JsonFeeder implements Closeable { } - public static class Builder { final FeedClient client; diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonParseException.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonParseException.java deleted file mode 100644 index 8edf74ec275..00000000000 --- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonParseException.java +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright Verizon Media. 
Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -package ai.vespa.feed.client; - -/** - * Signals that supplied JSON is invalid - * - * @author bjorncs - */ -public class JsonParseException extends FeedException { - - public JsonParseException(String message) { super(message); } - - public JsonParseException(String message, Throwable cause) { super(message, cause); } - -} diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParseException.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParseException.java new file mode 100644 index 00000000000..15ba024bb4e --- /dev/null +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParseException.java @@ -0,0 +1,15 @@ +// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package ai.vespa.feed.client; + +/** + * Signals that supplied JSON for a document/operation is invalid + * + * @author bjorncs + */ +public class OperationParseException extends FeedException { + + public OperationParseException(String message) { super(message); } + + public OperationParseException(String message, Throwable cause) { super(message, cause); } + +} diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/ResultParseException.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/ResultParseException.java new file mode 100644 index 00000000000..3fd5143e2f4 --- /dev/null +++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/ResultParseException.java @@ -0,0 +1,14 @@ +// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package ai.vespa.feed.client; + +/** + * Signals that the client was unable to parse the result/response from container + * + * @author bjorncs + */ +public class ResultParseException extends FeedException { + + public ResultParseException(DocumentId documentId, String message) { super(documentId, message); } + + public ResultParseException(DocumentId documentId, Throwable cause) { super(documentId, cause); } +} diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonFeederTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonFeederTest.java index 03194e23d47..3e0f886a40a 100644 --- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonFeederTest.java +++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonFeederTest.java @@ -46,7 +46,7 @@ class JsonFeederTest { " }\n" + " }\n" + "]"; - AtomicReference<Throwable> exceptionThrow = new AtomicReference<>(); + AtomicReference<FeedException> exceptionThrow = new AtomicReference<>(); Path tmpFile = Files.createTempFile(null, null); Files.write(tmpFile, json.getBytes(UTF_8)); try (InputStream in = Files.newInputStream(tmpFile, StandardOpenOption.READ, StandardOpenOption.DELETE_ON_CLOSE)) { @@ -58,10 +58,10 @@ class JsonFeederTest { .feedMany(in, 1 << 7, new JsonFeeder.ResultCallback() { // TODO: hangs when buffer is smaller than largest document @Override - public void onNextResult(Result result, Throwable error) { resultsReceived.incrementAndGet(); } + public void onNextResult(Result result, FeedException error) { resultsReceived.incrementAndGet(); } @Override - public void onError(Throwable error) { exceptionThrow.set(error); } + public void onError(FeedException error) { exceptionThrow.set(error); } @Override public void onComplete() { completedSuccessfully.set(true); } diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java new file mode 
100644 index 00000000000..1e616f2625a --- /dev/null +++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java @@ -0,0 +1,92 @@ +// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package ai.vespa.feed.client.examples; + +import ai.vespa.feed.client.DocumentId; +import ai.vespa.feed.client.FeedClient; +import ai.vespa.feed.client.FeedClientBuilder; +import ai.vespa.feed.client.FeedException; +import ai.vespa.feed.client.JsonFeeder; +import ai.vespa.feed.client.Result; + +import java.io.Closeable; +import java.io.IOException; +import java.io.InputStream; +import java.net.URI; +import java.time.Duration; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.logging.Logger; + +/** + * Sample feeder demonstrating how to programmatically feed to a Vespa cluster. + */ +class JsonFileFeederExample implements Closeable { + + private final static Logger log = Logger.getLogger(JsonFileFeederExample.class.getName()); + + private final JsonFeeder jsonFeeder; + private final URI endpoint; + + static class ResultCallBack implements JsonFeeder.ResultCallback { + + final AtomicInteger resultsReceived = new AtomicInteger(0); + final AtomicInteger errorsReceived = new AtomicInteger(0); + final long startTimeMillis = System.currentTimeMillis();; + + @Override + public void onNextResult(Result result, FeedException error) { + resultsReceived.incrementAndGet(); + if (error != null) { + log.warning("Problems with feeding document " + + error.documentId().map(DocumentId::toString).orElse("<unknown>")); + errorsReceived.incrementAndGet(); + } else if (result.type() == Result.Type.failure) { + log.warning("Problems with docID " + result.documentId() + ":" + error); + errorsReceived.incrementAndGet(); + } + } + + @Override + public void onError(FeedException error) { + log.severe("Feeding failed for d: " + error.getMessage()); + } 
+ + @Override + public void onComplete() { + log.info("Feeding completed"); + } + + void dumpStatsToLog() { + log.info("Received in total " + resultsReceived.get() + ", " + errorsReceived.get() + " errors."); + log.info("Time spent receiving is " + (System.currentTimeMillis() - startTimeMillis) + " ms."); + } + + } + + JsonFileFeederExample(URI endpoint) { + this.endpoint = endpoint; + FeedClient feedClient = FeedClientBuilder.create(endpoint) + .build(); + this.jsonFeeder = JsonFeeder.builder(feedClient) + .withTimeout(Duration.ofSeconds(30)) + .build(); + } + + /** + * Feed all operations from a stream. + * + * @param stream The input stream to read operations from (JSON array containing one or more document operations). + */ + void batchFeed(InputStream stream, String batchId) { + ResultCallBack callback = new ResultCallBack(); + log.info("Starting feed to " + endpoint + " for batch '" + batchId + "'"); + CompletableFuture<Void> promise = jsonFeeder.feedMany(stream, callback); + promise.join(); // wait for feeding to complete + callback.dumpStatsToLog(); + } + + @Override + public void close() throws IOException { + jsonFeeder.close(); + } +} diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonStreamFeederExample.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonStreamFeederExample.java new file mode 100644 index 00000000000..5cee776b244 --- /dev/null +++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonStreamFeederExample.java @@ -0,0 +1,117 @@ +// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. 
+package ai.vespa.feed.client.examples; + +import ai.vespa.feed.client.DocumentId; +import ai.vespa.feed.client.FeedClient; +import ai.vespa.feed.client.FeedClientBuilder; +import ai.vespa.feed.client.OperationParameters; +import ai.vespa.feed.client.Result; + +import java.net.URI; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.logging.Level; +import java.util.logging.Logger; + +/** + * Simple Streaming feeder implementation which will send operations to a Vespa endpoint. + * Other threads communicate with the feeder by adding new operations on the BlockingQueue + */ + +class JsonStreamFeederExample extends Thread implements AutoCloseable { + + static class Operation { + final String type; + final String documentId; + final String documentFieldsJson; + + Operation(String type, String id, String fields) { + this.type = type; + this.documentId = id; + this.documentFieldsJson = fields; + } + } + + private final static Logger log = Logger.getLogger(JsonStreamFeederExample.class.getName()); + + private final BlockingQueue<Operation> operations; + private final FeedClient feedClient; + private final AtomicBoolean drain = new AtomicBoolean(false); + private final CountDownLatch finishedDraining = new CountDownLatch(1); + private final AtomicInteger resultCounter = new AtomicInteger(); + + /** + * Constructor + * @param operations The shared blocking queue where other threads can put document operations to. 
+ * @param endpoint The endpoint to feed to + */ + JsonStreamFeederExample(BlockingQueue<JsonStreamFeederExample.Operation> operations, URI endpoint) { + this.operations = operations; + this.feedClient = FeedClientBuilder.create(endpoint).build(); + } + + /** + * Shutdown this feeder, waits until operations on queue is drained + */ + @Override + public void close() { + log.info("Shutdown initiated, awaiting operations queue to be drained. Queue size is " + operations.size()); + drain.set(true); + try { + finishedDraining.await(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + + @Override + public void run() { + while (!drain.get() || !operations.isEmpty()) { + try { + JsonStreamFeederExample.Operation op = operations.poll(1, TimeUnit.SECONDS); + if(op == null) // no operations available + continue; + log.info("Put document " + op.documentId); + CompletableFuture<Result> promise; + DocumentId docId = DocumentId.of(op.documentId); + OperationParameters params = OperationParameters.empty(); + String json = op.documentFieldsJson; + switch (op.type) { + case "put": + promise = feedClient.put(docId, json, params); + break; + case "remove": + promise = feedClient.remove(docId, params); + break; + case "update": + promise = feedClient.update(docId, json, params); + break; + default: + throw new IllegalArgumentException("Invalid operation: " + op.type); + } + promise.whenComplete((result, throwable) -> { + if (resultCounter.getAndIncrement() % 10 == 0) { + System.err.println(feedClient.stats()); + } + if (throwable != null) { + System.err.printf("Failure for '%s': %s", docId, throwable); + throwable.printStackTrace(); + } else if (result.type() == Result.Type.failure) { + System.err.printf("Failure for '%s': %s", docId, result.resultMessage().orElse("<no messsage>")); + } + }); + } catch (InterruptedException e) { + log.log(Level.SEVERE, "Got interrupt exception.", e); + break; + } + } + log.info("Shutting down feeding thread"); + 
this.feedClient.close(); + finishedDraining.countDown(); + } + +}
\ No newline at end of file diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/SimpleExample.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/SimpleExample.java new file mode 100644 index 00000000000..5ece9051e41 --- /dev/null +++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/SimpleExample.java @@ -0,0 +1,34 @@ +// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. +package ai.vespa.feed.client.examples; + +import ai.vespa.feed.client.DocumentId; +import ai.vespa.feed.client.FeedClient; +import ai.vespa.feed.client.FeedClientBuilder; +import ai.vespa.feed.client.OperationParameters; +import ai.vespa.feed.client.Result; + +import java.net.URI; +import java.time.Duration; +import java.util.concurrent.CompletableFuture; + +class SimpleExample { + + public static void main(String[] args) { + try (FeedClient client = FeedClientBuilder.create(URI.create("https://my-container-endpoint-with-http2:8080/")).build()) { + DocumentId id = DocumentId.of("namespace", "documenttype", "1"); + String json = "{\"fields\": {\"title\": \"hello world\"}}"; + OperationParameters params = OperationParameters.empty() + .timeout(Duration.ofSeconds(5)) + .route("myvesparoute"); + CompletableFuture<Result> promise = client.put(id, json, params); + promise.whenComplete(((result, throwable) -> { + if (throwable != null) { + throwable.printStackTrace(); + } else { + System.out.printf("'%s' for document '%s': %s%n", result.type(), result.documentId(), result.resultMessage()); + } + })); + } + } + +} diff --git a/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaRecordWriter.java b/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaRecordWriter.java index 15b3d2e9d7d..1c370b14b82 100644 --- a/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaRecordWriter.java +++ 
b/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaRecordWriter.java @@ -6,7 +6,7 @@ import ai.vespa.feed.client.DryrunResult; import ai.vespa.feed.client.FeedClient; import ai.vespa.feed.client.FeedClientBuilder; import ai.vespa.feed.client.JsonFeeder; -import ai.vespa.feed.client.JsonParseException; +import ai.vespa.feed.client.OperationParseException; import ai.vespa.feed.client.OperationParameters; import ai.vespa.feed.client.OperationStats; import ai.vespa.feed.client.Result; @@ -23,6 +23,7 @@ import java.util.Arrays; import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ThreadLocalRandom; +import java.util.logging.Level; import java.util.logging.Logger; import static java.util.stream.Collectors.toList; @@ -54,13 +55,13 @@ public class VespaRecordWriter extends RecordWriter<Object, Object> { feeder.feedSingle(json) .whenComplete((result, error) -> { if (error != null) { - if (error instanceof JsonParseException) { + if (error instanceof OperationParseException) { counters.incrementDocumentsSkipped(1); } else { String msg = "Failed to feed single document: " + error; System.out.println(msg); System.err.println(msg); - log.warning(msg); + log.log(Level.WARNING, msg, error); counters.incrementDocumentsFailed(1); } } else { diff --git a/vespalib/src/vespa/vespalib/net/tls/crypto_codec_adapter.h b/vespalib/src/vespa/vespalib/net/tls/crypto_codec_adapter.h index 372a2191a88..11c87d3b7e9 100644 --- a/vespalib/src/vespa/vespalib/net/tls/crypto_codec_adapter.h +++ b/vespalib/src/vespa/vespalib/net/tls/crypto_codec_adapter.h @@ -33,7 +33,7 @@ private: ssize_t flush_all(); // -1/0 -> error/ok public: CryptoCodecAdapter(SocketHandle socket, std::unique_ptr<CryptoCodec> codec) - : _input(64_Ki), _output(64_Ki), _socket(std::move(socket)), _codec(std::move(codec)), + : _input(0), _output(0), _socket(std::move(socket)), _codec(std::move(codec)), _got_tls_close(false), _encoded_tls_close(false) {} void 
inject_read_data(const char *buf, size_t len) override; int get_fd() const override { return _socket.get(); } |