-rw-r--r-- client/go/Makefile | 5
-rw-r--r-- config-model-api/src/main/java/com/yahoo/config/application/api/xml/DeploymentSpecXmlReader.java | 3
-rw-r--r-- config-model-api/src/test/java/com/yahoo/config/application/api/DeploymentSpecTest.java | 4
-rw-r--r-- config-model/src/main/java/com/yahoo/searchdefinition/document/Attribute.java | 1
-rw-r--r-- config-model/src/main/java/com/yahoo/vespa/configmodel/producers/DocumentManager.java | 1
-rw-r--r-- config-model/src/test/java/com/yahoo/searchdefinition/DocumentGraphValidatorTest.java | 1
-rw-r--r-- config-model/src/test/java/com/yahoo/searchdefinition/DocumentReferenceResolverTest.java | 1
-rw-r--r-- config-model/src/test/java/com/yahoo/searchdefinition/derived/SchemaOrdererTestCase.java | 1
-rw-r--r-- config-model/src/test/java/com/yahoo/searchdefinition/derived/VsmFieldsTestCase.java | 1
-rw-r--r-- config-model/src/test/java/com/yahoo/searchdefinition/processing/ParentChildSearchModel.java | 1
-rw-r--r-- config/src/vespa/config/common/configvalue.cpp | 2
-rw-r--r-- config/src/vespa/config/common/configvalue.h | 4
-rw-r--r-- config/src/vespa/config/frt/frtconfigagent.cpp | 2
-rw-r--r-- config/src/vespa/config/subscription/configsubscription.cpp | 6
-rw-r--r-- configdefinitions/abi-spec.json | 1
-rw-r--r-- configdefinitions/pom.xml | 4
-rw-r--r-- controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java | 2
-rw-r--r-- controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java | 9
-rw-r--r-- controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java | 27
-rw-r--r-- controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializer.java | 30
-rw-r--r-- controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java | 277
-rw-r--r-- controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicyId.java | 5
-rw-r--r-- controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicyList.java | 99
-rw-r--r-- controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/DeploymentRoutingContext.java | 8
-rw-r--r-- controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/ExclusiveZoneRoutingContext.java | 2
-rw-r--r-- controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java | 4
-rw-r--r-- controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java | 15
-rw-r--r-- controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializerTest.java | 36
-rw-r--r-- controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java | 8
-rw-r--r-- controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application2-with-patches.json | 3
-rw-r--r-- controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application2.json | 3
-rw-r--r-- controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java | 91
-rw-r--r-- dist/vespa.spec | 7
-rw-r--r-- document/src/main/java/com/yahoo/document/DocumentTypeManager.java | 7
-rw-r--r-- document/src/main/java/com/yahoo/document/DocumentTypeManagerConfigurer.java | 1
-rw-r--r-- document/src/main/java/com/yahoo/document/ReferenceDataType.java | 3
-rw-r--r-- document/src/main/java/com/yahoo/document/TemporaryDataType.java | 2
-rw-r--r-- document/src/main/java/com/yahoo/document/TemporaryStructuredDataType.java | 2
-rw-r--r-- document/src/test/java/com/yahoo/document/DocumentTypeManagerTestCase.java | 1
-rw-r--r-- document/src/test/java/com/yahoo/document/ReferenceDataTypeTestCase.java | 2
-rw-r--r-- document/src/test/java/com/yahoo/document/TemporaryDataTypeTestCase.java | 1
-rw-r--r-- document/src/test/java/com/yahoo/document/TemporaryStructuredDataTypeTestCase.java | 1
-rw-r--r-- jdisc_core/abi-spec.json | 1265
-rw-r--r-- jdisc_core/pom.xml | 4
-rw-r--r-- node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Application.java | 9
-rw-r--r-- node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java | 33
-rw-r--r-- node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java | 26
-rw-r--r-- node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java | 4
-rw-r--r-- node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java | 5
-rw-r--r-- node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializer.java | 4
-rw-r--r-- node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java | 12
-rw-r--r-- node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java | 10
-rw-r--r-- node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingIntegrationTest.java | 3
-rw-r--r-- node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java | 157
-rw-r--r-- node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java | 18
-rw-r--r-- node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java | 14
-rw-r--r-- node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializerTest.java | 4
-rw-r--r-- node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java | 8
-rw-r--r-- node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java | 4
-rw-r--r-- predicate-search-core/abi-spec.json | 309
-rw-r--r-- predicate-search-core/pom.xml | 4
-rw-r--r-- predicate-search/abi-spec.json | 159
-rw-r--r-- predicate-search/pom.xml | 4
-rw-r--r-- screwdriver.yaml | 3
-rw-r--r-- searchcore/src/tests/proton/documentdb/combiningfeedview/combiningfeedview_test.cpp | 19
-rw-r--r-- searchcore/src/tests/proton/documentdb/executor_threading_service/executor_threading_service_test.cpp | 1
-rw-r--r-- searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp | 2
-rw-r--r-- searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp | 19
-rw-r--r-- searchcore/src/tests/proton/documentdb/storeonlyfeedview/storeonlyfeedview_test.cpp | 10
-rw-r--r-- searchcore/src/vespa/searchcore/proton/server/combiningfeedview.cpp | 12
-rw-r--r-- searchcore/src/vespa/searchcore/proton/server/combiningfeedview.h | 6
-rw-r--r-- searchcore/src/vespa/searchcore/proton/server/documentdb.cpp | 2
-rw-r--r-- searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.cpp | 19
-rw-r--r-- searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.h | 4
-rw-r--r-- searchcore/src/vespa/searchcore/proton/server/feedhandler.cpp | 14
-rw-r--r-- searchcore/src/vespa/searchcore/proton/server/feedstates.cpp | 9
-rw-r--r-- searchcore/src/vespa/searchcore/proton/server/i_shared_threading_service.h | 6
-rw-r--r-- searchcore/src/vespa/searchcore/proton/server/ifeedview.h | 11
-rw-r--r-- searchcore/src/vespa/searchcore/proton/server/proton.cpp | 1
-rw-r--r-- searchcore/src/vespa/searchcore/proton/server/proton.h | 5
-rw-r--r-- searchcore/src/vespa/searchcore/proton/server/searchable_feed_view.cpp | 4
-rw-r--r-- searchcore/src/vespa/searchcore/proton/server/searchable_feed_view.h | 2
-rw-r--r-- searchcore/src/vespa/searchcore/proton/server/shared_threading_service.cpp | 10
-rw-r--r-- searchcore/src/vespa/searchcore/proton/server/shared_threading_service.h | 5
-rw-r--r-- searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.cpp | 26
-rw-r--r-- searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.h | 13
-rw-r--r-- searchcore/src/vespa/searchcore/proton/test/dummy_feed_view.h | 6
-rw-r--r-- searchcore/src/vespa/searchcore/proton/test/mock_shared_threading_service.h | 6
-rw-r--r-- staging_vespalib/src/vespa/vespalib/util/isequencedtaskexecutor.h | 4
-rw-r--r-- storage/src/tests/distributor/mergeoperationtest.cpp | 11
-rw-r--r-- storage/src/tests/distributor/statecheckerstest.cpp | 2
-rw-r--r-- storage/src/vespa/storage/config/stor-distributormanager.def | 2
-rw-r--r-- storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp | 1
-rw-r--r-- vespalib/CMakeLists.txt | 1
-rw-r--r-- vespalib/src/tests/invokeservice/CMakeLists.txt | 9
-rw-r--r-- vespalib/src/tests/invokeservice/invokeservice_test.cpp | 62
-rw-r--r-- vespalib/src/vespa/vespalib/util/CMakeLists.txt | 1
-rw-r--r-- vespalib/src/vespa/vespalib/util/executor.h | 18
-rw-r--r-- vespalib/src/vespa/vespalib/util/invokeservice.h | 20
-rw-r--r-- vespalib/src/vespa/vespalib/util/invokeserviceimpl.cpp | 86
-rw-r--r-- vespalib/src/vespa/vespalib/util/invokeserviceimpl.h | 36
101 files changed, 2747 insertions, 461 deletions
diff --git a/client/go/Makefile b/client/go/Makefile
index 58d15e9c139..fc1ac843917 100644
--- a/client/go/Makefile
+++ b/client/go/Makefile
@@ -111,7 +111,10 @@ manpages: install
$(BIN)/vespa man $(SHARE)/man/man1
clean:
- rm -rf $(BIN) $(SHARE) $(DIST)
+ rm -rf $(DIST)
+ rm -f $(BIN)/vespa $(SHARE)/man/man1/vespa.1 $(SHARE)/man/man1/vespa-*.1
+ rmdir -p $(BIN) &> /dev/null || true
+ rmdir -p $(SHARE)/man/man1 &> /dev/null || true
test:
env $(GOPROXY_OVERRIDE) go test ./...
diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/xml/DeploymentSpecXmlReader.java b/config-model-api/src/main/java/com/yahoo/config/application/api/xml/DeploymentSpecXmlReader.java
index 6f77dce8fc5..07b8462f4d1 100644
--- a/config-model-api/src/main/java/com/yahoo/config/application/api/xml/DeploymentSpecXmlReader.java
+++ b/config-model-api/src/main/java/com/yahoo/config/application/api/xml/DeploymentSpecXmlReader.java
@@ -477,9 +477,10 @@ public class DeploymentSpecXmlReader {
private boolean readActive(Element regionTag) {
String activeValue = regionTag.getAttribute("active");
+ if ("".equals(activeValue)) return true; // Default to active
if ("true".equals(activeValue)) return true;
if ("false".equals(activeValue)) return false;
- throw new IllegalArgumentException("Region tags must have an 'active' attribute set to 'true' or 'false' " +
+ throw new IllegalArgumentException("Value of 'active' attribute in region tag must be 'true' or 'false' " +
"to control whether this region should receive traffic from the global endpoint of this application");
}
diff --git a/config-model-api/src/test/java/com/yahoo/config/application/api/DeploymentSpecTest.java b/config-model-api/src/test/java/com/yahoo/config/application/api/DeploymentSpecTest.java
index 74e79d5e8cf..43ccc34284f 100644
--- a/config-model-api/src/test/java/com/yahoo/config/application/api/DeploymentSpecTest.java
+++ b/config-model-api/src/test/java/com/yahoo/config/application/api/DeploymentSpecTest.java
@@ -1162,8 +1162,8 @@ public class DeploymentSpecTest {
var spec = DeploymentSpec.fromXml("<deployment>" +
" <instance id='default'>" +
" <prod>" +
- " <region active=\"true\">us-east</region>" +
- " <region active=\"true\">us-west</region>" +
+ " <region>us-east</region>" +
+ " <region>us-west</region>" +
" </prod>" +
" <endpoints>" +
" <endpoint id=\"foo\" container-id=\"bar\">" +
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/document/Attribute.java b/config-model/src/main/java/com/yahoo/searchdefinition/document/Attribute.java
index cc08e84ef9d..0a51ce3dda5 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/document/Attribute.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/document/Attribute.java
@@ -332,6 +332,7 @@ public final class Attribute implements Cloneable, Serializable {
}
}
+ @SuppressWarnings("deprecation")
private DataType createReferenceDataType() {
if (!referenceDocumentType.isPresent()) {
throw new IllegalStateException("Referenced document type is not set!");
diff --git a/config-model/src/main/java/com/yahoo/vespa/configmodel/producers/DocumentManager.java b/config-model/src/main/java/com/yahoo/vespa/configmodel/producers/DocumentManager.java
index 59387c28287..fdbb1d8c8e0 100644
--- a/config-model/src/main/java/com/yahoo/vespa/configmodel/producers/DocumentManager.java
+++ b/config-model/src/main/java/com/yahoo/vespa/configmodel/producers/DocumentManager.java
@@ -44,6 +44,7 @@ public class DocumentManager {
return documentConfigBuilder;
}
+ @SuppressWarnings("deprecation")
private void buildConfig(DataTypeCollection type, DocumentmanagerConfig.Builder documentConfigBuilder, Set<DataType> built) {
for (DataType dataType : type.getTypes()) {
if (built.contains(dataType)) continue;
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/DocumentGraphValidatorTest.java b/config-model/src/test/java/com/yahoo/searchdefinition/DocumentGraphValidatorTest.java
index f57c63b440f..30cda8b5f42 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/DocumentGraphValidatorTest.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/DocumentGraphValidatorTest.java
@@ -152,6 +152,7 @@ public class DocumentGraphValidatorTest {
return campaignSchema;
}
+ @SuppressWarnings("deprecation")
private static void createDocumentReference(Schema from, Schema to, String refFieldName) {
SDField refField = new TemporarySDField(refFieldName, ReferenceDataType.createWithInferredId(TemporaryStructuredDataType.create(to.getName())));
SDDocumentType fromDocument = from.getDocument();
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/DocumentReferenceResolverTest.java b/config-model/src/test/java/com/yahoo/searchdefinition/DocumentReferenceResolverTest.java
index 260b7a98fe7..19964700dd0 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/DocumentReferenceResolverTest.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/DocumentReferenceResolverTest.java
@@ -56,6 +56,7 @@ public class DocumentReferenceResolverTest {
assertSame(fooRefToBarField, fooReferenceMap.get("bar_ref").referenceField());
}
+ @SuppressWarnings("deprecation")
@Test
public void throws_user_friendly_exception_if_referenced_document_does_not_exist() {
// Create foo document with document reference to non-existing document bar
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/derived/SchemaOrdererTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/derived/SchemaOrdererTestCase.java
index c6f254df798..b3f2fb62ac2 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/derived/SchemaOrdererTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/derived/SchemaOrdererTestCase.java
@@ -88,6 +88,7 @@ public class SchemaOrdererTestCase extends AbstractSchemaTestCase {
assertEquals(expectedSearchOrder, actualSearchOrder);
}
+ @SuppressWarnings("deprecation")
private static void createDocumentReference(Schema from, Schema to, String refFieldName) {
SDField refField = new TemporarySDField(refFieldName, ReferenceDataType.createWithInferredId(TemporaryStructuredDataType.create(to.getName())));
SDDocumentType fromDocument = from.getDocument();
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/derived/VsmFieldsTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/derived/VsmFieldsTestCase.java
index 55702c9e1a7..138992477c0 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/derived/VsmFieldsTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/derived/VsmFieldsTestCase.java
@@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals;
*/
public class VsmFieldsTestCase {
+ @SuppressWarnings("deprecation")
@Test
public void reference_type_field_is_unsearchable() {
Schema schema = new Schema("test", new Application(MockApplicationPackage.createEmpty()), new MockFileRegistry(), new TestableDeployLogger(), new TestProperties());
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/processing/ParentChildSearchModel.java b/config-model/src/test/java/com/yahoo/searchdefinition/processing/ParentChildSearchModel.java
index a01fdd8725d..74fa7c72554 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/processing/ParentChildSearchModel.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/processing/ParentChildSearchModel.java
@@ -44,6 +44,7 @@ public class ParentChildSearchModel {
return result;
}
+ @SuppressWarnings("deprecation")
protected static SDField createRefField(String parentType, String fieldName) {
return new TemporarySDField(fieldName, ReferenceDataType.createWithInferredId(TemporaryStructuredDataType.create(parentType)));
}
diff --git a/config/src/vespa/config/common/configvalue.cpp b/config/src/vespa/config/common/configvalue.cpp
index d770c126d38..da1cbfc792c 100644
--- a/config/src/vespa/config/common/configvalue.cpp
+++ b/config/src/vespa/config/common/configvalue.cpp
@@ -55,7 +55,7 @@ ConfigValue::getLegacyFormat() const
return lines;
}
-const vespalib::string
+vespalib::string
ConfigValue::asJson() const {
if (_payload) {
const vespalib::slime::Inspector & payload(_payload->getSlimePayload());
diff --git a/config/src/vespa/config/common/configvalue.h b/config/src/vespa/config/common/configvalue.h
index 553a609b9db..bf4c320c061 100644
--- a/config/src/vespa/config/common/configvalue.h
+++ b/config/src/vespa/config/common/configvalue.h
@@ -35,8 +35,8 @@ public:
const vespalib::string & getLine(int i) const { return _lines.at(i); }
const std::vector<vespalib::string> & getLines() const { return _lines; }
std::vector<vespalib::string> getLegacyFormat() const;
- const vespalib::string asJson() const;
- const vespalib::string getXxhash64() const { return _xxhash64; }
+ vespalib::string asJson() const;
+ const vespalib::string& getXxhash64() const { return _xxhash64; }
void serializeV1(::vespalib::slime::Cursor & cursor) const;
void serializeV2(::vespalib::slime::Cursor & cursor) const;
diff --git a/config/src/vespa/config/frt/frtconfigagent.cpp b/config/src/vespa/config/frt/frtconfigagent.cpp
index 2b66e806270..827ef75251b 100644
--- a/config/src/vespa/config/frt/frtconfigagent.cpp
+++ b/config/src/vespa/config/frt/frtconfigagent.cpp
@@ -71,7 +71,7 @@ FRTConfigAgent::handleUpdatedGeneration(const ConfigKey & key, const ConfigState
if (LOG_WOULD_LOG(spam)) {
LOG(spam, "updating holder for key %s,", key.toString().c_str());
}
- _holder->handle(ConfigUpdate::UP(new ConfigUpdate(_latest, changed, newState.generation)));
+ _holder->handle(std::make_unique<ConfigUpdate>(_latest, changed, newState.generation));
_numConfigured++;
}
diff --git a/config/src/vespa/config/subscription/configsubscription.cpp b/config/src/vespa/config/subscription/configsubscription.cpp
index 4aaa30323c7..9790541906b 100644
--- a/config/src/vespa/config/subscription/configsubscription.cpp
+++ b/config/src/vespa/config/subscription/configsubscription.cpp
@@ -31,7 +31,11 @@ ConfigSubscription::nextUpdate(int64_t generation, std::chrono::milliseconds tim
if (_closed || !_holder->poll()) {
return false;
}
+ auto old = std::move(_next);
_next = _holder->provide();
+ if (old) {
+ _next->merge(*old);
+ }
if (isGenerationNewer(_next->getGeneration(), generation)) {
return true;
}
@@ -98,7 +102,7 @@ ConfigSubscription::flip()
_current = std::move(_next);
_lastGenerationChanged = _current->getGeneration();
} else {
- _current.reset(new ConfigUpdate(_current->getValue(), false, _next->getGeneration()));
+ _current = std::make_unique<ConfigUpdate>(_current->getValue(), false, _next->getGeneration());
}
_isChanged = change;
}
diff --git a/configdefinitions/abi-spec.json b/configdefinitions/abi-spec.json
new file mode 100644
index 00000000000..9e26dfeeb6e
--- /dev/null
+++ b/configdefinitions/abi-spec.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/configdefinitions/pom.xml b/configdefinitions/pom.xml
index d618ba151bf..78aa8b59222 100644
--- a/configdefinitions/pom.xml
+++ b/configdefinitions/pom.xml
@@ -60,6 +60,10 @@
<updateReleaseInfo>true</updateReleaseInfo>
</configuration>
</plugin>
+ <plugin>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>abi-check-plugin</artifactId>
+ </plugin>
</plugins>
</build>
</project>
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java
index 6ef0df9f099..943d6ac7b18 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java
@@ -118,7 +118,7 @@ public class RoutingController {
// Avoid reading application more than once per call to this
Supplier<DeploymentSpec> deploymentSpec = Suppliers.memoize(() -> controller.applications().requireApplication(TenantAndApplicationId.from(deployment.applicationId())).deploymentSpec());
// To discover the cluster name for a zone-scoped endpoint, we need to read routing policies
- for (var policy : routingPolicies.get(deployment).values()) {
+ for (var policy : routingPolicies.read(deployment)) {
if (!policy.status().isActive()) continue;
for (var routingMethod : controller.zoneRegistry().routingMethods(policy.id().zone())) {
if (routingMethod.isDirect() && !isSystemApplication && !canRouteDirectlyTo(deployment, deploymentSpec.get())) continue;
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java
index 3892ceeddf9..4e8f17b6098 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java
@@ -277,8 +277,13 @@ public class DeploymentTrigger {
/** Overrides the given instance's platform and application changes with any contained in the given change. */
public void forceChange(ApplicationId instanceId, Change change) {
applications().lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> {
- applications().store(application.with(instanceId.instance(),
- instance -> instance.withChange(change.onTopOf(application.get().require(instanceId.instance()).change()))));
+ Change newChange = change.onTopOf(application.get().require(instanceId.instance()).change());
+ application = application.with(instanceId.instance(),
+ instance -> instance.withChange(newChange));
+ DeploymentStatus newStatus = jobs.deploymentStatus(application.get());
+ application = application.with(instanceId.instance(),
+ instance -> instance.withChange(remainingChange(instance, newStatus)));
+ applications().store(application);
});
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java
index e10dcfd3b3b..c6dd8bab309 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java
@@ -20,6 +20,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.archive.ArchiveBucket;
import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCertificateMetadata;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
+import com.yahoo.vespa.hosted.controller.api.integration.vcmr.VespaChangeRequest;
import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.auditlog.AuditLog;
import com.yahoo.vespa.hosted.controller.deployment.RetriggerEntry;
@@ -27,11 +28,9 @@ import com.yahoo.vespa.hosted.controller.deployment.RetriggerEntrySerializer;
import com.yahoo.vespa.hosted.controller.deployment.Run;
import com.yahoo.vespa.hosted.controller.deployment.Step;
import com.yahoo.vespa.hosted.controller.dns.NameServiceQueue;
-import com.yahoo.vespa.hosted.controller.api.integration.vcmr.VespaChangeRequest;
import com.yahoo.vespa.hosted.controller.notification.Notification;
-import com.yahoo.vespa.hosted.controller.routing.RoutingStatus;
import com.yahoo.vespa.hosted.controller.routing.RoutingPolicy;
-import com.yahoo.vespa.hosted.controller.routing.RoutingPolicyId;
+import com.yahoo.vespa.hosted.controller.routing.RoutingStatus;
import com.yahoo.vespa.hosted.controller.routing.ZoneRoutingPolicy;
import com.yahoo.vespa.hosted.controller.support.access.SupportAccess;
import com.yahoo.vespa.hosted.controller.tenant.Tenant;
@@ -514,19 +513,31 @@ public class CuratorDb {
// -------------- Routing policies ----------------------------------------
- public void writeRoutingPolicies(ApplicationId application, Map<RoutingPolicyId, RoutingPolicy> policies) {
+ public void writeRoutingPolicies(ApplicationId application, List<RoutingPolicy> policies) {
+ for (var policy : policies) {
+ if (!policy.id().owner().equals(application)) {
+ throw new IllegalArgumentException(policy.id() + " does not belong to the application being written: " +
+ application.toShortString());
+ }
+ }
curator.set(routingPolicyPath(application), asJson(routingPolicySerializer.toSlime(policies)));
}
- public Map<ApplicationId, Map<RoutingPolicyId, RoutingPolicy>> readRoutingPolicies() {
+ public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies() {
+ return readRoutingPolicies((instance) -> true);
+ }
+
+ public Map<ApplicationId, List<RoutingPolicy>> readRoutingPolicies(Predicate<ApplicationId> filter) {
return curator.getChildren(routingPoliciesRoot).stream()
.map(ApplicationId::fromSerializedForm)
- .collect(Collectors.toUnmodifiableMap(Function.identity(), this::readRoutingPolicies));
+ .filter(filter)
+ .collect(Collectors.toUnmodifiableMap(Function.identity(),
+ this::readRoutingPolicies));
}
- public Map<RoutingPolicyId, RoutingPolicy> readRoutingPolicies(ApplicationId application) {
+ public List<RoutingPolicy> readRoutingPolicies(ApplicationId application) {
return readSlime(routingPolicyPath(application)).map(slime -> routingPolicySerializer.fromSlime(application, slime))
- .orElseGet(Map::of);
+ .orElseGet(List::of);
}
public void writeZoneRoutingPolicy(ZoneRoutingPolicy policy) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializer.java
index 04d1a4c7433..17337f823c0 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializer.java
@@ -11,15 +11,15 @@ import com.yahoo.slime.Inspector;
import com.yahoo.slime.Slime;
import com.yahoo.slime.SlimeUtils;
import com.yahoo.vespa.hosted.controller.application.EndpointId;
-import com.yahoo.vespa.hosted.controller.routing.RoutingStatus;
import com.yahoo.vespa.hosted.controller.routing.RoutingPolicy;
import com.yahoo.vespa.hosted.controller.routing.RoutingPolicyId;
+import com.yahoo.vespa.hosted.controller.routing.RoutingStatus;
import java.time.Instant;
+import java.util.ArrayList;
import java.util.Collections;
-import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
-import java.util.Map;
+import java.util.List;
import java.util.Set;
/**
@@ -50,11 +50,11 @@ public class RoutingPolicySerializer {
private static final String changedAtField = "changedAt";
private static final String statusField = "status";
- public Slime toSlime(Map<RoutingPolicyId, RoutingPolicy> routingPolicies) {
+ public Slime toSlime(List<RoutingPolicy> routingPolicies) {
var slime = new Slime();
var root = slime.setObject();
var policyArray = root.setArray(routingPoliciesField);
- routingPolicies.values().forEach(policy -> {
+ routingPolicies.forEach(policy -> {
var policyObject = policyArray.addObject();
policyObject.setString(clusterField, policy.id().cluster().value());
policyObject.setString(zoneField, policy.id().zone().value());
@@ -70,8 +70,8 @@ public class RoutingPolicySerializer {
return slime;
}
- public Map<RoutingPolicyId, RoutingPolicy> fromSlime(ApplicationId owner, Slime slime) {
- var policies = new LinkedHashMap<RoutingPolicyId, RoutingPolicy>();
+ public List<RoutingPolicy> fromSlime(ApplicationId owner, Slime slime) {
+ List<RoutingPolicy> policies = new ArrayList<>();
var root = slime.get();
var field = root.field(routingPoliciesField);
field.traverse((ArrayTraverser) (i, inspect) -> {
@@ -82,15 +82,15 @@ public class RoutingPolicySerializer {
RoutingPolicyId id = new RoutingPolicyId(owner,
ClusterSpec.Id.from(inspect.field(clusterField).asString()),
ZoneId.from(inspect.field(zoneField).asString()));
- policies.put(id, new RoutingPolicy(id,
- HostName.from(inspect.field(canonicalNameField).asString()),
- SlimeUtils.optionalString(inspect.field(dnsZoneField)),
- instanceEndpoints,
- applicationEndpoints,
- new RoutingPolicy.Status(inspect.field(loadBalancerActiveField).asBool(),
- globalRoutingFromSlime(inspect.field(globalRoutingField)))));
+ policies.add(new RoutingPolicy(id,
+ HostName.from(inspect.field(canonicalNameField).asString()),
+ SlimeUtils.optionalString(inspect.field(dnsZoneField)),
+ instanceEndpoints,
+ applicationEndpoints,
+ new RoutingPolicy.Status(inspect.field(loadBalancerActiveField).asBool(),
+ globalRoutingFromSlime(inspect.field(globalRoutingField)))));
});
- return Collections.unmodifiableMap(policies);
+ return Collections.unmodifiableList(policies);
}
public void globalRoutingToSlime(RoutingStatus routingStatus, Cursor object) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java
index d2dc2771160..b98ef717dd3 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java
@@ -3,7 +3,6 @@ package com.yahoo.vespa.hosted.controller.routing;
import com.yahoo.config.application.api.DeploymentSpec;
import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.zone.RoutingMethod;
import com.yahoo.config.provision.zone.ZoneId;
@@ -27,17 +26,14 @@ import com.yahoo.vespa.hosted.controller.dns.NameServiceQueue.Priority;
import com.yahoo.vespa.hosted.controller.dns.NameServiceRequest;
import com.yahoo.vespa.hosted.controller.persistence.CuratorDb;
-import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
-import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
-import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
@@ -62,43 +58,63 @@ public class RoutingPolicies {
}
}
- /** Read all known routing policies for given instance */
- public Map<RoutingPolicyId, RoutingPolicy> get(ApplicationId application) {
- return db.readRoutingPolicies(application);
+ /** Read all routing policies for given deployment */
+ public RoutingPolicyList read(DeploymentId deployment) {
+ return read(deployment.applicationId()).deployment(deployment);
}
- /** Read all known routing policies for given deployment */
- public Map<RoutingPolicyId, RoutingPolicy> get(DeploymentId deployment) {
- return db.readRoutingPolicies(deployment.applicationId()).entrySet()
+ /** Read all routing policies for given instance */
+ public RoutingPolicyList read(ApplicationId instance) {
+ return RoutingPolicyList.copyOf(db.readRoutingPolicies(instance));
+ }
+
+ /** Read all routing policies for given application */
+ private RoutingPolicyList read(TenantAndApplicationId application) {
+ return db.readRoutingPolicies((instance) -> TenantAndApplicationId.from(instance).equals(application))
+ .values()
+ .stream()
+ .flatMap(Collection::stream)
+ .collect(Collectors.collectingAndThen(Collectors.toList(), RoutingPolicyList::copyOf));
+ }
+
+ /** Read all routing policies */
+ private RoutingPolicyList readAll() {
+ return db.readRoutingPolicies()
+ .values()
.stream()
- .filter(kv -> kv.getKey().zone().equals(deployment.zoneId()))
- .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
+ .flatMap(Collection::stream)
+ .collect(Collectors.collectingAndThen(Collectors.toList(), RoutingPolicyList::copyOf));
}
/** Read routing policy for given zone */
- public ZoneRoutingPolicy get(ZoneId zone) {
+ public ZoneRoutingPolicy read(ZoneId zone) {
return db.readZoneRoutingPolicy(zone);
}
/**
* Refresh routing policies for instance in given zone. This is idempotent and changes will only be performed if
- * load balancers for given instance have changed.
+ * routing configuration affecting given deployment has changed.
*/
- public void refresh(ApplicationId instance, DeploymentSpec deploymentSpec, ZoneId zone) {
- LoadBalancerAllocation allocation = new LoadBalancerAllocation(instance, zone, controller.serviceRegistry().configServer()
- .getLoadBalancers(instance, zone),
- deploymentSpec);
+ public void refresh(DeploymentId deployment, DeploymentSpec deploymentSpec) {
+ ApplicationId instance = deployment.applicationId();
+ List<LoadBalancer> loadBalancers = controller.serviceRegistry().configServer()
+ .getLoadBalancers(instance, deployment.zoneId());
+ LoadBalancerAllocation allocation = new LoadBalancerAllocation(loadBalancers, deployment, deploymentSpec);
Set<ZoneId> inactiveZones = inactiveZones(instance, deploymentSpec);
try (var lock = db.lockRoutingPolicies()) {
- removeGlobalDnsUnreferencedBy(allocation, lock);
- removeApplicationDnsUnreferencedBy(allocation, lock);
+ RoutingPolicyList applicationPolicies = read(TenantAndApplicationId.from(instance));
+ RoutingPolicyList instancePolicies = applicationPolicies.instance(instance);
+ RoutingPolicyList deploymentPolicies = applicationPolicies.deployment(allocation.deployment);
+
+ removeGlobalDnsUnreferencedBy(allocation, deploymentPolicies, lock);
+ removeApplicationDnsUnreferencedBy(allocation, deploymentPolicies, lock);
- storePoliciesOf(allocation, lock);
- removePoliciesUnreferencedBy(allocation, lock);
+ instancePolicies = storePoliciesOf(allocation, instancePolicies, lock);
+ instancePolicies = removePoliciesUnreferencedBy(allocation, instancePolicies, lock);
- Collection<RoutingPolicy> policies = get(allocation.deployment.applicationId()).values();
- updateGlobalDnsOf(policies, inactiveZones, lock);
- updateApplicationDnsOf(policies, inactiveZones, lock);
+ applicationPolicies = applicationPolicies.replace(instance, instancePolicies);
+ updateGlobalDnsOf(instancePolicies, inactiveZones, lock);
+ updateApplicationDnsOf(applicationPolicies, inactiveZones, lock);
}
}
@@ -107,33 +123,37 @@ public class RoutingPolicies {
try (var lock = db.lockRoutingPolicies()) {
db.writeZoneRoutingPolicy(new ZoneRoutingPolicy(zone, RoutingStatus.create(value, RoutingStatus.Agent.operator,
controller.clock().instant())));
- Map<ApplicationId, Map<RoutingPolicyId, RoutingPolicy>> allPolicies = db.readRoutingPolicies();
- for (var applicationPolicies : allPolicies.values()) {
- updateGlobalDnsOf(applicationPolicies.values(), Set.of(), lock);
+ Map<ApplicationId, RoutingPolicyList> allPolicies = readAll().groupingBy(policy -> policy.id().owner());
+ for (var instancePolicies : allPolicies.values()) {
+ updateGlobalDnsOf(instancePolicies, Set.of(), lock);
}
}
}
/** Set the status of all global endpoints for given deployment */
public void setRoutingStatus(DeploymentId deployment, RoutingStatus.Value value, RoutingStatus.Agent agent) {
+ ApplicationId instance = deployment.applicationId();
try (var lock = db.lockRoutingPolicies()) {
- var policies = get(deployment.applicationId());
- var newPolicies = new LinkedHashMap<>(policies);
- for (var policy : policies.values()) {
- if (!policy.appliesTo(deployment)) continue;
+ RoutingPolicyList applicationPolicies = read(TenantAndApplicationId.from(instance));
+ RoutingPolicyList deploymentPolicies = applicationPolicies.deployment(deployment);
+ Map<RoutingPolicyId, RoutingPolicy> updatedPolicies = new LinkedHashMap<>(applicationPolicies.asMap());
+ for (var policy : deploymentPolicies) {
var newPolicy = policy.with(policy.status().with(RoutingStatus.create(value, agent,
controller.clock().instant())));
- newPolicies.put(policy.id(), newPolicy);
+ updatedPolicies.put(policy.id(), newPolicy);
}
- db.writeRoutingPolicies(deployment.applicationId(), newPolicies);
- updateGlobalDnsOf(newPolicies.values(), Set.of(), lock);
- updateApplicationDnsOf(newPolicies.values(), Set.of(), lock);
+
+ RoutingPolicyList effectivePolicies = RoutingPolicyList.copyOf(updatedPolicies.values());
+ Map<ApplicationId, RoutingPolicyList> policiesByInstance = effectivePolicies.groupingBy(policy -> policy.id().owner());
+ policiesByInstance.forEach((owner, instancePolicies) -> db.writeRoutingPolicies(owner, instancePolicies.asList()));
+ policiesByInstance.forEach((ignored, instancePolicies) -> updateGlobalDnsOf(instancePolicies, Set.of(), lock));
+ updateApplicationDnsOf(effectivePolicies, Set.of(), lock);
}
}
/** Update global DNS records for given policies */
- private void updateGlobalDnsOf(Collection<RoutingPolicy> routingPolicies, Set<ZoneId> inactiveZones, @SuppressWarnings("unused") Lock lock) {
- Map<RoutingId, List<RoutingPolicy>> routingTable = instanceRoutingTable(routingPolicies);
+ private void updateGlobalDnsOf(RoutingPolicyList instancePolicies, Set<ZoneId> inactiveZones, @SuppressWarnings("unused") Lock lock) {
+ Map<RoutingId, List<RoutingPolicy>> routingTable = instancePolicies.asInstanceRoutingTable();
for (Map.Entry<RoutingId, List<RoutingPolicy>> routeEntry : routingTable.entrySet()) {
RoutingId routingId = routeEntry.getKey();
controller.routing().readDeclaredEndpointsOf(routingId.instance())
@@ -205,17 +225,17 @@ public class RoutingPolicies {
}
- private void updateApplicationDnsOf(Collection<RoutingPolicy> routingPolicies, Set<ZoneId> inactiveZones, @SuppressWarnings("unused") Lock lock) {
+ private void updateApplicationDnsOf(RoutingPolicyList routingPolicies, Set<ZoneId> inactiveZones, @SuppressWarnings("unused") Lock lock) {
// In the context of single deployment (which this is) there is only one routing policy per routing ID. I.e.
// there is no scenario where more than one deployment within an instance can be a member of the same
// application-level endpoint. However, to allow this in the future the routing table remains
// Map<RoutingId, List<RoutingPolicy>> instead of Map<RoutingId, RoutingPolicy>.
- Map<RoutingId, List<RoutingPolicy>> routingTable = applicationRoutingTable(routingPolicies);
+ Map<RoutingId, List<RoutingPolicy>> routingTable = routingPolicies.asApplicationRoutingTable();
if (routingTable.isEmpty()) return;
Application application = controller.applications().requireApplication(routingTable.keySet().iterator().next().application());
- Map<DeploymentId, Map<EndpointId, Integer>> targetWeights = targetWeights(application);
Map<Endpoint, Set<AliasTarget>> targetsByEndpoint = new LinkedHashMap<>();
+ Map<Endpoint, Set<AliasTarget>> inactiveTargetsByEndpoint = new LinkedHashMap<>();
for (Map.Entry<RoutingId, List<RoutingPolicy>> routeEntry : routingTable.entrySet()) {
RoutingId routingId = routeEntry.getKey();
EndpointList endpoints = controller.routing().declaredEndpointsOf(application)
@@ -231,32 +251,64 @@ public class RoutingPolicies {
for (var target : endpoint.targets()) {
if (!policy.appliesTo(target.deployment())) continue;
if (policy.dnsZone().isEmpty()) continue; // Does not support ALIAS records
- int weight = target.weight();
- if (isConfiguredOut(policy, inactiveZones) && removableFromApplicationEndpoint(policy, application, targetWeights)) {
- weight = 0;
- }
+ ZoneRoutingPolicy zonePolicy = db.readZoneRoutingPolicy(policy.id().zone());
WeightedAliasTarget weightedAliasTarget = new WeightedAliasTarget(policy.canonicalName(), policy.dnsZone().get(),
- target.deployment().zoneId(), weight);
- targetsByEndpoint.computeIfAbsent(endpoint, (k) -> new LinkedHashSet<>())
- .add(weightedAliasTarget);
+ target.deployment().zoneId(), target.weight());
+ Set<AliasTarget> activeTargets = targetsByEndpoint.computeIfAbsent(endpoint, (k) -> new LinkedHashSet<>());
+ Set<AliasTarget> inactiveTargets = inactiveTargetsByEndpoint.computeIfAbsent(endpoint, (k) -> new LinkedHashSet<>());
+ if (isConfiguredOut(zonePolicy, policy, inactiveZones)) {
+ inactiveTargets.add(weightedAliasTarget);
+ } else {
+ activeTargets.add(weightedAliasTarget);
+ }
}
}
}
+
+ // If all targets are configured OUT, all targets are kept IN. We do this because otherwise removing 100% of
+ // the ALIAS records would cause the application endpoint to stop resolving entirely (NXDOMAIN).
+ for (var kv : targetsByEndpoint.entrySet()) {
+ Endpoint endpoint = kv.getKey();
+ Set<AliasTarget> activeTargets = kv.getValue();
+ if (!activeTargets.isEmpty()) {
+ continue;
+ }
+ Set<AliasTarget> inactiveTargets = inactiveTargetsByEndpoint.get(endpoint);
+ activeTargets.addAll(inactiveTargets);
+ inactiveTargets.clear();
+ }
targetsByEndpoint.forEach((applicationEndpoint, targets) -> {
ZoneId targetZone = applicationEndpoint.targets().stream()
- .map(Endpoint.Target::deployment)
- .map(DeploymentId::zoneId)
- .findFirst()
- .get();
+ .map(Endpoint.Target::deployment)
+ .map(DeploymentId::zoneId)
+ .findFirst()
+ .get();
nameServiceForwarderIn(targetZone).createAlias(RecordName.from(applicationEndpoint.dnsName()),
targets,
Priority.normal);
});
+ inactiveTargetsByEndpoint.forEach((applicationEndpoint, targets) -> {
+ ZoneId targetZone = applicationEndpoint.targets().stream()
+ .map(Endpoint.Target::deployment)
+ .map(DeploymentId::zoneId)
+ .findFirst()
+ .get();
+ targets.forEach(target -> {
+ nameServiceForwarderIn(targetZone).removeRecords(Record.Type.ALIAS,
+ RecordName.from(applicationEndpoint.dnsName()),
+ RecordData.fqdn(target.name().value()),
+ Priority.normal);
+ });
+ });
}
- /** Store routing policies for given load balancers */
- private void storePoliciesOf(LoadBalancerAllocation allocation, @SuppressWarnings("unused") Lock lock) {
- var policies = new LinkedHashMap<>(get(allocation.deployment.applicationId()));
+ /**
+ * Store routing policies for given load balancers
+ *
+ * @return the updated policies
+ */
+ private RoutingPolicyList storePoliciesOf(LoadBalancerAllocation allocation, RoutingPolicyList instancePolicies, @SuppressWarnings("unused") Lock lock) {
+ Map<RoutingPolicyId, RoutingPolicy> policies = new LinkedHashMap<>(instancePolicies.asMap());
for (LoadBalancer loadBalancer : allocation.loadBalancers) {
if (loadBalancer.hostname().isEmpty()) continue;
var policyId = new RoutingPolicyId(loadBalancer.application(), loadBalancer.cluster(), allocation.deployment.zoneId());
@@ -272,7 +324,9 @@ public class RoutingPolicies {
updateZoneDnsOf(newPolicy);
policies.put(newPolicy.id(), newPolicy);
}
- db.writeRoutingPolicies(allocation.deployment.applicationId(), policies);
+ RoutingPolicyList updated = RoutingPolicyList.copyOf(policies.values());
+ db.writeRoutingPolicies(allocation.deployment.applicationId(), updated.asList());
+ return updated;
}
/** Update zone DNS record for given policy */
@@ -284,14 +338,17 @@ public class RoutingPolicies {
}
}
- /** Remove policies and zone DNS records unreferenced by given load balancers */
- private void removePoliciesUnreferencedBy(LoadBalancerAllocation allocation, @SuppressWarnings("unused") Lock lock) {
- var policies = get(allocation.deployment.applicationId());
- var newPolicies = new LinkedHashMap<>(policies);
- var activeIds = allocation.asPolicyIds();
- for (var policy : policies.values()) {
- // Leave active load balancers and irrelevant zones alone
- if (activeIds.contains(policy.id()) || !policy.appliesTo(allocation.deployment)) continue;
+ /**
+ * Remove policies and zone DNS records unreferenced by given load balancers
+ *
+ * @return the updated policies
+ */
+ private RoutingPolicyList removePoliciesUnreferencedBy(LoadBalancerAllocation allocation, RoutingPolicyList instancePolicies, @SuppressWarnings("unused") Lock lock) {
+ Map<RoutingPolicyId, RoutingPolicy> newPolicies = new LinkedHashMap<>(instancePolicies.asMap());
+ Set<RoutingPolicyId> activeIds = allocation.asPolicyIds();
+ RoutingPolicyList removable = instancePolicies.deployment(allocation.deployment)
+ .not().matching(policy -> activeIds.contains(policy.id()));
+ for (var policy : removable) {
for (var endpoint : policy.zoneEndpointsIn(controller.system(), RoutingMethod.exclusive, controller.zoneRegistry())) {
var dnsName = endpoint.dnsName();
nameServiceForwarderIn(allocation.deployment.zoneId()).removeRecords(Record.Type.CNAME,
@@ -300,13 +357,14 @@ public class RoutingPolicies {
}
newPolicies.remove(policy.id());
}
- db.writeRoutingPolicies(allocation.deployment.applicationId(), newPolicies);
+ RoutingPolicyList updated = RoutingPolicyList.copyOf(newPolicies.values());
+ db.writeRoutingPolicies(allocation.deployment.applicationId(), updated.asList());
+ return updated;
}
/** Remove unreferenced instance endpoints from DNS */
- private void removeGlobalDnsUnreferencedBy(LoadBalancerAllocation allocation, @SuppressWarnings("unused") Lock lock) {
- Collection<RoutingPolicy> zonePolicies = get(allocation.deployment).values();
- Set<RoutingId> removalCandidates = new HashSet<>(instanceRoutingTable(zonePolicies).keySet());
+ private void removeGlobalDnsUnreferencedBy(LoadBalancerAllocation allocation, RoutingPolicyList deploymentPolicies, @SuppressWarnings("unused") Lock lock) {
+ Set<RoutingId> removalCandidates = new HashSet<>(deploymentPolicies.asInstanceRoutingTable().keySet());
Set<RoutingId> activeRoutingIds = instanceRoutingIds(allocation);
removalCandidates.removeAll(activeRoutingIds);
for (var id : removalCandidates) {
@@ -322,9 +380,8 @@ public class RoutingPolicies {
}
/** Remove unreferenced application endpoints in given allocation from DNS */
- private void removeApplicationDnsUnreferencedBy(LoadBalancerAllocation allocation, @SuppressWarnings("unused") Lock lock) {
- Collection<RoutingPolicy> zonePolicies = get(allocation.deployment).values();
- Map<RoutingId, List<RoutingPolicy>> routingTable = applicationRoutingTable(zonePolicies);
+ private void removeApplicationDnsUnreferencedBy(LoadBalancerAllocation allocation, RoutingPolicyList deploymentPolicies, @SuppressWarnings("unused") Lock lock) {
+ Map<RoutingId, List<RoutingPolicy>> routingTable = deploymentPolicies.asApplicationRoutingTable();
Set<RoutingId> removalCandidates = new HashSet<>(routingTable.keySet());
Set<RoutingId> activeRoutingIds = applicationRoutingIds(allocation);
removalCandidates.removeAll(activeRoutingIds);
@@ -345,42 +402,6 @@ public class RoutingPolicies {
}
}
- /** Returns whether we disable given policy from its application endpoints, taking weights and status of other instances into account */
- private boolean removableFromApplicationEndpoint(RoutingPolicy policy, Application application, Map<DeploymentId, Map<EndpointId, Integer>> targetWeights) {
- List<RoutingPolicy> relatedPolicies = application.productionInstances().keySet().stream()
- .filter(instanceName -> !policy.id().owner().instance().equals(instanceName))
- .map(instanceName -> application.id().instance(instanceName))
- .flatMap(instance -> get(instance).values().stream())
- .filter(relatedPolicy -> relatedPolicy.id().zone().equals(policy.id().zone()) &&
- relatedPolicy.id().cluster().equals(policy.id().cluster()))
- .collect(Collectors.toUnmodifiableList());
- for (var endpointId : policy.applicationEndpoints()) {
- boolean anyIn = relatedPolicies.stream()
- .anyMatch(rp -> rp.applicationEndpoints().contains(endpointId) &&
- rp.status().routingStatus().value() == RoutingStatus.Value.in &&
- targetWeights.get(rp.id().deployment())
- .get(endpointId) > 0);
- if (!anyIn) {
- return false;
- }
- }
- return true;
- }
-
- /** Returns target weights of application endpoints in given application, grouped by deployment */
- private Map<DeploymentId, Map<EndpointId, Integer>> targetWeights(Application application) {
- Map<DeploymentId, Map<EndpointId, Integer>> weights = new HashMap<>();
- for (var endpoint : application.deploymentSpec().endpoints()) {
- for (var target : endpoint.targets()) {
- weights.computeIfAbsent(new DeploymentId(application.id().instance(target.instance()),
- ZoneId.from(Environment.prod, target.region())),
- (k) -> new HashMap<>())
- .put(EndpointId.of(endpoint.endpointId()), target.weight());
- }
- }
- return weights;
- }
-
private Set<RoutingId> instanceRoutingIds(LoadBalancerAllocation allocation) {
return routingIdsFrom(allocation, false);
}
@@ -403,45 +424,13 @@ public class RoutingPolicies {
return Collections.unmodifiableSet(routingIds);
}
- /** Compute a routing table for instance-level endpoints from given policies */
- private static Map<RoutingId, List<RoutingPolicy>> instanceRoutingTable(Collection<RoutingPolicy> routingPolicies) {
- return routingTable(routingPolicies, false);
- }
-
- /** Compute a routing table for application-level endpoints from given policies */
- private static Map<RoutingId, List<RoutingPolicy>> applicationRoutingTable(Collection<RoutingPolicy> routingPolicies) {
- return routingTable(routingPolicies, true);
- }
-
- private static Map<RoutingId, List<RoutingPolicy>> routingTable(Collection<RoutingPolicy> routingPolicies, boolean applicationLevel) {
- Map<RoutingId, List<RoutingPolicy>> routingTable = new LinkedHashMap<>();
- for (var policy : routingPolicies) {
- Set<EndpointId> endpoints = applicationLevel ? policy.applicationEndpoints() : policy.instanceEndpoints();
- for (var endpoint : endpoints) {
- RoutingId id = RoutingId.of(policy.id().owner(), endpoint);
- routingTable.computeIfAbsent(id, k -> new ArrayList<>())
- .add(policy);
- }
- }
- return Collections.unmodifiableMap(routingTable);
- }
-
- /** Returns whether the endpoints of given policy are globally configured {@link RoutingStatus.Value#out} */
- private static boolean isConfiguredOut(ZoneRoutingPolicy zonePolicy, RoutingPolicy policy, Set<ZoneId> inactiveZones) {
- return isConfiguredOut(policy, Optional.of(zonePolicy), inactiveZones);
- }
-
/** Returns whether the endpoints of given policy are configured {@link RoutingStatus.Value#out} */
- private static boolean isConfiguredOut(RoutingPolicy policy, Set<ZoneId> inactiveZones) {
- return isConfiguredOut(policy, Optional.empty(), inactiveZones);
- }
-
- private static boolean isConfiguredOut(RoutingPolicy policy, Optional<ZoneRoutingPolicy> zonePolicy, Set<ZoneId> inactiveZones) {
+ private static boolean isConfiguredOut(ZoneRoutingPolicy zonePolicy, RoutingPolicy policy, Set<ZoneId> inactiveZones) {
// A deployment can be configured out from endpoints at any of the following levels:
- // - zone level (ZoneRoutingPolicy, only applies to global endpoints)
+ // - zone level (ZoneRoutingPolicy)
// - deployment level (RoutingPolicy)
// - application package level (deployment.xml)
- return (zonePolicy.isPresent() && zonePolicy.get().routingStatus().value() == RoutingStatus.Value.out) ||
+ return zonePolicy.routingStatus().value() == RoutingStatus.Value.out ||
policy.status().routingStatus().value() == RoutingStatus.Value.out ||
inactiveZones.contains(policy.id().zone());
}
@@ -499,9 +488,9 @@ public class RoutingPolicies {
private final List<LoadBalancer> loadBalancers;
private final DeploymentSpec deploymentSpec;
- private LoadBalancerAllocation(ApplicationId application, ZoneId zone, List<LoadBalancer> loadBalancers,
+ private LoadBalancerAllocation(List<LoadBalancer> loadBalancers, DeploymentId deployment,
DeploymentSpec deploymentSpec) {
- this.deployment = new DeploymentId(application, zone);
+ this.deployment = deployment;
this.loadBalancers = List.copyOf(loadBalancers);
this.deploymentSpec = deploymentSpec;
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicyId.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicyId.java
index e9cbdbd9b75..d64241b1239 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicyId.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicyId.java
@@ -60,4 +60,9 @@ public class RoutingPolicyId {
return Objects.hash(owner, cluster, zone);
}
+ @Override
+ public String toString() {
+ return "routing policy for " + cluster + ", in " + zone + ", owned by " + owner;
+ }
+
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicyList.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicyList.java
new file mode 100644
index 00000000000..a5efc016c68
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicyList.java
@@ -0,0 +1,99 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.routing;
+
+import com.yahoo.collections.AbstractFilteringList;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
+import com.yahoo.vespa.hosted.controller.application.EndpointId;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+/**
+ * A filterable list of {@link RoutingPolicy}'s.
+ *
+ * This is immutable.
+ *
+ * @author mpolden
+ */
+public class RoutingPolicyList extends AbstractFilteringList<RoutingPolicy, RoutingPolicyList> {
+
+ private final Map<RoutingPolicyId, RoutingPolicy> policiesById;
+
+ protected RoutingPolicyList(Collection<RoutingPolicy> items, boolean negate) {
+ super(items, negate, RoutingPolicyList::new);
+ this.policiesById = items.stream().collect(Collectors.collectingAndThen(
+ Collectors.toMap(RoutingPolicy::id,
+ Function.identity(),
+ (p1, p2) -> {
+ throw new IllegalArgumentException("Duplicate key " + p1.id());
+ },
+ LinkedHashMap::new),
+ Collections::unmodifiableMap)
+ );
+ }
+
+ /** Returns the subset of policies owned by given instance */
+ public RoutingPolicyList instance(ApplicationId instance) {
+ return matching(policy -> policy.id().owner().equals(instance));
+ }
+
+ /** Returns the subset of policies applying to given deployment */
+ public RoutingPolicyList deployment(DeploymentId deployment) {
+ return matching(policy -> policy.appliesTo(deployment));
+ }
+
+ /** Returns the policy with given ID, if any */
+ public Optional<RoutingPolicy> of(RoutingPolicyId id) {
+ return Optional.ofNullable(policiesById.get(id));
+ }
+
+ /** Returns this grouped by policy ID */
+ public Map<RoutingPolicyId, RoutingPolicy> asMap() {
+ return policiesById;
+ }
+
+ /** Returns a copy of this with all policies for instance replaced with given policies */
+ public RoutingPolicyList replace(ApplicationId instance, RoutingPolicyList policies) {
+ List<RoutingPolicy> copy = new ArrayList<>(asList());
+ copy.removeIf(policy -> policy.id().owner().equals(instance));
+ policies.forEach(copy::add);
+ return copyOf(copy);
+ }
+
+ /** Create a routing table for instance-level endpoints backed by routing policies in this */
+ Map<RoutingId, List<RoutingPolicy>> asInstanceRoutingTable() {
+ return asRoutingTable(false);
+ }
+
+ /** Create a routing table for application-level endpoints backed by routing policies in this */
+ Map<RoutingId, List<RoutingPolicy>> asApplicationRoutingTable() {
+ return asRoutingTable(true);
+ }
+
+ private Map<RoutingId, List<RoutingPolicy>> asRoutingTable(boolean applicationLevel) {
+ Map<RoutingId, List<RoutingPolicy>> routingTable = new LinkedHashMap<>();
+ for (var policy : this) {
+ Set<EndpointId> endpoints = applicationLevel ? policy.applicationEndpoints() : policy.instanceEndpoints();
+ for (var endpoint : endpoints) {
+ RoutingId id = RoutingId.of(policy.id().owner(), endpoint);
+ routingTable.computeIfAbsent(id, k -> new ArrayList<>())
+ .add(policy);
+ }
+ }
+ return Collections.unmodifiableMap(routingTable);
+ }
+
+ public static RoutingPolicyList copyOf(Collection<RoutingPolicy> policies) {
+ return new RoutingPolicyList(policies, false);
+ }
+
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/DeploymentRoutingContext.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/DeploymentRoutingContext.java
index 28fbeee28f5..6fd8a3a84d5 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/DeploymentRoutingContext.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/DeploymentRoutingContext.java
@@ -49,7 +49,7 @@ public abstract class DeploymentRoutingContext implements RoutingContext {
/** Configure routing for the deployment in this context, using given deployment spec */
public final void configure(DeploymentSpec deploymentSpec) {
- controller.policies().refresh(deployment.applicationId(), deploymentSpec, deployment.zoneId());
+ controller.policies().refresh(deployment, deploymentSpec);
}
/** Routing method of this context */
@@ -60,7 +60,7 @@ public abstract class DeploymentRoutingContext implements RoutingContext {
/** Read the routing policy for given cluster in this deployment */
public final Optional<RoutingPolicy> routingPolicy(ClusterSpec.Id cluster) {
RoutingPolicyId id = new RoutingPolicyId(deployment.applicationId(), cluster, deployment.zoneId());
- return Optional.ofNullable(controller.policies().get(deployment).get(id));
+ return controller.policies().read(deployment).of(id);
}
/**
@@ -142,8 +142,8 @@ public abstract class DeploymentRoutingContext implements RoutingContext {
public RoutingStatus routingStatus() {
// Status for a deployment applies to all clusters within the deployment, so we use the status from the
// first matching policy here
- return controller.policies().get(deployment).values().stream()
- .findFirst()
+ return controller.policies().read(deployment)
+ .first()
.map(RoutingPolicy::status)
.map(RoutingPolicy.Status::routingStatus)
.orElse(RoutingStatus.DEFAULT);
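
Editorial aside, not part of the diff: the hunks above replace map-based access (get(...).values()) with the new list API. A hedged fragment of the resulting lookup pattern, reusing the controller, deployment and cluster names already present in DeploymentRoutingContext:

    // Sketch of the new access pattern; 'controller', 'deployment' and 'cluster' are the
    // fields/parameters of DeploymentRoutingContext shown above.
    RoutingPolicyId id = new RoutingPolicyId(deployment.applicationId(), cluster, deployment.zoneId());
    Optional<RoutingPolicy> policy = controller.policies().read(deployment).of(id);
    RoutingStatus status = controller.policies().read(deployment)
                                     .first()                       // first policy, if any, speaks for the whole deployment
                                     .map(RoutingPolicy::status)
                                     .map(RoutingPolicy.Status::routingStatus)
                                     .orElse(RoutingStatus.DEFAULT);
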
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/ExclusiveZoneRoutingContext.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/ExclusiveZoneRoutingContext.java
index e29fb5ab404..75009e0b37a 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/ExclusiveZoneRoutingContext.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/ExclusiveZoneRoutingContext.java
@@ -30,7 +30,7 @@ public class ExclusiveZoneRoutingContext implements RoutingContext {
@Override
public RoutingStatus routingStatus() {
- return policies.get(zone).routingStatus();
+ return policies.read(zone).routingStatus();
}
@Override
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java
index d98789591ab..699721b128c 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java
@@ -240,13 +240,13 @@ public class DeploymentContext {
public DeploymentContext addInactiveRoutingPolicy(ZoneId zone) {
var clusterId = "default-inactive";
var id = new RoutingPolicyId(instanceId, ClusterSpec.Id.from(clusterId), zone);
- var policies = new LinkedHashMap<>(tester.controller().curator().readRoutingPolicies(instanceId));
+ var policies = new LinkedHashMap<>(tester.controller().routing().policies().read(instanceId).asMap());
policies.put(id, new RoutingPolicy(id, HostName.from("lb-host"),
Optional.empty(),
Set.of(EndpointId.of("default")),
Set.of(),
new RoutingPolicy.Status(false, RoutingStatus.DEFAULT)));
- tester.controller().curator().writeRoutingPolicies(instanceId, policies);
+ tester.controller().curator().writeRoutingPolicies(instanceId, List.copyOf(policies.values()));
return this;
}
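
Editorial note, not part of the change: the test helper above now reads policies through the routing policies API, while the curator write takes the policy list directly instead of a map. A hedged restatement of that read-modify-write flow, where 'tester', 'instanceId' and 'id' are the helper's own names and 'inactivePolicy' stands in for the policy constructed above:

    // Read existing policies as an insertion-ordered map, add one, persist the values as a list.
    var policies = new LinkedHashMap<>(tester.controller().routing().policies().read(instanceId).asMap());
    policies.put(id, inactivePolicy);
    tester.controller().curator().writeRoutingPolicies(instanceId, List.copyOf(policies.values()));
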
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
index 1f5fa243838..102dfde16ec 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
@@ -465,6 +465,21 @@ public class DeploymentTriggerTest {
}
@Test
+ public void settingANoOpChangeIsANoOp() {
+ var app = tester.newDeploymentContext().submit().deploy();
+ ApplicationVersion appVersion0 = app.lastSubmission().get();
+ app.submit().deploy();
+ ApplicationVersion appVersion1 = app.lastSubmission().get();
+
+ // Triggering a roll-out of an already deployed application is a no-op.
+ assertEquals(Change.empty(), app.instance().change());
+ tester.deploymentTrigger().forceChange(app.instanceId(), Change.of(appVersion0));
+ assertEquals(Change.empty(), app.instance().change());
+ tester.deploymentTrigger().forceChange(app.instanceId(), Change.of(appVersion1));
+ assertEquals(Change.empty(), app.instance().change());
+ }
+
+ @Test
public void stepIsCompletePreciselyWhenItShouldBe() {
var app1 = tester.newDeploymentContext("tenant1", "app1", "default");
var app2 = tester.newDeploymentContext("tenant1", "app2", "default");
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializerTest.java
index 2e36b8969ba..422188420bd 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializerTest.java
@@ -1,19 +1,19 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.persistence;
-import com.google.common.collect.ImmutableMap;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.application.EndpointId;
-import com.yahoo.vespa.hosted.controller.routing.RoutingStatus;
import com.yahoo.vespa.hosted.controller.routing.RoutingPolicy;
import com.yahoo.vespa.hosted.controller.routing.RoutingPolicyId;
+import com.yahoo.vespa.hosted.controller.routing.RoutingStatus;
import org.junit.Test;
import java.time.Instant;
import java.util.Iterator;
+import java.util.List;
import java.util.Optional;
import java.util.Set;
@@ -37,24 +37,24 @@ public class RoutingPolicySerializerTest {
var id2 = new RoutingPolicyId(owner,
ClusterSpec.Id.from("my-cluster2"),
ZoneId.from("prod", "us-north-2"));
- var policies = ImmutableMap.of(id1, new RoutingPolicy(id1,
- HostName.from("long-and-ugly-name"),
- Optional.of("zone1"),
- instanceEndpoints,
- applicationEndpoints,
- new RoutingPolicy.Status(true, RoutingStatus.DEFAULT)),
- id2, new RoutingPolicy(id2,
- HostName.from("long-and-ugly-name-2"),
- Optional.empty(),
- instanceEndpoints,
- Set.of(),
- new RoutingPolicy.Status(false,
- new RoutingStatus(RoutingStatus.Value.out,
- RoutingStatus.Agent.tenant,
- Instant.ofEpochSecond(123)))));
+ var policies = List.of(new RoutingPolicy(id1,
+ HostName.from("long-and-ugly-name"),
+ Optional.of("zone1"),
+ instanceEndpoints,
+ applicationEndpoints,
+ new RoutingPolicy.Status(true, RoutingStatus.DEFAULT)),
+ new RoutingPolicy(id2,
+ HostName.from("long-and-ugly-name-2"),
+ Optional.empty(),
+ instanceEndpoints,
+ Set.of(),
+ new RoutingPolicy.Status(false,
+ new RoutingStatus(RoutingStatus.Value.out,
+ RoutingStatus.Agent.tenant,
+ Instant.ofEpochSecond(123)))));
var serialized = serializer.fromSlime(owner, serializer.toSlime(policies));
assertEquals(policies.size(), serialized.size());
- for (Iterator<RoutingPolicy> it1 = policies.values().iterator(), it2 = serialized.values().iterator(); it1.hasNext();) {
+ for (Iterator<RoutingPolicy> it1 = policies.iterator(), it2 = serialized.iterator(); it1.hasNext();) {
var expected = it1.next();
var actual = it2.next();
assertEquals(expected.id(), actual.id());
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
index 8b8a7fc0c62..6e8445102c3 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
@@ -358,9 +358,6 @@ public class ApplicationApiTest extends ControllerContainerTest {
ATHENZ_TENANT_DOMAIN_2,
id2.application());
- // Trigger upgrade and then application change
- deploymentTester.applications().deploymentTrigger().triggerChange(id2, Change.of(Version.fromString("7.0")));
-
// POST an application package and a test jar, submitting a new application for production deployment.
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
@@ -389,11 +386,6 @@ public class ApplicationApiTest extends ControllerContainerTest {
// GET application having both change and outstanding change
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
- .userIdentity(USER_ID),
- new File("application2.json"));
-
- // GET application having both change and outstanding change
- tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2", GET)
.screwdriverIdentity(SCREWDRIVER_ID),
new File("application2.json"));
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application2-with-patches.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application2-with-patches.json
index 28732acb1df..df3f9699677 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application2-with-patches.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application2-with-patches.json
@@ -23,9 +23,6 @@
{
"instance": "instance1",
"deploying": {
- "version": "7"
- },
- "outstandingChange": {
"revision": {
"buildNumber": 1,
"hash": "1.0.1-commit1",
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application2.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application2.json
index d009af005e4..9ef46247629 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application2.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application2.json
@@ -22,9 +22,6 @@
{
"instance": "instance1",
"deploying": {
- "version": "7"
- },
- "outstandingChange": {
"revision": {
"buildNumber": 1,
"hash": "1.0.1-commit1",
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
index c40cb20a0bc..1919de33e8b 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
@@ -41,7 +41,6 @@ import java.time.Duration;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.ArrayList;
-import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
@@ -124,22 +123,33 @@ public class RoutingPoliciesTest {
context2.submit(applicationPackage3).deferLoadBalancerProvisioningIn(Environment.prod).deploy();
tester.assertTargets(context2.instanceId(), EndpointId.of("r0"), 0, zone1, zone2);
+ // A deployment of app2 is removed
+ var applicationPackage4 = applicationPackageBuilder()
+ .region(zone1.region())
+ .endpoint("r0", "c0")
+ .allow(ValidationId.globalEndpointChange)
+ .allow(ValidationId.deploymentRemoval)
+ .build();
+ context2.submit(applicationPackage4).deferLoadBalancerProvisioningIn(Environment.prod).deploy();
+ tester.assertTargets(context2.instanceId(), EndpointId.of("r0"), 0, zone1);
+ assertEquals(1, tester.policiesOf(context2.instanceId()).size());
+
// All endpoints for app1 are removed
- ApplicationPackage applicationPackage4 = applicationPackageBuilder()
+ ApplicationPackage applicationPackage5 = applicationPackageBuilder()
.region(zone1.region())
.region(zone2.region())
.region(zone3.region())
.allow(ValidationId.globalEndpointChange)
.build();
- context1.submit(applicationPackage4).deferLoadBalancerProvisioningIn(Environment.prod).deploy();
+ context1.submit(applicationPackage5).deferLoadBalancerProvisioningIn(Environment.prod).deploy();
tester.assertTargets(context1.instanceId(), EndpointId.of("r0"), 0);
tester.assertTargets(context1.instanceId(), EndpointId.of("r1"), 0);
tester.assertTargets(context1.instanceId(), EndpointId.of("r2"), 0);
var policies = tester.policiesOf(context1.instanceId());
assertEquals(clustersPerZone * numberOfDeployments, policies.size());
assertTrue("Rotation membership is removed from all policies",
- policies.stream().allMatch(policy -> policy.instanceEndpoints().isEmpty()));
- assertEquals("Rotations for " + context2.application() + " are not removed", 2, tester.aliasDataOf(endpoint4).size());
+ policies.asList().stream().allMatch(policy -> policy.instanceEndpoints().isEmpty()));
+ assertEquals("Rotations for " + context2.application() + " are not removed", 1, tester.aliasDataOf(endpoint4).size());
}
@Test
@@ -306,8 +316,8 @@ public class RoutingPoliciesTest {
"c1.app1.tenant1.us-central-1.vespa.oath.cloud"
);
assertEquals(expectedRecords, tester.recordNames());
- assertTrue("Removes stale routing policies " + context2.application(), tester.routingPolicies().get(context2.instanceId()).isEmpty());
- assertEquals("Keeps routing policies for " + context1.application(), 4, tester.routingPolicies().get(context1.instanceId()).size());
+ assertTrue("Removes stale routing policies " + context2.application(), tester.routingPolicies().read(context2.instanceId()).isEmpty());
+ assertEquals("Keeps routing policies for " + context1.application(), 4, tester.routingPolicies().read(context1.instanceId()).size());
}
@Test
@@ -490,13 +500,13 @@ public class RoutingPoliciesTest {
tester.assertTargets(context.instanceId(), EndpointId.of("r1"), 0, zone2);
// Status details is stored in policy
- var policy1 = tester.routingPolicies().get(context.deploymentIdIn(zone1)).values().iterator().next();
+ var policy1 = tester.routingPolicies().read(context.deploymentIdIn(zone1)).first().get();
assertEquals(RoutingStatus.Value.out, policy1.status().routingStatus().value());
assertEquals(RoutingStatus.Agent.tenant, policy1.status().routingStatus().agent());
assertEquals(changedAt.truncatedTo(ChronoUnit.MILLIS), policy1.status().routingStatus().changedAt());
// Other zone remains in
- var policy2 = tester.routingPolicies().get(context.deploymentIdIn(zone2)).values().iterator().next();
+ var policy2 = tester.routingPolicies().read(context.deploymentIdIn(zone2)).first().get();
assertEquals(RoutingStatus.Value.in, policy2.status().routingStatus().value());
assertEquals(RoutingStatus.Agent.system, policy2.status().routingStatus().agent());
assertEquals(Instant.EPOCH, policy2.status().routingStatus().changedAt());
@@ -515,7 +525,7 @@ public class RoutingPoliciesTest {
tester.assertTargets(context.instanceId(), EndpointId.of("r0"), 0, zone1, zone2);
tester.assertTargets(context.instanceId(), EndpointId.of("r1"), 0, zone1, zone2);
- policy1 = tester.routingPolicies().get(context.deploymentIdIn(zone1)).values().iterator().next();
+ policy1 = tester.routingPolicies().read(context.deploymentIdIn(zone1)).first().get();
assertEquals(RoutingStatus.Value.in, policy1.status().routingStatus().value());
assertEquals(RoutingStatus.Agent.tenant, policy1.status().routingStatus().agent());
assertEquals(changedAt.truncatedTo(ChronoUnit.MILLIS), policy1.status().routingStatus().changedAt());
@@ -568,9 +578,9 @@ public class RoutingPoliciesTest {
tester.assertTargets(context1.instanceId(), EndpointId.defaultId(), 0, zone1);
tester.assertTargets(context2.instanceId(), EndpointId.defaultId(), 0, zone1);
for (var context : contexts) {
- var policies = tester.routingPolicies().get(context.instanceId());
+ var policies = tester.routingPolicies().read(context.instanceId());
assertTrue("Global routing status for policy remains " + RoutingStatus.Value.in,
- policies.values().stream()
+ policies.asList().stream()
.map(RoutingPolicy::status)
.map(RoutingPolicy.Status::routingStatus)
.map(RoutingStatus::value)
@@ -668,7 +678,7 @@ public class RoutingPoliciesTest {
// Setting zone (containing active deployment) out puts all deployments in
tester.routingPolicies().setRoutingStatus(zone1, RoutingStatus.Value.out);
context.flushDnsUpdates();
- assertEquals(RoutingStatus.Value.out, tester.routingPolicies().get(zone1).routingStatus().value());
+ assertEquals(RoutingStatus.Value.out, tester.routingPolicies().read(zone1).routingStatus().value());
tester.assertTargets(context.instanceId(), EndpointId.of("r0"), 0, ImmutableMap.of(zone1, 0L, zone2, 0L));
// Setting zone back in removes the currently inactive deployment
@@ -680,7 +690,7 @@ public class RoutingPoliciesTest {
tester.routingPolicies().setRoutingStatus(context.deploymentIdIn(zone2), RoutingStatus.Value.in,
RoutingStatus.Agent.tenant);
context.flushDnsUpdates();
- for (var policy : tester.routingPolicies().get(context.instanceId()).values()) {
+ for (var policy : tester.routingPolicies().read(context.instanceId())) {
assertSame(RoutingStatus.Value.in, policy.status().routingStatus().value());
}
tester.assertTargets(context.instanceId(), EndpointId.of("r0"), 0, zone1, zone2);
@@ -693,7 +703,7 @@ public class RoutingPoliciesTest {
RecordName name = RecordName.from("cfg.prod.us-west-1.test.vip");
tester.provisionLoadBalancers(1, app, zone1);
- tester.routingPolicies().refresh(app, DeploymentSpec.empty, zone1);
+ tester.routingPolicies().refresh(new DeploymentId(app, zone1), DeploymentSpec.empty);
new NameServiceDispatcher(tester.tester.controller(), Duration.ofSeconds(Integer.MAX_VALUE)).run();
List<Record> records = tester.controllerTester().nameService().findRecords(Record.Type.CNAME, name);
@@ -793,18 +803,12 @@ public class RoutingPoliciesTest {
var applicationPackage = applicationPackageBuilder()
.instances("beta,main")
.region(zone1.region())
- .region(zone2.region())
.applicationEndpoint("a0", "c0", "us-west-1",
Map.of(betaInstance.instance(), 2,
mainInstance.instance(), 8))
- .applicationEndpoint("a1", "c1", "us-central-1",
- Map.of(betaInstance.instance(), 4,
- mainInstance.instance(), 0))
.build();
- for (var zone : List.of(zone1, zone2)) {
- tester.provisionLoadBalancers(2, betaInstance, zone);
- tester.provisionLoadBalancers(2, mainInstance, zone);
- }
+ tester.provisionLoadBalancers(1, betaInstance, zone1);
+ tester.provisionLoadBalancers(1, mainInstance, zone1);
// Deploy both instances
betaContext.submit(applicationPackage).deploy();
@@ -812,35 +816,36 @@ public class RoutingPoliciesTest {
// Application endpoint points to both instances with correct weights
DeploymentId betaZone1 = betaContext.deploymentIdIn(zone1);
DeploymentId mainZone1 = mainContext.deploymentIdIn(zone1);
- DeploymentId betaZone2 = betaContext.deploymentIdIn(zone2);
- DeploymentId mainZone2 = mainContext.deploymentIdIn(zone2);
tester.assertTargets(application, EndpointId.of("a0"), ClusterSpec.Id.from("c0"), 0,
Map.of(betaZone1, 2,
mainZone1, 8));
- tester.assertTargets(application, EndpointId.of("a1"), ClusterSpec.Id.from("c1"), 1,
- Map.of(betaZone2, 4,
- mainZone2, 0));
- // Changing routing status updates weight
+ // Changing routing status removes deployment from DNS
tester.routingPolicies().setRoutingStatus(mainZone1, RoutingStatus.Value.out, RoutingStatus.Agent.tenant);
betaContext.flushDnsUpdates();
tester.assertTargets(application, EndpointId.of("a0"), ClusterSpec.Id.from("c0"), 0,
- Map.of(betaZone1, 2,
- mainZone1, 0));
- tester.routingPolicies().setRoutingStatus(mainZone1, RoutingStatus.Value.in, RoutingStatus.Agent.tenant);
+ Map.of(betaZone1, 2));
+
+        // Changing routing status for the remaining deployment puts all deployments back IN: taking out the last
+        // deployment would leave no targets, so the system treats all of them as active instead
+ tester.routingPolicies().setRoutingStatus(betaZone1, RoutingStatus.Value.out, RoutingStatus.Agent.tenant);
betaContext.flushDnsUpdates();
tester.assertTargets(application, EndpointId.of("a0"), ClusterSpec.Id.from("c0"), 0,
Map.of(betaZone1, 2,
mainZone1, 8));
- // Changing routing status preserves weights if change in routing status would result in a zero weight sum
- // Otherwise this would result in both targets have weight 0 and thus traffic would be distributed evenly across
- // all targets which does not match intention of taking out a deployment
- tester.routingPolicies().setRoutingStatus(betaZone2, RoutingStatus.Value.out, RoutingStatus.Agent.tenant);
+ // Activating main deployment allows us to deactivate the beta deployment
+ tester.routingPolicies().setRoutingStatus(mainZone1, RoutingStatus.Value.in, RoutingStatus.Agent.tenant);
betaContext.flushDnsUpdates();
- tester.assertTargets(application, EndpointId.of("a1"), ClusterSpec.Id.from("c1"), 1,
- Map.of(betaZone2, 4,
- mainZone2, 0));
+ tester.assertTargets(application, EndpointId.of("a0"), ClusterSpec.Id.from("c0"), 0,
+ Map.of(mainZone1, 8));
+
+ // Activate all deployments again
+ tester.routingPolicies().setRoutingStatus(betaZone1, RoutingStatus.Value.in, RoutingStatus.Agent.tenant);
+ betaContext.flushDnsUpdates();
+ tester.assertTargets(application, EndpointId.of("a0"), ClusterSpec.Id.from("c0"), 0,
+ Map.of(betaZone1, 2,
+ mainZone1, 8));
}
/** Returns an application package builder that satisfies requirements for a directly routed endpoint */
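
Editorial aside on the weights asserted above: assuming the usual proportional interpretation of weighted DNS targets, weights 2 and 8 would give the beta instance 2 / (2 + 8) = 20% of the application endpoint's traffic and the main instance 80%; with the new behaviour, taking a deployment out removes its record entirely rather than setting its weight to 0.
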
@@ -936,8 +941,8 @@ public class RoutingPoliciesTest {
provisionLoadBalancers(clustersPerZone, application, false, zones);
}
- private Collection<RoutingPolicy> policiesOf(ApplicationId instance) {
- return tester.controller().curator().readRoutingPolicies(instance).values();
+ private RoutingPolicyList policiesOf(ApplicationId instance) {
+ return tester.controller().routing().policies().read(instance);
}
private Set<String> recordNames() {
@@ -995,8 +1000,8 @@ public class RoutingPoliciesTest {
for (var zone : zoneWeights.keySet()) {
DeploymentId deployment = new DeploymentId(instance, zone);
EndpointList regionEndpoints = tester.controller().routing().readEndpointsOf(deployment)
- .cluster(cluster)
- .scope(Endpoint.Scope.weighted);
+ .cluster(cluster)
+ .scope(Endpoint.Scope.weighted);
Endpoint regionEndpoint = regionEndpoints.first().orElseThrow(() -> new IllegalArgumentException("No region endpoint found for " + cluster + " in " + deployment));
zonesByRegionEndpoint.computeIfAbsent(regionEndpoint.dnsName(), (k) -> new ArrayList<>())
.add(zone);
diff --git a/dist/vespa.spec b/dist/vespa.spec
index e53444c9d13..1416109ab40 100644
--- a/dist/vespa.spec
+++ b/dist/vespa.spec
@@ -542,7 +542,7 @@ mvn --batch-mode -e -N io.takari:maven:wrapper -Dmaven=3.6.3
.
make %{_smp_mflags}
-make -C client/go
+VERSION=%{version} make -C client/go
%endif
%install
@@ -554,6 +554,11 @@ cp -r %{installdir} %{buildroot}
make install DESTDIR=%{buildroot}
cp client/go/bin/vespa %{buildroot}%{_prefix}/bin/vespa
%endif
+# Otherwise installation may fail in find-debuginfo.sh/dwz with:
+# dwz: dwz.c:9899: read_dwarf: Assertion `data != ((void *)0) && data->d_buf != ((void *)0)' failed.
+%if 0%{?el7}
+strip %{buildroot}%{_prefix}/bin/vespa
+%endif
%if %{_create_vespa_service}
mkdir -p %{buildroot}/usr/lib/systemd/system
diff --git a/document/src/main/java/com/yahoo/document/DocumentTypeManager.java b/document/src/main/java/com/yahoo/document/DocumentTypeManager.java
index a6f2923d68f..55320eb8501 100644
--- a/document/src/main/java/com/yahoo/document/DocumentTypeManager.java
+++ b/document/src/main/java/com/yahoo/document/DocumentTypeManager.java
@@ -165,6 +165,7 @@ public class DocumentTypeManager {
}
}
+ @SuppressWarnings("deprecation")
DataType getDataTypeAndReturnTemporary(int code, String detailedType) {
if (hasDataType(code)) {
return getDataType(code, detailedType);
@@ -186,6 +187,7 @@ public class DocumentTypeManager {
*
* @param type The datatype to register
*/
+ @SuppressWarnings("deprecation")
void registerSingleType(DataType type) {
if (type instanceof TensorDataType) return; // built-in dynamic: Created on the fly
if (dataTypes.containsKey(type.getId())) {
@@ -315,6 +317,7 @@ public class DocumentTypeManager {
}
}
+ @SuppressWarnings("deprecation")
private void replaceTemporaryTypes(DataType type, List<DataType> seenStructs) {
if (type instanceof WeightedSetDataType) {
replaceTemporaryTypesInWeightedSet((WeightedSetDataType) type, seenStructs);
@@ -356,6 +359,7 @@ public class DocumentTypeManager {
}
}
+ @SuppressWarnings("deprecation")
private void replaceTemporaryTypeInReference(ReferenceDataType referenceDataType) {
if (referenceDataType.getTargetType() instanceof TemporaryStructuredDataType) {
referenceDataType.setTargetType((DocumentType) getDataType(referenceDataType.getTargetType().getId()));
@@ -363,6 +367,7 @@ public class DocumentTypeManager {
// TODO should we recursively invoke replaceTemporaryTypes for the target type? It should only ever be a doc type
}
+ @SuppressWarnings("deprecation")
private void replaceTemporaryTypesInCollection(CollectionDataType collectionDataType, List<DataType> seenStructs) {
if (collectionDataType.getNestedType() instanceof TemporaryDataType) {
collectionDataType.setNestedType(getDataType(collectionDataType.getNestedType().getCode(), ""));
@@ -371,6 +376,7 @@ public class DocumentTypeManager {
}
}
+ @SuppressWarnings("deprecation")
private void replaceTemporaryTypesInMap(MapDataType mapDataType, List<DataType> seenStructs) {
if (mapDataType.getValueType() instanceof TemporaryDataType) {
mapDataType.setValueType(getDataType(mapDataType.getValueType().getCode(), ""));
@@ -385,6 +391,7 @@ public class DocumentTypeManager {
}
}
+ @SuppressWarnings("deprecation")
private void replaceTemporaryTypesInWeightedSet(WeightedSetDataType weightedSetDataType, List<DataType> seenStructs) {
if (weightedSetDataType.getNestedType() instanceof TemporaryDataType) {
weightedSetDataType.setNestedType(getDataType(weightedSetDataType.getNestedType().getCode(), ""));
diff --git a/document/src/main/java/com/yahoo/document/DocumentTypeManagerConfigurer.java b/document/src/main/java/com/yahoo/document/DocumentTypeManagerConfigurer.java
index 5ba3f4d6a6a..c545b6f1491 100644
--- a/document/src/main/java/com/yahoo/document/DocumentTypeManagerConfigurer.java
+++ b/document/src/main/java/com/yahoo/document/DocumentTypeManagerConfigurer.java
@@ -194,6 +194,7 @@ public class DocumentTypeManagerConfigurer implements ConfigSubscriber.SingleSub
manager.register(type);
}
+ @SuppressWarnings("deprecation")
private void registerReferenceType(int id, DocumentmanagerConfig.Datatype.Referencetype refType) {
ReferenceDataType referenceType;
if (manager.hasDataType(refType.target_type_id())) {
diff --git a/document/src/main/java/com/yahoo/document/ReferenceDataType.java b/document/src/main/java/com/yahoo/document/ReferenceDataType.java
index 78a30c0fcf2..c3b5f6590b6 100644
--- a/document/src/main/java/com/yahoo/document/ReferenceDataType.java
+++ b/document/src/main/java/com/yahoo/document/ReferenceDataType.java
@@ -27,6 +27,7 @@ public class ReferenceDataType extends DataType {
* of the target document type might not yet be known. The temporary data type should be
* replaced later using setTargetType().
*/
+ @SuppressWarnings("deprecation")
public ReferenceDataType(TemporaryStructuredDataType temporaryTargetType, int id) {
this((StructuredDataType) temporaryTargetType, id);
}
@@ -54,6 +55,7 @@ public class ReferenceDataType extends DataType {
/**
* Creates a new type where the numeric ID is based on the hash of targetType
*/
+ @SuppressWarnings("deprecation")
public static ReferenceDataType createWithInferredId(TemporaryStructuredDataType targetType) {
return new ReferenceDataType(targetType);
}
@@ -67,6 +69,7 @@ public class ReferenceDataType extends DataType {
* @throws IllegalStateException if the previously stored target type is already a concrete
* instance (not TemporaryStructuredDataType).
*/
+ @SuppressWarnings("deprecation")
public void setTargetType(StructuredDataType targetType) {
if (! (this.targetType instanceof TemporaryStructuredDataType)) {
throw new IllegalStateException(String.format(
diff --git a/document/src/main/java/com/yahoo/document/TemporaryDataType.java b/document/src/main/java/com/yahoo/document/TemporaryDataType.java
index c32e271737c..71f36ecde90 100644
--- a/document/src/main/java/com/yahoo/document/TemporaryDataType.java
+++ b/document/src/main/java/com/yahoo/document/TemporaryDataType.java
@@ -5,7 +5,9 @@ import com.yahoo.document.datatypes.FieldValue;
/**
* @author Einar M R Rosenvinge
+ * @deprecated will be removed soon
*/
+@Deprecated
class TemporaryDataType extends DataType {
private final String detailedType;
diff --git a/document/src/main/java/com/yahoo/document/TemporaryStructuredDataType.java b/document/src/main/java/com/yahoo/document/TemporaryStructuredDataType.java
index 9d648367c4e..865310e7009 100644
--- a/document/src/main/java/com/yahoo/document/TemporaryStructuredDataType.java
+++ b/document/src/main/java/com/yahoo/document/TemporaryStructuredDataType.java
@@ -5,8 +5,10 @@ package com.yahoo.document;
* Internal class, DO NOT USE!!
* Only public because it must be used from com.yahoo.searchdefinition.parser.
*
+ * @deprecated will be removed soon
* @author Einar M R Rosenvinge
*/
+@Deprecated // TODO: Remove on Vespa 8
public class TemporaryStructuredDataType extends StructDataType {
TemporaryStructuredDataType(String name) {
diff --git a/document/src/test/java/com/yahoo/document/DocumentTypeManagerTestCase.java b/document/src/test/java/com/yahoo/document/DocumentTypeManagerTestCase.java
index 1a0e2ad1e2b..3fa81dd8d5b 100644
--- a/document/src/test/java/com/yahoo/document/DocumentTypeManagerTestCase.java
+++ b/document/src/test/java/com/yahoo/document/DocumentTypeManagerTestCase.java
@@ -525,6 +525,7 @@ search annotationsimplicitstruct {
assertReferenceTypePresentInManager(manager, 87654321, "referenced_type2");
}
+ @SuppressWarnings("deprecation")
@Test
public void no_temporary_targets_in_references_or_names() {
DocumentTypeManager manager = createConfiguredManager("file:src/test/document/documentmanager.replaced_temporary.cfg");
diff --git a/document/src/test/java/com/yahoo/document/ReferenceDataTypeTestCase.java b/document/src/test/java/com/yahoo/document/ReferenceDataTypeTestCase.java
index d44f0880395..53c8a0ecc94 100644
--- a/document/src/test/java/com/yahoo/document/ReferenceDataTypeTestCase.java
+++ b/document/src/test/java/com/yahoo/document/ReferenceDataTypeTestCase.java
@@ -72,6 +72,7 @@ public class ReferenceDataTypeTestCase {
assertTrue(fixture.refType.isValueCompatible(fixture.refTypeClone.createFieldValue()));
}
+ @SuppressWarnings("deprecation")
@Test
public void reference_type_can_be_constructed_with_temporary_structured_data_type() {
TemporaryStructuredDataType tempType = new TemporaryStructuredDataType("cooldoc");
@@ -81,6 +82,7 @@ public class ReferenceDataTypeTestCase {
assertEquals(tempType, refType.getTargetType());
}
+ @SuppressWarnings("deprecation")
@Test
public void can_replace_temporary_target_data_type() {
TemporaryStructuredDataType tempType = new TemporaryStructuredDataType("cooldoc");
diff --git a/document/src/test/java/com/yahoo/document/TemporaryDataTypeTestCase.java b/document/src/test/java/com/yahoo/document/TemporaryDataTypeTestCase.java
index 0e85e085316..80154891d83 100644
--- a/document/src/test/java/com/yahoo/document/TemporaryDataTypeTestCase.java
+++ b/document/src/test/java/com/yahoo/document/TemporaryDataTypeTestCase.java
@@ -10,6 +10,7 @@ import static org.junit.Assert.assertNull;
/**
* @author Einar M R Rosenvinge
*/
+@SuppressWarnings("deprecation")
public class TemporaryDataTypeTestCase {
@Test
diff --git a/document/src/test/java/com/yahoo/document/TemporaryStructuredDataTypeTestCase.java b/document/src/test/java/com/yahoo/document/TemporaryStructuredDataTypeTestCase.java
index 5e4530d0886..3b5cd29b90d 100644
--- a/document/src/test/java/com/yahoo/document/TemporaryStructuredDataTypeTestCase.java
+++ b/document/src/test/java/com/yahoo/document/TemporaryStructuredDataTypeTestCase.java
@@ -10,6 +10,7 @@ import static org.junit.Assert.assertNotEquals;
* @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a>
* @since 5.1.10
*/
+@SuppressWarnings("deprecation")
public class TemporaryStructuredDataTypeTestCase {
@Test
public void basic() {
diff --git a/jdisc_core/abi-spec.json b/jdisc_core/abi-spec.json
new file mode 100644
index 00000000000..497fdfad501
--- /dev/null
+++ b/jdisc_core/abi-spec.json
@@ -0,0 +1,1265 @@
+{
+ "com.yahoo.jdisc.AbstractResource": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "com.yahoo.jdisc.SharedResource"
+ ],
+ "attributes": [
+ "public",
+ "abstract"
+ ],
+ "methods": [
+ "protected void <init>()",
+ "public final com.yahoo.jdisc.ResourceReference refer()",
+ "public final com.yahoo.jdisc.ResourceReference refer(java.lang.Object)",
+ "public final void release()",
+ "public final int retainCount()",
+ "protected void destroy()",
+ "public java.lang.String currentState()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.Container": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "com.yahoo.jdisc.SharedResource",
+ "com.yahoo.jdisc.Timer"
+ ],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [
+ "public abstract com.yahoo.jdisc.handler.RequestHandler resolveHandler(com.yahoo.jdisc.Request)",
+ "public abstract java.lang.Object getInstance(com.google.inject.Key)",
+ "public abstract java.lang.Object getInstance(java.lang.Class)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.HeaderFields": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "java.util.Map"
+ ],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public int size()",
+ "public boolean isEmpty()",
+ "public boolean containsKey(java.lang.Object)",
+ "public boolean containsValue(java.lang.Object)",
+ "public boolean contains(java.lang.String, java.lang.String)",
+ "public boolean containsIgnoreCase(java.lang.String, java.lang.String)",
+ "public void add(java.lang.String, java.lang.String)",
+ "public void add(java.lang.String, java.util.List)",
+ "public void addAll(java.util.Map)",
+ "public java.util.List put(java.lang.String, java.lang.String)",
+ "public java.util.List put(java.lang.String, java.util.List)",
+ "public void putAll(java.util.Map)",
+ "public java.util.List remove(java.lang.Object)",
+ "public boolean remove(java.lang.String, java.lang.String)",
+ "public void clear()",
+ "public java.util.List get(java.lang.Object)",
+ "public java.lang.String getFirst(java.lang.String)",
+ "public boolean isTrue(java.lang.String)",
+ "public java.util.Set keySet()",
+ "public java.util.Collection values()",
+ "public java.util.Set entrySet()",
+ "public java.lang.String toString()",
+ "public java.util.List entries()",
+ "public boolean equals(java.lang.Object)",
+ "public int hashCode()",
+ "public bridge synthetic java.lang.Object remove(java.lang.Object)",
+ "public bridge synthetic java.lang.Object put(java.lang.Object, java.lang.Object)",
+ "public bridge synthetic java.lang.Object get(java.lang.Object)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.Metric$Context": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [],
+ "fields": []
+ },
+ "com.yahoo.jdisc.Metric": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [
+ "public abstract void set(java.lang.String, java.lang.Number, com.yahoo.jdisc.Metric$Context)",
+ "public abstract void add(java.lang.String, java.lang.Number, com.yahoo.jdisc.Metric$Context)",
+ "public abstract com.yahoo.jdisc.Metric$Context createContext(java.util.Map)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.NoopSharedResource": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "com.yahoo.jdisc.SharedResource"
+ ],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public final com.yahoo.jdisc.ResourceReference refer()",
+ "public final com.yahoo.jdisc.ResourceReference refer(java.lang.Object)",
+ "public final void release()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.ReferencedResource": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "java.lang.AutoCloseable"
+ ],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(com.yahoo.jdisc.SharedResource, com.yahoo.jdisc.ResourceReference)",
+ "public com.yahoo.jdisc.SharedResource getResource()",
+ "public com.yahoo.jdisc.ResourceReference getReference()",
+ "public void close()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.References": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public static com.yahoo.jdisc.ResourceReference fromResource(com.yahoo.jdisc.SharedResource)"
+ ],
+ "fields": [
+ "public static final com.yahoo.jdisc.ResourceReference NOOP_REFERENCE"
+ ]
+ },
+ "com.yahoo.jdisc.Request$RequestType": {
+ "superClass": "java.lang.Enum",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "final",
+ "enum"
+ ],
+ "methods": [
+ "public static com.yahoo.jdisc.Request$RequestType[] values()",
+ "public static com.yahoo.jdisc.Request$RequestType valueOf(java.lang.String)"
+ ],
+ "fields": [
+ "public static final enum com.yahoo.jdisc.Request$RequestType READ",
+ "public static final enum com.yahoo.jdisc.Request$RequestType WRITE",
+ "public static final enum com.yahoo.jdisc.Request$RequestType MONITORING"
+ ]
+ },
+ "com.yahoo.jdisc.Request": {
+ "superClass": "com.yahoo.jdisc.AbstractResource",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(com.yahoo.jdisc.service.CurrentContainer, java.net.URI)",
+ "public void <init>(com.yahoo.jdisc.service.CurrentContainer, java.net.URI, boolean)",
+ "public void <init>(com.yahoo.jdisc.Request, java.net.URI)",
+ "public com.yahoo.jdisc.Container container()",
+ "public java.net.URI getUri()",
+ "public com.yahoo.jdisc.Request setUri(java.net.URI)",
+ "public boolean isServerRequest()",
+ "public com.yahoo.jdisc.Request setServerRequest(boolean)",
+ "public com.yahoo.jdisc.application.BindingMatch getBindingMatch()",
+ "public com.yahoo.jdisc.Request setBindingMatch(com.yahoo.jdisc.application.BindingMatch)",
+ "public java.util.Map context()",
+ "public com.yahoo.jdisc.HeaderFields headers()",
+ "public void setTimeoutManager(com.yahoo.jdisc.TimeoutManager)",
+ "public com.yahoo.jdisc.TimeoutManager getTimeoutManager()",
+ "public void setTimeout(long, java.util.concurrent.TimeUnit)",
+ "public java.lang.Long getTimeout(java.util.concurrent.TimeUnit)",
+ "public java.lang.Long timeRemaining(java.util.concurrent.TimeUnit)",
+ "public long timeElapsed(java.util.concurrent.TimeUnit)",
+ "public long creationTime(java.util.concurrent.TimeUnit)",
+ "public boolean isCancelled()",
+ "public void cancel()",
+ "public com.yahoo.jdisc.handler.ContentChannel connect(com.yahoo.jdisc.handler.ResponseHandler)",
+ "protected void destroy()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.ResourceReference": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "java.lang.AutoCloseable"
+ ],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [
+ "public abstract void close()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.Response$Status": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [],
+ "fields": [
+ "public static final int CONTINUE",
+ "public static final int SWITCHING_PROTOCOLS",
+ "public static final int PROCESSING",
+ "public static final int OK",
+ "public static final int CREATED",
+ "public static final int ACCEPTED",
+ "public static final int NON_AUTHORITATIVE_INFORMATION",
+ "public static final int NO_CONTENT",
+ "public static final int RESET_CONTENT",
+ "public static final int PARTIAL_CONTENT",
+ "public static final int MULTI_STATUS",
+ "public static final int MULTIPLE_CHOICES",
+ "public static final int MOVED_PERMANENTLY",
+ "public static final int FOUND",
+ "public static final int SEE_OTHER",
+ "public static final int NOT_MODIFIED",
+ "public static final int USE_PROXY",
+ "public static final int TEMPORARY_REDIRECT",
+ "public static final int BAD_REQUEST",
+ "public static final int UNAUTHORIZED",
+ "public static final int PAYMENT_REQUIRED",
+ "public static final int FORBIDDEN",
+ "public static final int NOT_FOUND",
+ "public static final int METHOD_NOT_ALLOWED",
+ "public static final int NOT_ACCEPTABLE",
+ "public static final int PROXY_AUTHENTICATION_REQUIRED",
+ "public static final int REQUEST_TIMEOUT",
+ "public static final int CONFLICT",
+ "public static final int GONE",
+ "public static final int LENGTH_REQUIRED",
+ "public static final int PRECONDITION_FAILED",
+ "public static final int REQUEST_TOO_LONG",
+ "public static final int REQUEST_URI_TOO_LONG",
+ "public static final int UNSUPPORTED_MEDIA_TYPE",
+ "public static final int REQUESTED_RANGE_NOT_SATISFIABLE",
+ "public static final int EXPECTATION_FAILED",
+ "public static final int INSUFFICIENT_SPACE_ON_RESOURCE",
+ "public static final int METHOD_FAILURE",
+ "public static final int UNPROCESSABLE_ENTITY",
+ "public static final int LOCKED",
+ "public static final int FAILED_DEPENDENCY",
+ "public static final int TOO_MANY_REQUESTS",
+ "public static final int INTERNAL_SERVER_ERROR",
+ "public static final int NOT_IMPLEMENTED",
+ "public static final int BAD_GATEWAY",
+ "public static final int SERVICE_UNAVAILABLE",
+ "public static final int GATEWAY_TIMEOUT",
+ "public static final int VERSION_NOT_SUPPORTED",
+ "public static final int INSUFFICIENT_STORAGE"
+ ]
+ },
+ "com.yahoo.jdisc.Response": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(int)",
+ "public void <init>(int, java.lang.Throwable)",
+ "public java.util.Map context()",
+ "public com.yahoo.jdisc.HeaderFields headers()",
+ "public int getStatus()",
+ "public com.yahoo.jdisc.Response setStatus(int)",
+ "public java.lang.Throwable getError()",
+ "public com.yahoo.jdisc.Response setError(java.lang.Throwable)",
+ "public void setRequestType(com.yahoo.jdisc.Request$RequestType)",
+ "public com.yahoo.jdisc.Request$RequestType getRequestType()",
+ "public static void dispatchTimeout(com.yahoo.jdisc.handler.ResponseHandler)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.SharedResource$Debug": {
+ "superClass": "java.lang.Enum",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "final",
+ "enum"
+ ],
+ "methods": [
+ "public static com.yahoo.jdisc.SharedResource$Debug[] values()",
+ "public static com.yahoo.jdisc.SharedResource$Debug valueOf(java.lang.String)"
+ ],
+ "fields": [
+ "public static final enum com.yahoo.jdisc.SharedResource$Debug NO",
+ "public static final enum com.yahoo.jdisc.SharedResource$Debug SIMPLE",
+ "public static final enum com.yahoo.jdisc.SharedResource$Debug STACK"
+ ]
+ },
+ "com.yahoo.jdisc.SharedResource": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [
+ "public com.yahoo.jdisc.ResourceReference refer()",
+ "public com.yahoo.jdisc.ResourceReference refer(java.lang.Object)",
+ "public abstract void release()"
+ ],
+ "fields": [
+ "public static final java.lang.String SYSTEM_PROPERTY_NAME_DEBUG",
+ "public static final com.yahoo.jdisc.SharedResource$Debug DEBUG"
+ ]
+ },
+ "com.yahoo.jdisc.TimeoutManager": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [
+ "public abstract void scheduleTimeout(com.yahoo.jdisc.Request)",
+ "public void unscheduleTimeout(com.yahoo.jdisc.Request)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.Timer$ClockAdapter": {
+ "superClass": "java.time.Clock",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public java.time.ZoneId getZone()",
+ "public java.time.Clock withZone(java.time.ZoneId)",
+ "public java.time.Instant instant()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.Timer": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [
+ "public abstract long currentTimeMillis()",
+ "public java.time.Instant currentTime()",
+ "public java.time.Clock toUtcClock()",
+ "public static com.yahoo.jdisc.Timer fromClock(java.time.Clock)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.handler.AbstractRequestHandler": {
+ "superClass": "com.yahoo.jdisc.AbstractResource",
+ "interfaces": [
+ "com.yahoo.jdisc.handler.RequestHandler"
+ ],
+ "attributes": [
+ "public",
+ "abstract"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public void handleTimeout(com.yahoo.jdisc.Request, com.yahoo.jdisc.handler.ResponseHandler)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.handler.BindingNotFoundException": {
+ "superClass": "java.lang.RuntimeException",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "final"
+ ],
+ "methods": [
+ "public void <init>(java.net.URI)",
+ "public java.net.URI uri()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.handler.BlockingContentWriter": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "final"
+ ],
+ "methods": [
+ "public void <init>(com.yahoo.jdisc.handler.ContentChannel)",
+ "public void write(java.nio.ByteBuffer)",
+ "public void close()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.handler.BufferedContentChannel": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "com.yahoo.jdisc.handler.ContentChannel"
+ ],
+ "attributes": [
+ "public",
+ "final"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public void connectTo(com.yahoo.jdisc.handler.ContentChannel)",
+ "public boolean isConnected()",
+ "public com.yahoo.jdisc.handler.ReadableContentChannel toReadable()",
+ "public com.yahoo.jdisc.handler.ContentInputStream toStream()",
+ "public void write(java.nio.ByteBuffer, com.yahoo.jdisc.handler.CompletionHandler)",
+ "public void close(com.yahoo.jdisc.handler.CompletionHandler)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.handler.CallableRequestDispatch": {
+ "superClass": "com.yahoo.jdisc.handler.RequestDispatch",
+ "interfaces": [
+ "java.util.concurrent.Callable"
+ ],
+ "attributes": [
+ "public",
+ "abstract"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public final com.yahoo.jdisc.Response call()",
+ "public bridge synthetic java.lang.Object call()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.handler.CallableResponseDispatch": {
+ "superClass": "com.yahoo.jdisc.handler.ResponseDispatch",
+ "interfaces": [
+ "java.util.concurrent.Callable"
+ ],
+ "attributes": [
+ "public",
+ "abstract"
+ ],
+ "methods": [
+ "public void <init>(com.yahoo.jdisc.handler.ResponseHandler)",
+ "public final java.lang.Boolean call()",
+ "public bridge synthetic java.lang.Object call()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.handler.CompletionHandler": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [
+ "public abstract void completed()",
+ "public abstract void failed(java.lang.Throwable)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.handler.ContentChannel": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [
+ "public abstract void write(java.nio.ByteBuffer, com.yahoo.jdisc.handler.CompletionHandler)",
+ "public abstract void close(com.yahoo.jdisc.handler.CompletionHandler)",
+ "public void onError(java.lang.Throwable)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.handler.ContentInputStream": {
+ "superClass": "com.yahoo.jdisc.handler.UnsafeContentInputStream",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "final"
+ ],
+ "methods": [
+ "public void <init>(com.yahoo.jdisc.handler.ReadableContentChannel)",
+ "public void finalize()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.handler.DelegatedRequestHandler": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "com.yahoo.jdisc.handler.RequestHandler"
+ ],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [
+ "public abstract com.yahoo.jdisc.handler.RequestHandler getDelegate()",
+ "public com.yahoo.jdisc.handler.RequestHandler getDelegateRecursive()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.handler.FastContentOutputStream": {
+ "superClass": "com.yahoo.jdisc.handler.AbstractContentOutputStream",
+ "interfaces": [
+ "com.google.common.util.concurrent.ListenableFuture"
+ ],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(com.yahoo.jdisc.handler.ContentChannel)",
+ "public void <init>(com.yahoo.jdisc.handler.FastContentWriter)",
+ "protected void doFlush(java.nio.ByteBuffer)",
+ "protected void doClose()",
+ "public boolean cancel(boolean)",
+ "public boolean isCancelled()",
+ "public boolean isDone()",
+ "public java.lang.Boolean get()",
+ "public java.lang.Boolean get(long, java.util.concurrent.TimeUnit)",
+ "public void addListener(java.lang.Runnable, java.util.concurrent.Executor)",
+ "public bridge synthetic java.lang.Object get(long, java.util.concurrent.TimeUnit)",
+ "public bridge synthetic java.lang.Object get()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.handler.FastContentWriter": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "com.google.common.util.concurrent.ListenableFuture",
+ "java.lang.AutoCloseable"
+ ],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(com.yahoo.jdisc.handler.ContentChannel)",
+ "public void write(java.lang.String)",
+ "public void write(byte[])",
+ "public void write(byte[], int, int)",
+ "public void write(java.nio.ByteBuffer)",
+ "public void close()",
+ "public void addListener(java.lang.Runnable, java.util.concurrent.Executor)",
+ "public boolean cancel(boolean)",
+ "public boolean isCancelled()",
+ "public boolean isDone()",
+ "public java.lang.Boolean get()",
+ "public java.lang.Boolean get(long, java.util.concurrent.TimeUnit)",
+ "public bridge synthetic java.lang.Object get(long, java.util.concurrent.TimeUnit)",
+ "public bridge synthetic java.lang.Object get()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.handler.FutureCompletion": {
+ "superClass": "com.google.common.util.concurrent.AbstractFuture",
+ "interfaces": [
+ "com.yahoo.jdisc.handler.CompletionHandler"
+ ],
+ "attributes": [
+ "public",
+ "final"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public void completed()",
+ "public void failed(java.lang.Throwable)",
+ "public final boolean cancel(boolean)",
+ "public final boolean isCancelled()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.handler.FutureConjunction": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "com.google.common.util.concurrent.ListenableFuture"
+ ],
+ "attributes": [
+ "public",
+ "final"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public void addOperand(com.google.common.util.concurrent.ListenableFuture)",
+ "public void addListener(java.lang.Runnable, java.util.concurrent.Executor)",
+ "public final boolean cancel(boolean)",
+ "public final boolean isCancelled()",
+ "public final boolean isDone()",
+ "public final java.lang.Boolean get()",
+ "public final java.lang.Boolean get(long, java.util.concurrent.TimeUnit)",
+ "public bridge synthetic java.lang.Object get(long, java.util.concurrent.TimeUnit)",
+ "public bridge synthetic java.lang.Object get()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.handler.FutureResponse": {
+ "superClass": "com.google.common.util.concurrent.AbstractFuture",
+ "interfaces": [
+ "com.yahoo.jdisc.handler.ResponseHandler"
+ ],
+ "attributes": [
+ "public",
+ "final"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public void <init>(com.yahoo.jdisc.handler.ContentChannel)",
+ "public void <init>(com.yahoo.jdisc.handler.ResponseHandler)",
+ "public com.yahoo.jdisc.handler.ContentChannel handleResponse(com.yahoo.jdisc.Response)",
+ "public final boolean cancel(boolean)",
+ "public final boolean isCancelled()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.handler.NullContent": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "com.yahoo.jdisc.handler.ContentChannel"
+ ],
+ "attributes": [
+ "public",
+ "final"
+ ],
+ "methods": [
+ "public void write(java.nio.ByteBuffer, com.yahoo.jdisc.handler.CompletionHandler)",
+ "public void close(com.yahoo.jdisc.handler.CompletionHandler)"
+ ],
+ "fields": [
+ "public static final com.yahoo.jdisc.handler.NullContent INSTANCE"
+ ]
+ },
+ "com.yahoo.jdisc.handler.OverloadException": {
+ "superClass": "java.lang.RuntimeException",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(java.lang.String, java.lang.Throwable)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.handler.ReadableContentChannel": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "com.yahoo.jdisc.handler.ContentChannel",
+ "java.lang.Iterable"
+ ],
+ "attributes": [
+ "public",
+ "final"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public void write(java.nio.ByteBuffer, com.yahoo.jdisc.handler.CompletionHandler)",
+ "public void close(com.yahoo.jdisc.handler.CompletionHandler)",
+ "public java.util.Iterator iterator()",
+ "public int available()",
+ "public java.nio.ByteBuffer read()",
+ "public void failed(java.lang.Throwable)",
+ "public com.yahoo.jdisc.handler.ContentInputStream toStream()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.handler.RequestDeniedException": {
+ "superClass": "java.lang.RuntimeException",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "final"
+ ],
+ "methods": [
+ "public void <init>(com.yahoo.jdisc.Request)",
+ "public com.yahoo.jdisc.Request request()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.handler.RequestDispatch": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "com.google.common.util.concurrent.ListenableFuture",
+ "com.yahoo.jdisc.handler.ResponseHandler"
+ ],
+ "attributes": [
+ "public",
+ "abstract"
+ ],
+ "methods": [
+ "public void <init>()",
+ "protected abstract com.yahoo.jdisc.Request newRequest()",
+ "protected java.lang.Iterable requestContent()",
+ "public final com.yahoo.jdisc.handler.ContentChannel connect()",
+ "public final com.yahoo.jdisc.handler.FastContentWriter connectFastWriter()",
+ "public final com.google.common.util.concurrent.ListenableFuture dispatch()",
+ "public void addListener(java.lang.Runnable, java.util.concurrent.Executor)",
+ "public final boolean cancel(boolean)",
+ "public final boolean isCancelled()",
+ "public final boolean isDone()",
+ "public final com.yahoo.jdisc.Response get()",
+ "public final com.yahoo.jdisc.Response get(long, java.util.concurrent.TimeUnit)",
+ "public com.yahoo.jdisc.handler.ContentChannel handleResponse(com.yahoo.jdisc.Response)",
+ "public bridge synthetic java.lang.Object get(long, java.util.concurrent.TimeUnit)",
+ "public bridge synthetic java.lang.Object get()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.handler.RequestHandler": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "com.yahoo.jdisc.SharedResource"
+ ],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [
+ "public abstract com.yahoo.jdisc.handler.ContentChannel handleRequest(com.yahoo.jdisc.Request, com.yahoo.jdisc.handler.ResponseHandler)",
+ "public abstract void handleTimeout(com.yahoo.jdisc.Request, com.yahoo.jdisc.handler.ResponseHandler)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.handler.ResponseDispatch": {
+ "superClass": "com.google.common.util.concurrent.ForwardingListenableFuture",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "abstract"
+ ],
+ "methods": [
+ "public void <init>()",
+ "protected abstract com.yahoo.jdisc.Response newResponse()",
+ "protected java.lang.Iterable responseContent()",
+ "public final com.yahoo.jdisc.handler.ContentChannel connect(com.yahoo.jdisc.handler.ResponseHandler)",
+ "public final com.yahoo.jdisc.handler.FastContentWriter connectFastWriter(com.yahoo.jdisc.handler.ResponseHandler)",
+ "public final com.google.common.util.concurrent.ListenableFuture dispatch(com.yahoo.jdisc.handler.ResponseHandler)",
+ "protected final com.google.common.util.concurrent.ListenableFuture delegate()",
+ "public final boolean cancel(boolean)",
+ "public final boolean isCancelled()",
+ "public static varargs com.yahoo.jdisc.handler.ResponseDispatch newInstance(int, java.nio.ByteBuffer[])",
+ "public static com.yahoo.jdisc.handler.ResponseDispatch newInstance(int, java.lang.Iterable)",
+ "public static varargs com.yahoo.jdisc.handler.ResponseDispatch newInstance(com.yahoo.jdisc.Response, java.nio.ByteBuffer[])",
+ "public static com.yahoo.jdisc.handler.ResponseDispatch newInstance(com.yahoo.jdisc.Response, java.lang.Iterable)",
+ "protected bridge synthetic java.util.concurrent.Future delegate()",
+ "protected bridge synthetic java.lang.Object delegate()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.handler.ResponseHandler": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [
+ "public abstract com.yahoo.jdisc.handler.ContentChannel handleResponse(com.yahoo.jdisc.Response)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.handler.ThreadedRequestHandler": {
+ "superClass": "com.yahoo.jdisc.handler.AbstractRequestHandler",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "abstract"
+ ],
+ "methods": [
+ "protected void <init>(java.util.concurrent.Executor)",
+ "public final void setTimeout(long, java.util.concurrent.TimeUnit)",
+ "public final long getTimeout(java.util.concurrent.TimeUnit)",
+ "public final com.yahoo.jdisc.handler.ContentChannel handleRequest(com.yahoo.jdisc.Request, com.yahoo.jdisc.handler.ResponseHandler)",
+ "protected void handleRequest(com.yahoo.jdisc.Request, com.yahoo.jdisc.handler.BufferedContentChannel, com.yahoo.jdisc.handler.ResponseHandler)",
+ "protected void handleRequest(com.yahoo.jdisc.Request, com.yahoo.jdisc.handler.ReadableContentChannel, com.yahoo.jdisc.handler.ResponseHandler)",
+ "protected void handleRequest(com.yahoo.jdisc.Request, com.yahoo.jdisc.handler.ContentInputStream, com.yahoo.jdisc.handler.ResponseHandler)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.handler.UnsafeContentInputStream": {
+ "superClass": "java.io.InputStream",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(com.yahoo.jdisc.handler.ReadableContentChannel)",
+ "public int read()",
+ "public int read(byte[], int, int)",
+ "public int available()",
+ "public void close()",
+ "public synchronized void mark(int)",
+ "public synchronized void reset()",
+ "public boolean markSupported()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.service.AbstractClientProvider": {
+ "superClass": "com.yahoo.jdisc.handler.AbstractRequestHandler",
+ "interfaces": [
+ "com.yahoo.jdisc.service.ClientProvider"
+ ],
+ "attributes": [
+ "public",
+ "abstract"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public void start()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.service.AbstractServerProvider": {
+ "superClass": "com.yahoo.jdisc.AbstractResource",
+ "interfaces": [
+ "com.yahoo.jdisc.service.ServerProvider"
+ ],
+ "attributes": [
+ "public",
+ "abstract"
+ ],
+ "methods": [
+ "protected void <init>(com.yahoo.jdisc.service.CurrentContainer)",
+ "public final com.yahoo.jdisc.service.CurrentContainer container()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.service.BindingSetNotFoundException": {
+ "superClass": "java.lang.RuntimeException",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "final"
+ ],
+ "methods": [
+ "public void <init>(java.lang.String)",
+ "public java.lang.String bindingSet()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.service.ClientProvider": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "com.yahoo.jdisc.handler.RequestHandler"
+ ],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [
+ "public abstract void start()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.service.ContainerNotReadyException": {
+ "superClass": "java.lang.RuntimeException",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "final"
+ ],
+ "methods": [
+ "public void <init>()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.service.CurrentContainer": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [
+ "public com.yahoo.jdisc.Container newReference(java.net.URI, java.lang.Object)",
+ "public com.yahoo.jdisc.Container newReference(java.net.URI)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.service.NoBindingSetSelectedException": {
+ "superClass": "java.lang.RuntimeException",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "final"
+ ],
+ "methods": [
+ "public void <init>(java.net.URI)",
+ "public java.net.URI uri()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.service.ServerProvider": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "com.yahoo.jdisc.SharedResource"
+ ],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [
+ "public abstract void start()",
+ "public abstract void close()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.test.MockMetric": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "com.yahoo.jdisc.Metric"
+ ],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public void set(java.lang.String, java.lang.Number, com.yahoo.jdisc.Metric$Context)",
+ "public void add(java.lang.String, java.lang.Number, com.yahoo.jdisc.Metric$Context)",
+ "public com.yahoo.jdisc.Metric$Context createContext(java.util.Map)",
+ "public java.util.Map metrics()",
+ "public java.lang.String toString()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.test.NonWorkingClientProvider": {
+ "superClass": "com.yahoo.jdisc.NoopSharedResource",
+ "interfaces": [
+ "com.yahoo.jdisc.service.ClientProvider"
+ ],
+ "attributes": [
+ "public",
+ "final"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public void start()",
+ "public com.yahoo.jdisc.handler.ContentChannel handleRequest(com.yahoo.jdisc.Request, com.yahoo.jdisc.handler.ResponseHandler)",
+ "public void handleTimeout(com.yahoo.jdisc.Request, com.yahoo.jdisc.handler.ResponseHandler)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.test.NonWorkingCompletionHandler": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "com.yahoo.jdisc.handler.CompletionHandler"
+ ],
+ "attributes": [
+ "public",
+ "final"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public void completed()",
+ "public void failed(java.lang.Throwable)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.test.NonWorkingContentChannel": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "com.yahoo.jdisc.handler.ContentChannel"
+ ],
+ "attributes": [
+ "public",
+ "final"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public void write(java.nio.ByteBuffer, com.yahoo.jdisc.handler.CompletionHandler)",
+ "public void close(com.yahoo.jdisc.handler.CompletionHandler)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.test.NonWorkingOsgiFramework": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "com.yahoo.jdisc.application.OsgiFramework"
+ ],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public java.util.List installBundle(java.lang.String)",
+ "public void startBundles(java.util.List, boolean)",
+ "public void refreshPackages()",
+ "public org.osgi.framework.BundleContext bundleContext()",
+ "public java.util.List bundles()",
+ "public java.util.List getBundles(org.osgi.framework.Bundle)",
+ "public void allowDuplicateBundles(java.util.Collection)",
+ "public void start()",
+ "public void stop()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.test.NonWorkingRequest": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "final"
+ ],
+ "methods": [
+ "public static varargs com.yahoo.jdisc.Request newInstance(java.lang.String, com.google.inject.Module[])"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.test.NonWorkingRequestHandler": {
+ "superClass": "com.yahoo.jdisc.NoopSharedResource",
+ "interfaces": [
+ "com.yahoo.jdisc.handler.RequestHandler"
+ ],
+ "attributes": [
+ "public",
+ "final"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public com.yahoo.jdisc.handler.ContentChannel handleRequest(com.yahoo.jdisc.Request, com.yahoo.jdisc.handler.ResponseHandler)",
+ "public void handleTimeout(com.yahoo.jdisc.Request, com.yahoo.jdisc.handler.ResponseHandler)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.test.NonWorkingResponseHandler": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "com.yahoo.jdisc.handler.ResponseHandler"
+ ],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public com.yahoo.jdisc.handler.ContentChannel handleResponse(com.yahoo.jdisc.Response)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.test.NonWorkingServerProvider": {
+ "superClass": "com.yahoo.jdisc.NoopSharedResource",
+ "interfaces": [
+ "com.yahoo.jdisc.service.ServerProvider"
+ ],
+ "attributes": [
+ "public",
+ "final"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public void start()",
+ "public void close()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.test.ServerProviderConformanceTest$Adapter": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [
+ "public abstract com.google.inject.Module newConfigModule()",
+ "public abstract java.lang.Class getServerProviderClass()",
+ "public abstract java.lang.Object newClient(com.yahoo.jdisc.service.ServerProvider)",
+ "public abstract java.lang.Object executeRequest(java.lang.Object, boolean)",
+ "public abstract java.lang.Iterable newResponseContent()",
+ "public abstract void validateResponse(java.lang.Object)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.test.ServerProviderConformanceTest$ConformanceException": {
+ "superClass": "java.lang.RuntimeException",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public void <init>(com.yahoo.jdisc.test.ServerProviderConformanceTest$Event)",
+ "public java.lang.String getMessage()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.test.ServerProviderConformanceTest": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "abstract"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public abstract void testContainerNotReadyException()",
+ "public abstract void testBindingSetNotFoundException()",
+ "public abstract void testNoBindingSetSelectedException()",
+ "public abstract void testBindingNotFoundException()",
+ "public abstract void testRequestHandlerWithSyncCloseResponse()",
+ "public abstract void testRequestHandlerWithSyncWriteResponse()",
+ "public abstract void testRequestHandlerWithSyncHandleResponse()",
+ "public abstract void testRequestHandlerWithAsyncHandleResponse()",
+ "public abstract void testRequestException()",
+ "public abstract void testRequestExceptionWithSyncCloseResponse()",
+ "public abstract void testRequestExceptionWithSyncWriteResponse()",
+ "public abstract void testRequestNondeterministicExceptionWithSyncHandleResponse()",
+ "public abstract void testRequestExceptionBeforeResponseWriteWithSyncHandleResponse()",
+ "public abstract void testRequestExceptionAfterResponseWriteWithSyncHandleResponse()",
+ "public abstract void testRequestNondeterministicExceptionWithAsyncHandleResponse()",
+ "public abstract void testRequestExceptionBeforeResponseWriteWithAsyncHandleResponse()",
+ "public abstract void testRequestExceptionAfterResponseCloseNoContentWithAsyncHandleResponse()",
+ "public abstract void testRequestExceptionAfterResponseWriteWithAsyncHandleResponse()",
+ "public abstract void testRequestContentWriteWithSyncCompletion()",
+ "public abstract void testRequestContentWriteWithAsyncCompletion()",
+ "public abstract void testRequestContentWriteWithNondeterministicSyncFailure()",
+ "public abstract void testRequestContentWriteWithSyncFailureBeforeResponseWrite()",
+ "public abstract void testRequestContentWriteWithSyncFailureAfterResponseWrite()",
+ "public abstract void testRequestContentWriteWithNondeterministicAsyncFailure()",
+ "public abstract void testRequestContentWriteWithAsyncFailureBeforeResponseWrite()",
+ "public abstract void testRequestContentWriteWithAsyncFailureAfterResponseWrite()",
+ "public abstract void testRequestContentWriteWithAsyncFailureAfterResponseCloseNoContent()",
+ "public abstract void testRequestContentWriteNondeterministicException()",
+ "public abstract void testRequestContentWriteExceptionBeforeResponseWrite()",
+ "public abstract void testRequestContentWriteExceptionAfterResponseWrite()",
+ "public abstract void testRequestContentWriteExceptionAfterResponseCloseNoContent()",
+ "public abstract void testRequestContentWriteNondeterministicExceptionWithSyncCompletion()",
+ "public abstract void testRequestContentWriteExceptionBeforeResponseWriteWithSyncCompletion()",
+ "public abstract void testRequestContentWriteExceptionAfterResponseWriteWithSyncCompletion()",
+ "public abstract void testRequestContentWriteExceptionAfterResponseCloseNoContentWithSyncCompletion()",
+ "public abstract void testRequestContentWriteNondeterministicExceptionWithAsyncCompletion()",
+ "public abstract void testRequestContentWriteExceptionBeforeResponseWriteWithAsyncCompletion()",
+ "public abstract void testRequestContentWriteExceptionAfterResponseWriteWithAsyncCompletion()",
+ "public abstract void testRequestContentWriteExceptionAfterResponseCloseNoContentWithAsyncCompletion()",
+ "public abstract void testRequestContentWriteExceptionWithNondeterministicSyncFailure()",
+ "public abstract void testRequestContentWriteExceptionWithSyncFailureBeforeResponseWrite()",
+ "public abstract void testRequestContentWriteExceptionWithSyncFailureAfterResponseWrite()",
+ "public abstract void testRequestContentWriteExceptionWithSyncFailureAfterResponseCloseNoContent()",
+ "public abstract void testRequestContentWriteExceptionWithNondeterministicAsyncFailure()",
+ "public abstract void testRequestContentWriteExceptionWithAsyncFailureBeforeResponseWrite()",
+ "public abstract void testRequestContentWriteExceptionWithAsyncFailureAfterResponseWrite()",
+ "public abstract void testRequestContentWriteExceptionWithAsyncFailureAfterResponseCloseNoContent()",
+ "public abstract void testRequestContentCloseWithSyncCompletion()",
+ "public abstract void testRequestContentCloseWithAsyncCompletion()",
+ "public abstract void testRequestContentCloseWithNondeterministicSyncFailure()",
+ "public abstract void testRequestContentCloseWithSyncFailureBeforeResponseWrite()",
+ "public abstract void testRequestContentCloseWithSyncFailureAfterResponseWrite()",
+ "public abstract void testRequestContentCloseWithSyncFailureAfterResponseCloseNoContent()",
+ "public abstract void testRequestContentCloseWithNondeterministicAsyncFailure()",
+ "public abstract void testRequestContentCloseWithAsyncFailureBeforeResponseWrite()",
+ "public abstract void testRequestContentCloseWithAsyncFailureAfterResponseWrite()",
+ "public abstract void testRequestContentCloseWithAsyncFailureAfterResponseCloseNoContent()",
+ "public abstract void testRequestContentCloseNondeterministicException()",
+ "public abstract void testRequestContentCloseExceptionBeforeResponseWrite()",
+ "public abstract void testRequestContentCloseExceptionAfterResponseWrite()",
+ "public abstract void testRequestContentCloseExceptionAfterResponseCloseNoContent()",
+ "public abstract void testRequestContentCloseNondeterministicExceptionWithSyncCompletion()",
+ "public abstract void testRequestContentCloseExceptionBeforeResponseWriteWithSyncCompletion()",
+ "public abstract void testRequestContentCloseExceptionAfterResponseWriteWithSyncCompletion()",
+ "public abstract void testRequestContentCloseExceptionAfterResponseCloseNoContentWithSyncCompletion()",
+ "public abstract void testRequestContentCloseNondeterministicExceptionWithAsyncCompletion()",
+ "public abstract void testRequestContentCloseExceptionBeforeResponseWriteWithAsyncCompletion()",
+ "public abstract void testRequestContentCloseExceptionAfterResponseWriteWithAsyncCompletion()",
+ "public abstract void testRequestContentCloseExceptionAfterResponseCloseNoContentWithAsyncCompletion()",
+ "public abstract void testRequestContentCloseNondeterministicExceptionWithSyncFailure()",
+ "public abstract void testRequestContentCloseExceptionBeforeResponseWriteWithSyncFailure()",
+ "public abstract void testRequestContentCloseExceptionAfterResponseWriteWithSyncFailure()",
+ "public abstract void testRequestContentCloseExceptionAfterResponseCloseNoContentWithSyncFailure()",
+ "public abstract void testRequestContentCloseNondeterministicExceptionWithAsyncFailure()",
+ "public abstract void testRequestContentCloseExceptionBeforeResponseWriteWithAsyncFailure()",
+ "public abstract void testRequestContentCloseExceptionAfterResponseWriteWithAsyncFailure()",
+ "public abstract void testRequestContentCloseExceptionAfterResponseCloseNoContentWithAsyncFailure()",
+ "public abstract void testResponseWriteCompletionException()",
+ "public abstract void testResponseCloseCompletionException()",
+ "public abstract void testResponseCloseCompletionExceptionNoContent()",
+ "protected varargs void runTest(com.yahoo.jdisc.test.ServerProviderConformanceTest$Adapter, com.google.inject.Module[])"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.test.TestDriver": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "com.yahoo.jdisc.application.ContainerActivator",
+ "com.yahoo.jdisc.service.CurrentContainer"
+ ],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public com.yahoo.jdisc.application.ContainerBuilder newContainerBuilder()",
+ "public com.yahoo.jdisc.application.DeactivatedContainer activateContainer(com.yahoo.jdisc.application.ContainerBuilder)",
+ "public com.yahoo.jdisc.Container newReference(java.net.URI)",
+ "public com.yahoo.jdisc.core.BootstrapLoader bootstrapLoader()",
+ "public com.yahoo.jdisc.application.Application application()",
+ "public com.yahoo.jdisc.application.OsgiFramework osgiFramework()",
+ "public com.yahoo.jdisc.handler.ContentChannel connectRequest(java.lang.String, com.yahoo.jdisc.handler.ResponseHandler)",
+ "public java.util.concurrent.Future dispatchRequest(java.lang.String, com.yahoo.jdisc.handler.ResponseHandler)",
+ "public void scheduleClose()",
+ "public boolean awaitClose(long, java.util.concurrent.TimeUnit)",
+ "public boolean close()",
+ "public com.yahoo.jdisc.handler.RequestDispatch newRequestDispatch(java.lang.String, com.yahoo.jdisc.handler.ResponseHandler)",
+ "public static varargs com.yahoo.jdisc.test.TestDriver newInjectedApplicationInstance(java.lang.Class, com.google.inject.Module[])",
+ "public static varargs com.yahoo.jdisc.test.TestDriver newInjectedApplicationInstanceWithoutOsgi(java.lang.Class, com.google.inject.Module[])",
+ "public static varargs com.yahoo.jdisc.test.TestDriver newInjectedApplicationInstance(com.yahoo.jdisc.application.Application, com.google.inject.Module[])",
+ "public static varargs com.yahoo.jdisc.test.TestDriver newInjectedApplicationInstanceWithoutOsgi(com.yahoo.jdisc.application.Application, com.google.inject.Module[])",
+ "public static varargs com.yahoo.jdisc.test.TestDriver newSimpleApplicationInstance(com.google.inject.Module[])",
+ "public static varargs com.yahoo.jdisc.test.TestDriver newSimpleApplicationInstanceWithoutOsgi(com.google.inject.Module[])",
+ "public static varargs com.yahoo.jdisc.test.TestDriver newApplicationBundleInstance(java.lang.String, boolean, com.google.inject.Module[])",
+ "public static varargs com.yahoo.jdisc.test.TestDriver newInstance(com.yahoo.jdisc.application.OsgiFramework, java.lang.String, boolean, com.google.inject.Module[])",
+ "public static com.yahoo.jdisc.core.FelixFramework newOsgiFramework()",
+ "public static com.yahoo.jdisc.application.OsgiFramework newNonWorkingOsgiFramework()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.test.TestTimer": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "com.yahoo.jdisc.Timer"
+ ],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public void <init>(java.time.Instant)",
+ "public void setMillis(long)",
+ "public void advanceMillis(long)",
+ "public void advanceSeconds(long)",
+ "public void advanceMinutes(long)",
+ "public void advance(java.time.Duration)",
+ "public java.time.Instant currentTime()",
+ "public long currentTimeMillis()"
+ ],
+ "fields": []
+ }
+} \ No newline at end of file
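
Each entry in the generated abi-spec.json above records one public class of the module together with its superclass, implemented interfaces, class-level attributes, and the signatures of its methods and fields; this file is presumably consumed by the abi-check-plugin added to the pom below. A minimal sketch of the mapping, using a hypothetical class that is not part of this change:

    // Hypothetical example only: a class such as
    public final class Greeter {
        public Greeter() { }
        public String greet(String name) { return "Hello " + name; }
    }

    // would be recorded in abi-spec.json roughly as:
    // "com.example.Greeter": {
    //   "superClass": "java.lang.Object",
    //   "interfaces": [],
    //   "attributes": [ "public", "final" ],
    //   "methods": [
    //     "public void <init>()",
    //     "public java.lang.String greet(java.lang.String)"
    //   ],
    //   "fields": []
    // }
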
diff --git a/jdisc_core/pom.xml b/jdisc_core/pom.xml
index caa97b4e7e6..4a68c8e0cac 100644
--- a/jdisc_core/pom.xml
+++ b/jdisc_core/pom.xml
@@ -171,6 +171,10 @@
<build>
<plugins>
<plugin>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>abi-check-plugin</artifactId>
+ </plugin>
+ <plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Application.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Application.java
index 3c2ab8cead2..df5044de05c 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Application.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Application.java
@@ -2,10 +2,9 @@
package com.yahoo.vespa.hosted.provision.applications;
import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.config.provision.ClusterResources;
+import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.ClusterSpec;
import java.util.Collection;
-import java.util.List;
import java.util.Map;
import java.util.HashMap;
import java.util.Optional;
@@ -59,12 +58,12 @@ public class Application {
* Returns an application with the given cluster having the min and max resource limits of the given cluster.
* If the cluster has a target which is not inside the new limits, the target is removed.
*/
- public Application withCluster(ClusterSpec.Id id, boolean exclusive, ClusterResources min, ClusterResources max) {
+ public Application withCluster(ClusterSpec.Id id, boolean exclusive, Capacity requested) {
Cluster cluster = clusters.get(id);
if (cluster == null)
- cluster = new Cluster(id, exclusive, min, max, Optional.empty(), Optional.empty(), List.of(), AutoscalingStatus.empty());
+ cluster = Cluster.create(id, exclusive, requested);
else
- cluster = cluster.withConfiguration(exclusive, min, max);
+ cluster = cluster.withConfiguration(exclusive, requested);
return with(cluster);
}
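
Application.withCluster now takes the requested Capacity directly instead of separate min/max ClusterResources, so the cluster can also record whether the capacity is required. Call sites change along these lines (a sketch; Capacity.from(min, max) is the form used in the test updates further down):

    // before: resource limits passed individually
    application = application.withCluster(cluster.id(), exclusive, min, max);

    // after: pass the whole requested Capacity, so Cluster can pick up
    // capacity.isRequired() as well as the min/max limits
    application = application.withCluster(cluster.id(), exclusive, Capacity.from(min, max));
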
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java
index 5478999e4fe..e0ccbe10b10 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.applications;
+import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.vespa.hosted.provision.autoscale.Autoscaler;
@@ -25,6 +26,7 @@ public class Cluster {
private final ClusterSpec.Id id;
private final boolean exclusive;
private final ClusterResources min, max;
+ private boolean required;
private final Optional<Suggestion> suggested;
private final Optional<ClusterResources> target;
@@ -36,6 +38,7 @@ public class Cluster {
boolean exclusive,
ClusterResources minResources,
ClusterResources maxResources,
+ boolean required,
Optional<Suggestion> suggestedResources,
Optional<ClusterResources> targetResources,
List<ScalingEvent> scalingEvents,
@@ -44,6 +47,7 @@ public class Cluster {
this.exclusive = exclusive;
this.min = Objects.requireNonNull(minResources);
this.max = Objects.requireNonNull(maxResources);
+ this.required = required;
this.suggested = Objects.requireNonNull(suggestedResources);
Objects.requireNonNull(targetResources);
if (targetResources.isPresent() && ! targetResources.get().isWithin(minResources, maxResources))
@@ -56,14 +60,20 @@ public class Cluster {
public ClusterSpec.Id id() { return id; }
+ /** Returns whether the nodes allocated to this cluster must be on hosts exclusively dedicated to this application */
+ public boolean exclusive() { return exclusive; }
+
/** Returns the configured minimal resources in this cluster */
public ClusterResources minResources() { return min; }
/** Returns the configured maximal resources in this cluster */
public ClusterResources maxResources() { return max; }
- /** Returns whether the nodes allocated to this cluster must be on host exclusively dedicated to this application */
- public boolean exclusive() { return exclusive; }
+ /**
+ * Returns whether the resources of this cluster are required to be within the specified min and max.
+ * Otherwise they may be adjusted by capacity policies.
+ */
+ public boolean required() { return required; }
/**
* Returns the computed resources (between min and max, inclusive) this cluster should
@@ -97,16 +107,18 @@ public class Cluster {
/** The latest autoscaling status of this cluster, or unknown (never null) if none */
public AutoscalingStatus autoscalingStatus() { return autoscalingStatus; }
- public Cluster withConfiguration(boolean exclusive, ClusterResources min, ClusterResources max) {
- return new Cluster(id, exclusive, min, max, suggested, target, scalingEvents, autoscalingStatus);
+ public Cluster withConfiguration(boolean exclusive, Capacity capacity) {
+ return new Cluster(id, exclusive,
+ capacity.minResources(), capacity.maxResources(), capacity.isRequired(),
+ suggested, target, scalingEvents, autoscalingStatus);
}
public Cluster withSuggested(Optional<Suggestion> suggested) {
- return new Cluster(id, exclusive, min, max, suggested, target, scalingEvents, autoscalingStatus);
+ return new Cluster(id, exclusive, min, max, required, suggested, target, scalingEvents, autoscalingStatus);
}
public Cluster withTarget(Optional<ClusterResources> target) {
- return new Cluster(id, exclusive, min, max, suggested, target, scalingEvents, autoscalingStatus);
+ return new Cluster(id, exclusive, min, max, required, suggested, target, scalingEvents, autoscalingStatus);
}
/** Add or update (based on "at" time) a scaling event */
@@ -120,12 +132,12 @@ public class Cluster {
scalingEvents.add(scalingEvent);
prune(scalingEvents);
- return new Cluster(id, exclusive, min, max, suggested, target, scalingEvents, autoscalingStatus);
+ return new Cluster(id, exclusive, min, max, required, suggested, target, scalingEvents, autoscalingStatus);
}
public Cluster with(AutoscalingStatus autoscalingStatus) {
if (autoscalingStatus.equals(this.autoscalingStatus)) return this;
- return new Cluster(id, exclusive, min, max, suggested, target, scalingEvents, autoscalingStatus);
+ return new Cluster(id, exclusive, min, max, required, suggested, target, scalingEvents, autoscalingStatus);
}
@Override
@@ -156,6 +168,11 @@ public class Cluster {
return -1;
}
+ public static Cluster create(ClusterSpec.Id id, boolean exclusive, Capacity requested) {
+ return new Cluster(id, exclusive, requested.minResources(), requested.maxResources(), requested.isRequired(),
+ Optional.empty(), Optional.empty(), List.of(), AutoscalingStatus.empty());
+ }
+
public static class Suggestion {
private final ClusterResources resources;
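
Cluster now carries the required flag taken from Capacity.isRequired(), and new clusters are built through the Cluster.create factory instead of calling the constructor directly. A minimal sketch of the intended use, with names taken from the diff above:

    // a new cluster is created directly from the requested capacity
    Cluster cluster = Cluster.create(id, exclusive, requested);

    // later reconfigurations keep min, max and required in sync with the request
    cluster = cluster.withConfiguration(exclusive, requested);

    // required() tells capacity policies whether the requested resources must be
    // honoured as-is or may be adjusted (see CapacityPolicies further down)
    boolean mayAdjust = ! cluster.required();
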
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java
index f1e707be7b4..078b0621a99 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java
@@ -8,6 +8,7 @@ import com.yahoo.config.provision.NodeResources;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
+import com.yahoo.vespa.hosted.provision.provisioning.CapacityPolicies;
import com.yahoo.vespa.hosted.provision.provisioning.NodeResourceLimits;
import java.util.List;
@@ -54,14 +55,14 @@ public class AllocatableClusterResources {
public AllocatableClusterResources(ClusterResources realResources,
NodeResources advertisedResources,
- NodeResources idealResources,
+ ClusterResources idealResources,
ClusterSpec clusterSpec) {
this.nodes = realResources.nodes();
this.groups = realResources.groups();
this.realResources = realResources.nodeResources();
this.advertisedResources = advertisedResources;
this.clusterSpec = clusterSpec;
- this.fulfilment = fulfilment(realResources.nodeResources(), idealResources);
+ this.fulfilment = fulfilment(realResources, idealResources);
}
/**
@@ -99,10 +100,10 @@ public class AllocatableClusterResources {
*/
public double fulfilment() { return fulfilment; }
- private static double fulfilment(NodeResources realResources, NodeResources idealResources) {
- double vcpuFulfilment = Math.min(1, realResources.vcpu() / idealResources.vcpu());
- double memoryGbFulfilment = Math.min(1, realResources.memoryGb() / idealResources.memoryGb());
- double diskGbFulfilment = Math.min(1, realResources.diskGb() / idealResources.diskGb());
+ private static double fulfilment(ClusterResources realResources, ClusterResources idealResources) {
+ double vcpuFulfilment = Math.min(1, realResources.totalResources().vcpu() / idealResources.totalResources().vcpu());
+ double memoryGbFulfilment = Math.min(1, realResources.totalResources().memoryGb() / idealResources.totalResources().memoryGb());
+ double diskGbFulfilment = Math.min(1, realResources.totalResources().diskGb() / idealResources.totalResources().diskGb());
return (vcpuFulfilment + memoryGbFulfilment + diskGbFulfilment) / 3;
}
@@ -138,21 +139,25 @@ public class AllocatableClusterResources {
public static Optional<AllocatableClusterResources> from(ClusterResources wantedResources,
ClusterSpec clusterSpec,
Limits applicationLimits,
+ boolean required,
NodeList hosts,
NodeRepository nodeRepository) {
+ var capacityPolicies = new CapacityPolicies(nodeRepository);
var systemLimits = new NodeResourceLimits(nodeRepository);
boolean exclusive = clusterSpec.isExclusive();
+ int actualNodes = capacityPolicies.decideSize(wantedResources.nodes(), required, true, false, clusterSpec);
if ( !clusterSpec.isExclusive() && !nodeRepository.zone().getCloud().dynamicProvisioning()) {
// We decide resources: Add overhead to what we'll request (advertised) to make sure real becomes (at least) cappedNodeResources
var advertisedResources = nodeRepository.resourcesCalculator().realToRequest(wantedResources.nodeResources(), exclusive);
advertisedResources = systemLimits.enlargeToLegal(advertisedResources, clusterSpec.type(), exclusive); // Ask for something legal
advertisedResources = applicationLimits.cap(advertisedResources); // Overrides other conditions, even if it will then fail
+ advertisedResources = capacityPolicies.decideNodeResources(advertisedResources, required, clusterSpec); // Adjust to what we can request
var realResources = nodeRepository.resourcesCalculator().requestToReal(advertisedResources, exclusive); // What we'll really get
if ( ! systemLimits.isWithinRealLimits(realResources, clusterSpec.type())) return Optional.empty();
if (matchesAny(hosts, advertisedResources))
- return Optional.of(new AllocatableClusterResources(wantedResources.with(realResources),
+ return Optional.of(new AllocatableClusterResources(wantedResources.withNodes(actualNodes).with(realResources),
advertisedResources,
- wantedResources.nodeResources(),
+ wantedResources,
clusterSpec));
else
return Optional.empty();
@@ -163,6 +168,7 @@ public class AllocatableClusterResources {
for (Flavor flavor : nodeRepository.flavors().getFlavors()) {
// Flavor decide resources: Real resources are the worst case real resources we'll get if we ask for these advertised resources
NodeResources advertisedResources = nodeRepository.resourcesCalculator().advertisedResourcesOf(flavor);
+ advertisedResources = capacityPolicies.decideNodeResources(advertisedResources, required, clusterSpec); // Adjust to what we can get
NodeResources realResources = nodeRepository.resourcesCalculator().requestToReal(advertisedResources, exclusive);
// Adjust where we don't need exact match to the flavor
@@ -178,9 +184,9 @@ public class AllocatableClusterResources {
if ( ! between(applicationLimits.min().nodeResources(), applicationLimits.max().nodeResources(), advertisedResources)) continue;
if ( ! systemLimits.isWithinRealLimits(realResources, clusterSpec.type())) continue;
- var candidate = new AllocatableClusterResources(wantedResources.with(realResources),
+ var candidate = new AllocatableClusterResources(wantedResources.withNodes(actualNodes).with(realResources),
advertisedResources,
- wantedResources.nodeResources(),
+ wantedResources,
clusterSpec);
if (best.isEmpty() || candidate.preferableTo(best.get()))
best = Optional.of(candidate);
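
Fulfilment is now computed from total cluster resources rather than per-node resources, so a node count reduced by capacity policies also lowers the fulfilment of a candidate allocation. A small worked illustration of the formula above (the totals are made-up numbers):

    // average of the capped per-dimension ratios, using cluster totals
    double vcpu   = Math.min(1, 8.0   / 16.0);  // real total vcpu   / ideal total vcpu
    double memory = Math.min(1, 64.0  / 64.0);  // real total memory / ideal total memory
    double disk   = Math.min(1, 200.0 / 400.0); // real total disk   / ideal total disk
    double fulfilment = (vcpu + memory + disk) / 3; // (0.5 + 1.0 + 0.5) / 3 = 2/3
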
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
index 6fd9801164a..b8a80a9bd2b 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
@@ -65,7 +65,9 @@ public class AllocationOptimizer {
nodeResourcesWith(nodesAdjustedForRedundancy,
groupsAdjustedForRedundancy,
limits, target, current, clusterModel));
- var allocatableResources = AllocatableClusterResources.from(next, current.clusterSpec(), limits, hosts, nodeRepository);
+ var allocatableResources = AllocatableClusterResources.from(next, current.clusterSpec(), limits,
+ clusterModel.cluster().required(),
+ hosts, nodeRepository);
if (allocatableResources.isEmpty()) continue;
if (bestAllocation.isEmpty() || allocatableResources.get().preferableTo(bestAllocation.get()))
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
index 35aafd3e0f4..1001ab83cc0 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
@@ -32,6 +32,7 @@ public class ClusterModel {
static final double idealDiskLoad = 0.6;
private final Application application;
+ private final Cluster cluster;
/** The current nodes of this cluster, or empty if this models a new cluster not yet deployed */
private final NodeList nodes;
private final Clock clock;
@@ -50,6 +51,7 @@ public class ClusterModel {
MetricsDb metricsDb,
Clock clock) {
this.application = application;
+ this.cluster = cluster;
this.nodes = clusterNodes;
this.clock = clock;
this.scalingDuration = computeScalingDuration(cluster, clusterSpec);
@@ -65,6 +67,7 @@ public class ClusterModel {
ClusterTimeseries clusterTimeseries,
ClusterNodesTimeseries nodeTimeseries) {
this.application = application;
+ this.cluster = cluster;
this.nodes = null;
this.clock = clock;
@@ -73,6 +76,8 @@ public class ClusterModel {
this.nodeTimeseries = nodeTimeseries;
}
+ public Cluster cluster() { return cluster; }
+
/** Returns the predicted duration of a rescaling of this cluster */
public Duration scalingDuration() { return scalingDuration; }
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializer.java
index 9cdb4c69b97..2289ba4a0ea 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializer.java
@@ -48,6 +48,7 @@ public class ApplicationSerializer {
private static final String exclusiveKey = "exclusive";
private static final String minResourcesKey = "min";
private static final String maxResourcesKey = "max";
+ private static final String requiredKey = "required";
private static final String suggestedKey = "suggested";
private static final String resourcesKey = "resources";
private static final String targetResourcesKey = "target";
@@ -99,7 +100,6 @@ public class ApplicationSerializer {
}
private static Status statusFromSlime(Inspector statusObject) {
- if ( ! statusObject.valid()) return Status.initial(); // TODO: Remove this line after March 2021
return new Status(statusObject.field(currentReadShareKey).asDouble(),
statusObject.field(maxReadShareKey).asDouble());
}
@@ -118,6 +118,7 @@ public class ApplicationSerializer {
clusterObject.setBool(exclusiveKey, cluster.exclusive());
toSlime(cluster.minResources(), clusterObject.setObject(minResourcesKey));
toSlime(cluster.maxResources(), clusterObject.setObject(maxResourcesKey));
+ clusterObject.setBool(requiredKey, cluster.required());
cluster.suggestedResources().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject(suggestedKey)));
cluster.targetResources().ifPresent(target -> toSlime(target, clusterObject.setObject(targetResourcesKey)));
scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray(scalingEventsKey));
@@ -130,6 +131,7 @@ public class ApplicationSerializer {
clusterObject.field(exclusiveKey).asBool(),
clusterResourcesFromSlime(clusterObject.field(minResourcesKey)),
clusterResourcesFromSlime(clusterObject.field(maxResourcesKey)),
+ clusterObject.field(requiredKey).asBool(),
optionalSuggestionFromSlime(clusterObject.field(suggestedKey)),
optionalClusterResourcesFromSlime(clusterObject.field(targetResourcesKey)),
scalingEventsFromSlime(clusterObject.field(scalingEventsKey)),
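
The persisted cluster entry gains a required flag next to the existing fields. Roughly, the per-cluster object written by toSlime now contains (key names from the constants above; nested shapes omitted):

    // exclusive     : boolean
    // min           : cluster resources object
    // max           : cluster resources object
    // required      : boolean   (new in this change)
    // suggested     : optional, only written when a suggestion exists
    // target        : optional, only written when a target exists
    // scalingEvents : array of scaling events
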
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
index 839bc21827c..0c2c3c48df1 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
@@ -29,11 +29,11 @@ public class CapacityPolicies {
this.sharedHosts = type -> PermanentFlags.SHARED_HOST.bindTo(nodeRepository.flagSource()).value().isEnabled(type.name());
}
- public int decideSize(int requested, Capacity capacity, ClusterSpec cluster, ApplicationId application) {
- if (application.instance().isTester()) return 1;
+ public int decideSize(int requested, boolean required, boolean canFail, boolean isTester, ClusterSpec cluster) {
+ if (isTester) return 1;
- ensureRedundancy(requested, cluster, capacity.canFail());
- if (capacity.isRequired()) return requested;
+ ensureRedundancy(requested, cluster, canFail);
+ if (required) return requested;
switch(zone.environment()) {
case dev : case test : return 1;
case perf : return Math.min(requested, 3);
@@ -43,11 +43,11 @@ public class CapacityPolicies {
}
}
- public NodeResources decideNodeResources(NodeResources target, Capacity capacity, ClusterSpec cluster) {
+ public NodeResources decideNodeResources(NodeResources target, boolean required, ClusterSpec cluster) {
if (target.isUnspecified())
target = defaultNodeResources(cluster.type());
- if (capacity.isRequired()) return target;
+ if (required) return target;
// Dev does not cap the cpu or network of containers since usage is spotty: Allocate just a small amount exclusively
if (zone.environment() == Environment.dev && !zone.getCloud().dynamicProvisioning())
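
decideSize and decideNodeResources now take plain booleans (required, canFail, isTester) instead of the full Capacity and ApplicationId, which lets the autoscaler reuse them (see AllocatableClusterResources.from above). A sketch of the sizing behaviour this preserves, assuming a non-tester application:

    int requested = 5;

    // required = true: the requested count is honoured (after the redundancy check)
    int requiredNodes = capacityPolicies.decideSize(requested, true, true, false, cluster);  // 5

    // required = false: the count depends on the zone environment,
    // e.g. dev/test -> 1 and perf -> min(requested, 3), per the switch above
    int adjustedNodes = capacityPolicies.decideSize(requested, false, true, false, cluster);
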
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
index 0ab04a1a73d..b35b0a5e301 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
@@ -97,9 +97,13 @@ public class NodeRepositoryProvisioner implements Provisioner {
NodeSpec nodeSpec;
if (requested.type() == NodeType.tenant) {
ClusterResources target = decideTargetResources(application, cluster, requested);
- int nodeCount = capacityPolicies.decideSize(target.nodes(), requested, cluster, application);
+ int nodeCount = capacityPolicies.decideSize(target.nodes(),
+ requested.isRequired(),
+ requested.canFail(),
+ application.instance().isTester(),
+ cluster);
groups = Math.min(target.groups(), nodeCount); // cannot have more groups than nodes
- resources = capacityPolicies.decideNodeResources(target.nodeResources(), requested, cluster);
+ resources = capacityPolicies.decideNodeResources(target.nodeResources(), requested.isRequired(), cluster);
boolean exclusive = capacityPolicies.decideExclusivity(requested, cluster.isExclusive());
nodeSpec = NodeSpec.from(nodeCount, resources, exclusive, requested.canFail());
logIfDownscaled(target.nodes(), nodeCount, cluster, logger);
@@ -141,7 +145,7 @@ public class NodeRepositoryProvisioner implements Provisioner {
private ClusterResources decideTargetResources(ApplicationId applicationId, ClusterSpec clusterSpec, Capacity requested) {
try (Mutex lock = nodeRepository.nodes().lock(applicationId)) {
var application = nodeRepository.applications().get(applicationId).orElse(Application.empty(applicationId))
- .withCluster(clusterSpec.id(), clusterSpec.isExclusive(), requested.minResources(), requested.maxResources());
+ .withCluster(clusterSpec.id(), clusterSpec.isExclusive(), requested);
nodeRepository.applications().put(application, lock);
var cluster = application.cluster(clusterSpec.id()).get();
return cluster.targetResources().orElseGet(() -> currentResources(application, clusterSpec, cluster, requested));
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingIntegrationTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingIntegrationTest.java
index 4a0c2012ae4..667dffef2a6 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingIntegrationTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingIntegrationTest.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.hosted.provision.autoscale;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.HostSpec;
@@ -55,7 +56,7 @@ public class AutoscalingIntegrationTest {
ClusterResources max = new ClusterResources(2, 1, nodes);
Application application = tester.nodeRepository().applications().get(application1).orElse(Application.empty(application1))
- .withCluster(cluster1.id(), false, min, max);
+ .withCluster(cluster1.id(), false, Capacity.from(min, max));
try (Mutex lock = tester.nodeRepository().nodes().lock(application1)) {
tester.nodeRepository().applications().put(application, lock);
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
index a3c7b7d2d2b..601a7109533 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
@@ -41,6 +41,7 @@ public class AutoscalingTest {
new NodeResources(1, 1, 1, 1, NodeResources.DiskSpeed.any));
ClusterResources max = new ClusterResources(20, 1,
new NodeResources(100, 1000, 1000, 1, NodeResources.DiskSpeed.any));
+ var capacity = Capacity.from(min, max);
AutoscalingTester tester = new AutoscalingTester(hostResources);
ApplicationId application1 = tester.applicationId("application1");
@@ -50,10 +51,10 @@ public class AutoscalingTest {
tester.deploy(application1, cluster1, 5, 1, hostResources);
tester.clock().advance(Duration.ofDays(1));
- assertTrue("No measurements -> No change", tester.autoscale(application1, cluster1.id(), min, max).isEmpty());
+ assertTrue("No measurements -> No change", tester.autoscale(application1, cluster1.id(), capacity).isEmpty());
tester.addCpuMeasurements(0.25f, 1f, 59, application1);
- assertTrue("Too few measurements -> No change", tester.autoscale(application1, cluster1.id(), min, max).isEmpty());
+ assertTrue("Too few measurements -> No change", tester.autoscale(application1, cluster1.id(), capacity).isEmpty());
tester.clock().advance(Duration.ofDays(1));
tester.addCpuMeasurements(0.25f, 1f, 120, application1);
@@ -61,10 +62,10 @@ public class AutoscalingTest {
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
ClusterResources scaledResources = tester.assertResources("Scaling up since resource usage is too high",
15, 1, 1.2, 28.6, 28.6,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
tester.deploy(application1, cluster1, scaledResources);
- assertTrue("Cluster in flux -> No further change", tester.autoscale(application1, cluster1.id(), min, max).isEmpty());
+ assertTrue("Cluster in flux -> No further change", tester.autoscale(application1, cluster1.id(), capacity).isEmpty());
tester.deactivateRetired(application1, cluster1, scaledResources);
@@ -73,19 +74,19 @@ public class AutoscalingTest {
tester.clock().advance(Duration.ofMinutes(-10 * 5));
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
assertTrue("Load change is large, but insufficient measurements for new config -> No change",
- tester.autoscale(application1, cluster1.id(), min, max).isEmpty());
+ tester.autoscale(application1, cluster1.id(), capacity).isEmpty());
tester.addCpuMeasurements(0.19f, 1f, 100, application1);
tester.clock().advance(Duration.ofMinutes(-10 * 5));
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
- assertEquals("Load change is small -> No change", Optional.empty(), tester.autoscale(application1, cluster1.id(), min, max).target());
+ assertEquals("Load change is small -> No change", Optional.empty(), tester.autoscale(application1, cluster1.id(), capacity).target());
tester.addCpuMeasurements(0.1f, 1f, 120, application1);
tester.clock().advance(Duration.ofMinutes(-10 * 5));
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
tester.assertResources("Scaling down to minimum since usage has gone down significantly",
7, 1, 1.0, 66.7, 66.7,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
var events = tester.nodeRepository().applications().get(application1).get().cluster(cluster1.id()).get().scalingEvents();
}
@@ -96,6 +97,7 @@ public class AutoscalingTest {
NodeResources resources = new NodeResources(3, 100, 100, 1);
ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(20, 1, new NodeResources(100, 1000, 1000, 1));
+ var capacity = Capacity.from(min, max);
AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2));
ApplicationId application1 = tester.applicationId("application1");
@@ -108,7 +110,7 @@ public class AutoscalingTest {
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
ClusterResources scaledResources = tester.assertResources("Scaling up since cpu usage is too high",
7, 1, 2.5, 80.0, 80.0,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
tester.deploy(application1, cluster1, scaledResources);
tester.deactivateRetired(application1, cluster1, scaledResources);
@@ -118,7 +120,7 @@ public class AutoscalingTest {
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
tester.assertResources("Scaling down since cpu usage has gone down",
4, 1, 2.5, 68.6, 68.6,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
}
@Test
@@ -142,9 +144,10 @@ public class AutoscalingTest {
new NodeResources(1, 1, 1, 1, NodeResources.DiskSpeed.any));
ClusterResources max = new ClusterResources(20, 1,
new NodeResources(100, 1000, 1000, 1, NodeResources.DiskSpeed.any));
+ var capacity = Capacity.from(min, max);
ClusterResources scaledResources = tester.assertResources("Scaling up since resource usage is too high",
14, 1, 1.4, 30.8, 30.8,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
assertEquals("Disk speed from min/max is used",
NodeResources.DiskSpeed.any, scaledResources.nodeResources().diskSpeed());
tester.deploy(application1, cluster1, scaledResources);
@@ -164,6 +167,7 @@ public class AutoscalingTest {
NodeResources resources = new NodeResources(1, 10, 10, 1);
var min = new ClusterResources( 2, 1, resources.with(NodeResources.DiskSpeed.any));
var max = new ClusterResources( 10, 1, resources.with(NodeResources.DiskSpeed.any));
+ var capacity = Capacity.from(min, max);
tester.deploy(application1, cluster1, Capacity.from(min, max));
// Redeployment without target: Uses current resource numbers with *requested* non-numbers (i.e disk-speed any)
@@ -176,7 +180,7 @@ public class AutoscalingTest {
// Autoscaling: Uses disk-speed any as well
tester.clock().advance(Duration.ofDays(2));
tester.addCpuMeasurements(0.8f, 1f, 120, application1);
- Autoscaler.Advice advice = tester.autoscale(application1, cluster1.id(), min, max);
+ Autoscaler.Advice advice = tester.autoscale(application1, cluster1.id(), capacity);
assertEquals(NodeResources.DiskSpeed.any, advice.target().get().nodeResources().diskSpeed());
@@ -187,6 +191,7 @@ public class AutoscalingTest {
NodeResources hostResources = new NodeResources(6, 100, 100, 1);
ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources( 6, 1, new NodeResources(2.4, 78, 79, 1));
+ var capacity = Capacity.from(min, max);
AutoscalingTester tester = new AutoscalingTester(hostResources);
ApplicationId application1 = tester.applicationId("application1");
@@ -200,7 +205,7 @@ public class AutoscalingTest {
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
tester.assertResources("Scaling up to limit since resource usage is too high",
6, 1, 2.4, 78.0, 79.0,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
}
@Test
@@ -208,6 +213,7 @@ public class AutoscalingTest {
NodeResources resources = new NodeResources(3, 100, 100, 1);
ClusterResources min = new ClusterResources( 4, 1, new NodeResources(1.8, 7.4, 8.5, 1));
ClusterResources max = new ClusterResources( 6, 1, new NodeResources(2.4, 78, 79, 1));
+ var capacity = Capacity.from(min, max);
AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2));
ApplicationId application1 = tester.applicationId("application1");
@@ -218,7 +224,7 @@ public class AutoscalingTest {
tester.addMeasurements(0.05f, 0.05f, 0.05f, 0, 120, application1);
tester.assertResources("Scaling down to limit since resource usage is low",
4, 1, 1.8, 7.7, 10.0,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
}
@Test
@@ -226,6 +232,7 @@ public class AutoscalingTest {
NodeResources hostResources = new NodeResources(6, 100, 100, 1);
ClusterResources min = new ClusterResources( 2, 1, NodeResources.unspecified());
ClusterResources max = new ClusterResources( 6, 1, NodeResources.unspecified());
+ var capacity = Capacity.from(min, max);
AutoscalingTester tester = new AutoscalingTester(hostResources);
ApplicationId application1 = tester.applicationId("application1");
@@ -244,7 +251,7 @@ public class AutoscalingTest {
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
tester.assertResources("Scaling up to limit since resource usage is too high",
4, 1, defaultResources,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
}
@Test
@@ -252,6 +259,7 @@ public class AutoscalingTest {
NodeResources hostResources = new NodeResources(30.0, 100, 100, 1);
ClusterResources min = new ClusterResources( 2, 2, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(18, 6, new NodeResources(100, 1000, 1000, 1));
+ var capacity = Capacity.from(min, max);
AutoscalingTester tester = new AutoscalingTester(hostResources);
ApplicationId application1 = tester.applicationId("application1");
@@ -264,7 +272,7 @@ public class AutoscalingTest {
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
tester.assertResources("Scaling up since resource usage is too high",
6, 6, 3.6, 8.0, 10.0,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
}
@Test
@@ -272,6 +280,7 @@ public class AutoscalingTest {
NodeResources resources = new NodeResources(3, 100, 100, 1);
ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
ClusterResources max = min;
+ var capacity = Capacity.from(min, max);
AutoscalingTester tester = new AutoscalingTester(resources);
ApplicationId application1 = tester.applicationId("application1");
@@ -281,13 +290,13 @@ public class AutoscalingTest {
tester.deploy(application1, cluster1, 5, 1, resources);
tester.clock().advance(Duration.ofDays(1));
tester.addCpuMeasurements(0.25f, 1f, 120, application1);
- assertTrue(tester.autoscale(application1, cluster1.id(), min, max).isEmpty());
+ assertTrue(tester.autoscale(application1, cluster1.id(), capacity).isEmpty());
}
@Test
public void prefers_remote_disk_when_no_local_match() {
- NodeResources resources = new NodeResources(3, 100, 100, 1);
- ClusterResources min = new ClusterResources( 2, 1, new NodeResources(3, 100, 50, 1));
+ NodeResources resources = new NodeResources(3, 100, 50, 1);
+ ClusterResources min = new ClusterResources( 2, 1, resources);
ClusterResources max = min;
// AutoscalingTester hardcodes 3Gb memory overhead:
Flavor localFlavor = new Flavor("local", new NodeResources(3, 97, 75, 1, DiskSpeed.fast, StorageType.local));
@@ -341,6 +350,7 @@ public class AutoscalingTest {
NodeResources resources = new NodeResources(3, 100, 100, 1);
ClusterResources min = new ClusterResources(2, 1, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(5, 1, new NodeResources(100, 1000, 1000, 1));
+ var capacity = Capacity.from(min, max);
AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2));
ApplicationId application1 = tester.applicationId("application1");
@@ -350,7 +360,7 @@ public class AutoscalingTest {
tester.deploy(application1, cluster1, 2, 1, resources);
tester.addMeasurements(0.5f, 0.6f, 0.7f, 1, false, true, 120, application1);
assertTrue("Not scaling up since nodes were measured while cluster was unstable",
- tester.autoscale(application1, cluster1.id(), min, max).isEmpty());
+ tester.autoscale(application1, cluster1.id(), capacity).isEmpty());
}
@Test
@@ -358,6 +368,7 @@ public class AutoscalingTest {
NodeResources resources = new NodeResources(3, 100, 100, 1);
ClusterResources min = new ClusterResources(2, 1, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(5, 1, new NodeResources(100, 1000, 1000, 1));
+ var capacity = Capacity.from(min, max);
AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2));
ApplicationId application1 = tester.applicationId("application1");
@@ -367,7 +378,7 @@ public class AutoscalingTest {
tester.deploy(application1, cluster1, 2, 1, resources);
tester.addMeasurements(0.5f, 0.6f, 0.7f, 1, true, false, 120, application1);
assertTrue("Not scaling up since nodes were measured while cluster was unstable",
- tester.autoscale(application1, cluster1.id(), min, max).isEmpty());
+ tester.autoscale(application1, cluster1.id(), capacity).isEmpty());
}
@Test
@@ -375,6 +386,7 @@ public class AutoscalingTest {
NodeResources resources = new NodeResources(3, 100, 100, 1);
ClusterResources min = new ClusterResources( 2, 2, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(20, 20, new NodeResources(100, 1000, 1000, 1));
+ var capacity = Capacity.from(min, max);
AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2));
ApplicationId application1 = tester.applicationId("application1");
@@ -387,7 +399,7 @@ public class AutoscalingTest {
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
tester.assertResources("Scaling up since resource usage is too high",
7, 7, 2.5, 80.0, 80.0,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
}
@Test
@@ -395,6 +407,7 @@ public class AutoscalingTest {
NodeResources resources = new NodeResources(3, 100, 100, 1);
ClusterResources min = new ClusterResources( 3, 1, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(21, 7, new NodeResources(100, 1000, 1000, 1));
+ var capacity = Capacity.from(min, max);
AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2));
ApplicationId application1 = tester.applicationId("application1");
@@ -409,7 +422,7 @@ public class AutoscalingTest {
t -> 1.0);
tester.assertResources("Scaling up since resource usage is too high, changing to 1 group is cheaper",
8, 1, 2.6, 83.3, 83.3,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
}
/** Same as above but mostly write traffic, which favors smaller groups */
@@ -418,6 +431,7 @@ public class AutoscalingTest {
NodeResources resources = new NodeResources(3, 100, 100, 1);
ClusterResources min = new ClusterResources( 3, 1, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(21, 7, new NodeResources(100, 1000, 1000, 1));
+ var capacity = Capacity.from(min, max);
AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2));
ApplicationId application1 = tester.applicationId("application1");
@@ -432,14 +446,15 @@ public class AutoscalingTest {
t -> 100.0);
tester.assertResources("Scaling down since resource usage is too high, changing to 1 group is cheaper",
4, 1, 2.1, 83.3, 83.3,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
}
@Test
public void test_autoscaling_group_size() {
NodeResources hostResources = new NodeResources(100, 1000, 1000, 100);
- ClusterResources min = new ClusterResources( 3, 2, new NodeResources(1, 1, 1, 1));
+ ClusterResources min = new ClusterResources( 2, 2, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(30, 30, new NodeResources(100, 100, 1000, 1));
+ var capacity = Capacity.from(min, max);
AutoscalingTester tester = new AutoscalingTester(hostResources);
ApplicationId application1 = tester.applicationId("application1");
@@ -453,7 +468,7 @@ public class AutoscalingTest {
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
tester.assertResources("Increase group size to reduce memory load",
8, 2, 12.4, 96.2, 62.5,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
}
@Test
@@ -461,6 +476,7 @@ public class AutoscalingTest {
NodeResources hostResources = new NodeResources(6, 100, 100, 1);
ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(20, 1, new NodeResources(100, 1000, 1000, 1));
+ var capacity = Capacity.from(min, max);
AutoscalingTester tester = new AutoscalingTester(hostResources);
ApplicationId application1 = tester.applicationId("application1");
@@ -473,7 +489,7 @@ public class AutoscalingTest {
tester.addMemMeasurements(0.02f, 0.95f, 120, application1);
tester.assertResources("Scaling down",
6, 1, 2.9, 4.0, 95.0,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
}
@Test
@@ -481,6 +497,7 @@ public class AutoscalingTest {
NodeResources hostResources = new NodeResources(6, 100, 100, 1);
ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(20, 1, new NodeResources(100, 1000, 1000, 1));
+ var capacity = Capacity.from(min, max);
AutoscalingTester tester = new AutoscalingTester(hostResources);
ApplicationId application1 = tester.applicationId("application1");
@@ -492,7 +509,7 @@ public class AutoscalingTest {
tester.addMemMeasurements(0.02f, 0.95f, 120, application1);
tester.clock().advance(Duration.ofMinutes(-10 * 5));
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
- assertTrue(tester.autoscale(application1, cluster1.id(), min, max).target().isEmpty());
+ assertTrue(tester.autoscale(application1, cluster1.id(), capacity).target().isEmpty());
// Trying the same later causes autoscaling
tester.clock().advance(Duration.ofDays(2));
@@ -501,7 +518,7 @@ public class AutoscalingTest {
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
tester.assertResources("Scaling down",
6, 1, 1.4, 4.0, 95.0,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
}
@Test
@@ -509,9 +526,11 @@ public class AutoscalingTest {
NodeResources hostResources = new NodeResources(60, 100, 1000, 10);
ClusterResources min = new ClusterResources(2, 1, new NodeResources( 2, 20, 200, 1));
ClusterResources max = new ClusterResources(4, 1, new NodeResources(60, 100, 1000, 1));
+ var capacity = Capacity.from(min, max);
{ // No memory tax
- AutoscalingTester tester = new AutoscalingTester(hostResources, new OnlySubtractingWhenForecastingCalculator(0));
+ AutoscalingTester tester = new AutoscalingTester(Environment.prod, hostResources,
+ new OnlySubtractingWhenForecastingCalculator(0));
ApplicationId application1 = tester.applicationId("app1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.content, "cluster1");
@@ -522,11 +541,12 @@ public class AutoscalingTest {
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
tester.assertResources("Scaling up",
4, 1, 6.7, 20.5, 200,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
}
{ // 15 Gb memory tax
- AutoscalingTester tester = new AutoscalingTester(hostResources, new OnlySubtractingWhenForecastingCalculator(15));
+ AutoscalingTester tester = new AutoscalingTester(Environment.prod, hostResources,
+ new OnlySubtractingWhenForecastingCalculator(15));
ApplicationId application1 = tester.applicationId("app1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.content, "cluster1");
@@ -537,7 +557,7 @@ public class AutoscalingTest {
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
tester.assertResources("Scaling up",
4, 1, 6.7, 35.5, 200,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
}
}
@@ -545,6 +565,7 @@ public class AutoscalingTest {
public void test_autoscaling_with_dynamic_provisioning() {
ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
ClusterResources max = new ClusterResources(20, 1, new NodeResources(100, 1000, 1000, 1));
+ var capacity = Capacity.from(min, max);
List<Flavor> flavors = new ArrayList<>();
flavors.add(new Flavor("aws-xlarge", new NodeResources(3, 200, 100, 1, NodeResources.DiskSpeed.fast, NodeResources.StorageType.remote)));
flavors.add(new Flavor("aws-large", new NodeResources(3, 150, 100, 1, NodeResources.DiskSpeed.fast, NodeResources.StorageType.remote)));
@@ -567,7 +588,7 @@ public class AutoscalingTest {
tester.addMemMeasurements(0.9f, 0.6f, 120, application1);
ClusterResources scaledResources = tester.assertResources("Scaling up since resource usage is too high.",
8, 1, 3, 83, 34.3,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
tester.deploy(application1, cluster1, scaledResources);
tester.deactivateRetired(application1, cluster1, scaledResources);
@@ -578,7 +599,7 @@ public class AutoscalingTest {
tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
tester.assertResources("Scaling down since resource usage has gone down",
5, 1, 3, 83, 36.0,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
}
@Test
@@ -586,6 +607,7 @@ public class AutoscalingTest {
NodeResources resources = new NodeResources(3, 100, 100, 1);
ClusterResources min = new ClusterResources( 1, 1, resources);
ClusterResources max = new ClusterResources(10, 1, resources);
+ var capacity = Capacity.from(min, max);
AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2));
ApplicationId application1 = tester.applicationId("application1");
@@ -599,17 +621,17 @@ public class AutoscalingTest {
// (no read share stored)
tester.assertResources("Advice to scale up since we set aside for bcp by default",
7, 1, 3, 100, 100,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
tester.storeReadShare(0.25, 0.5, application1);
tester.assertResources("Half of global share is the same as the default assumption used above",
7, 1, 3, 100, 100,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
tester.storeReadShare(0.5, 0.5, application1);
tester.assertResources("Advice to scale down since we don't need room for bcp",
4, 1, 3, 100, 100,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
}
@@ -620,6 +642,7 @@ public class AutoscalingTest {
NodeResources maxResources = new NodeResources(10, 100, 100, 1);
ClusterResources min = new ClusterResources(5, 1, minResources);
ClusterResources max = new ClusterResources(5, 1, maxResources);
+ var capacity = Capacity.from(min, max);
AutoscalingTester tester = new AutoscalingTester(maxResources.withVcpu(maxResources.vcpu() * 2));
ApplicationId application1 = tester.applicationId("application1");
@@ -633,7 +656,7 @@ public class AutoscalingTest {
// (no query rate data)
tester.assertResources("Scale up since we assume we need 2x cpu for growth when no data scaling time data",
5, 1, 6.3, 100, 100,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
tester.setScalingDuration(application1, cluster1.id(), Duration.ofMinutes(5));
tester.addQueryRateMeasurements(application1, cluster1.id(),
@@ -643,7 +666,7 @@ public class AutoscalingTest {
tester.addCpuMeasurements(0.25f, 1f, 100, application1);
tester.assertResources("Scale down since observed growth is slower than scaling time",
5, 1, 3.4, 100, 100,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
tester.clearQueryRateMeasurements(application1, cluster1.id());
@@ -655,7 +678,7 @@ public class AutoscalingTest {
tester.addCpuMeasurements(0.25f, 1f, 100, application1);
tester.assertResources("Scale up since observed growth is faster than scaling time",
5, 1, 10.0, 100, 100,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
}
@Test
@@ -665,6 +688,7 @@ public class AutoscalingTest {
NodeResources maxResources = new NodeResources(10, 100, 100, 1);
ClusterResources min = new ClusterResources(5, 1, minResources);
ClusterResources max = new ClusterResources(5, 1, maxResources);
+ var capacity = Capacity.from(min, max);
AutoscalingTester tester = new AutoscalingTester(maxResources.withVcpu(maxResources.vcpu() * 2));
ApplicationId application1 = tester.applicationId("application1");
@@ -681,35 +705,35 @@ public class AutoscalingTest {
tester.addLoadMeasurements(application1, cluster1.id(), 100, t -> t == 0 ? 20.0 : 10.0, t -> 10.0);
tester.assertResources("Query and write load is equal -> scale up somewhat",
5, 1, 7.3, 100, 100,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
tester.addCpuMeasurements(0.4f, 1f, 100, application1);
tester.clock().advance(Duration.ofMinutes(-100 * 5));
tester.addLoadMeasurements(application1, cluster1.id(), 100, t -> t == 0 ? 80.0 : 40.0, t -> 10.0);
tester.assertResources("Query load is 4x write load -> scale up more",
5, 1, 9.5, 100, 100,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
tester.addCpuMeasurements(0.3f, 1f, 100, application1);
tester.clock().advance(Duration.ofMinutes(-100 * 5));
tester.addLoadMeasurements(application1, cluster1.id(), 100, t -> t == 0 ? 20.0 : 10.0, t -> 100.0);
tester.assertResources("Write load is 10x query load -> scale down",
5, 1, 2.9, 100, 100,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
tester.addCpuMeasurements(0.4f, 1f, 100, application1);
tester.clock().advance(Duration.ofMinutes(-100 * 5));
tester.addLoadMeasurements(application1, cluster1.id(), 100, t -> t == 0 ? 20.0 : 10.0, t-> 0.0);
tester.assertResources("Query only -> largest possible",
5, 1, 10.0, 100, 100,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
tester.addCpuMeasurements(0.4f, 1f, 100, application1);
tester.clock().advance(Duration.ofMinutes(-100 * 5));
tester.addLoadMeasurements(application1, cluster1.id(), 100, t -> 0.0, t -> 10.0);
tester.assertResources("Write only -> smallest possible",
5, 1, 2.1, 100, 100,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
}
@Test
@@ -717,6 +741,7 @@ public class AutoscalingTest {
NodeResources resources = new NodeResources(1, 4, 50, 1);
ClusterResources min = new ClusterResources( 2, 1, resources);
ClusterResources max = new ClusterResources(3, 1, resources);
+ var capacity = Capacity.from(min, max);
AutoscalingTester tester = new AutoscalingTester(resources.withVcpu(resources.vcpu() * 2));
ApplicationId application1 = tester.applicationId("application1");
ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");
@@ -728,7 +753,47 @@ public class AutoscalingTest {
tester.assertResources("Advice to scale up since observed growth is much faster than scaling time",
3, 1, 1, 4, 50,
- tester.autoscale(application1, cluster1.id(), min, max).target());
+ tester.autoscale(application1, cluster1.id(), capacity).target());
+ }
+
+ @Test
+ public void test_autoscaling_in_dev() {
+ NodeResources resources = new NodeResources(1, 4, 50, 1);
+ ClusterResources min = new ClusterResources( 1, 1, resources);
+ ClusterResources max = new ClusterResources(3, 1, resources);
+ Capacity capacity = Capacity.from(min, max, false, true);
+
+ AutoscalingTester tester = new AutoscalingTester(Environment.dev, resources.withVcpu(resources.vcpu() * 2));
+ ApplicationId application1 = tester.applicationId("application1");
+ ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");
+
+ tester.deploy(application1, cluster1, capacity);
+ tester.addQueryRateMeasurements(application1, cluster1.id(),
+ 500, t -> 100.0);
+ tester.addCpuMeasurements(1.0f, 1f, 10, application1);
+ assertTrue("Not attempting to scale up because policies dictate we'll only get one node",
+ tester.autoscale(application1, cluster1.id(), capacity).target().isEmpty());
+ }
+
+ /** Same setup as test_autoscaling_in_dev(), just with required = true */
+ @Test
+ public void test_autoscaling_in_dev_with_required_resources() {
+ NodeResources resources = new NodeResources(1, 4, 50, 1);
+ ClusterResources min = new ClusterResources( 1, 1, resources);
+ ClusterResources max = new ClusterResources(3, 1, resources);
+ Capacity capacity = Capacity.from(min, max, true, true);
+
+ AutoscalingTester tester = new AutoscalingTester(Environment.dev, resources.withVcpu(resources.vcpu() * 2));
+ ApplicationId application1 = tester.applicationId("application1");
+ ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");
+
+ tester.deploy(application1, cluster1, capacity);
+ tester.addQueryRateMeasurements(application1, cluster1.id(),
+ 500, t -> 100.0);
+ tester.addCpuMeasurements(1.0f, 1f, 10, application1);
+ tester.assertResources("We scale up even in dev because resources are required",
+ 3, 1, 1.0, 4, 50,
+ tester.autoscale(application1, cluster1.id(), capacity).target());
}
/**
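The test hunks above all apply the same mechanical change: the separate min/max ClusterResources arguments to autoscale() are folded into a single Capacity. Below is a minimal sketch of how those Capacity values are built, using only the factory overloads that appear in this diff (Capacity.from(min, max) and Capacity.from(min, max, required, canFail)); it is illustrative only and not part of the commit.

import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.NodeResources;

class CapacityExamples {

    // Two-argument form used by the production-zone tests above.
    static Capacity prodCapacity() {
        ClusterResources min = new ClusterResources( 2, 1, new NodeResources(1, 1, 1, 1));
        ClusterResources max = new ClusterResources(20, 1, new NodeResources(100, 1000, 1000, 1));
        return Capacity.from(min, max);
    }

    // Four-argument form used by the new dev-environment tests above; the booleans are (required, canFail).
    static Capacity devCapacity(boolean required) {
        NodeResources resources = new NodeResources(1, 4, 50, 1);
        return Capacity.from(new ClusterResources(1, 1, resources),
                             new ClusterResources(3, 1, resources),
                             required,
                             true);
    }
}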
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java
index c45b6caf14c..8d59181a027 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java
@@ -48,11 +48,15 @@ class AutoscalingTester {
/** Creates an autoscaling tester with a single host type ready */
public AutoscalingTester(NodeResources hostResources) {
- this(hostResources, null);
+ this(Environment.prod, hostResources);
}
- public AutoscalingTester(NodeResources hostResources, HostResourcesCalculator resourcesCalculator) {
- this(new Zone(Environment.prod, RegionName.from("us-east")), List.of(new Flavor("hostFlavor", hostResources)), resourcesCalculator);
+ public AutoscalingTester(Environment environment, NodeResources hostResources) {
+ this(environment, hostResources, null);
+ }
+
+ public AutoscalingTester(Environment environment, NodeResources hostResources, HostResourcesCalculator resourcesCalculator) {
+ this(new Zone(environment, RegionName.from("us-east")), List.of(new Flavor("hostFlavor", hostResources)), resourcesCalculator);
provisioningTester.makeReadyNodes(20, "hostFlavor", NodeType.host, 8);
provisioningTester.activateTenantHosts();
}
@@ -251,6 +255,7 @@ class AutoscalingTester {
cluster.exclusive(),
cluster.minResources(),
cluster.maxResources(),
+ cluster.required(),
cluster.suggestedResources(),
cluster.targetResources(),
List.of(), // Remove scaling events
@@ -295,10 +300,9 @@ class AutoscalingTester {
((MemoryMetricsDb)nodeMetricsDb()).clearClusterMetrics(application, cluster);
}
- public Autoscaler.Advice autoscale(ApplicationId applicationId, ClusterSpec.Id clusterId,
- ClusterResources min, ClusterResources max) {
+ public Autoscaler.Advice autoscale(ApplicationId applicationId, ClusterSpec.Id clusterId, Capacity capacity) {
Application application = nodeRepository().applications().get(applicationId).orElse(Application.empty(applicationId))
- .withCluster(clusterId, false, min, max);
+ .withCluster(clusterId, false, capacity);
try (Mutex lock = nodeRepository().nodes().lock(applicationId)) {
nodeRepository().applications().put(application, lock);
}
@@ -309,7 +313,7 @@ class AutoscalingTester {
public Autoscaler.Advice suggest(ApplicationId applicationId, ClusterSpec.Id clusterId,
ClusterResources min, ClusterResources max) {
Application application = nodeRepository().applications().get(applicationId).orElse(Application.empty(applicationId))
- .withCluster(clusterId, false, min, max);
+ .withCluster(clusterId, false, Capacity.from(min, max));
try (Mutex lock = nodeRepository().nodes().lock(applicationId)) {
nodeRepository().applications().put(application, lock);
}
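AutoscalingTester now takes the zone environment explicitly, which is what lets the new dev-environment tests run against a dev zone. A short usage sketch, limited to the constructors visible in the hunk above; hostResources is assumed to be a NodeResources defined as in those tests.

// Prod zone, as before (the one-argument constructor now delegates to the Environment-taking one):
AutoscalingTester prodTester = new AutoscalingTester(hostResources);
// Dev zone, as used by test_autoscaling_in_dev():
AutoscalingTester devTester = new AutoscalingTester(Environment.dev, hostResources);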
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
index 0e37d953d2d..bd7300ad6bf 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
@@ -2,12 +2,12 @@
package com.yahoo.vespa.hosted.provision.autoscale;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.test.ManualClock;
import com.yahoo.vespa.hosted.provision.applications.Application;
-import com.yahoo.vespa.hosted.provision.applications.AutoscalingStatus;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.applications.Status;
import org.junit.Test;
@@ -15,7 +15,6 @@ import org.junit.Test;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
-import java.util.Optional;
import java.util.function.IntFunction;
import static org.junit.Assert.assertEquals;
@@ -73,14 +72,9 @@ public class ClusterModelTest {
}
private Cluster cluster(NodeResources resources) {
- return new Cluster(ClusterSpec.Id.from("test"),
- false,
- new ClusterResources(5, 1, resources),
- new ClusterResources(5, 1, resources),
- Optional.empty(),
- Optional.empty(),
- List.of(),
- AutoscalingStatus.empty());
+ return Cluster.create(ClusterSpec.Id.from("test"),
+ false,
+ Capacity.from(new ClusterResources(5, 1, resources)));
}
/** Creates the given number of measurements, spaced 5 minutes between, using the given function */
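The fixture change above replaces the long positional Cluster constructor with a factory that derives the cluster's limits from a Capacity; this is also why the serializer test below gains a required flag. A hedged sketch of the factory call, matching the single-ClusterResources Capacity.from overload used in the hunk; resources is assumed to be a NodeResources from the surrounding test.

// Fixed-size cluster: min and max collapse to the same resources.
Cluster cluster = Cluster.create(ClusterSpec.Id.from("test"),
                                 false,   // not exclusive
                                 Capacity.from(new ClusterResources(5, 1, resources)));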
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializerTest.java
index 7266da9ff46..e34f63d8062 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializerTest.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.hosted.provision.persistence;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.NodeResources;
@@ -33,6 +34,7 @@ public class ApplicationSerializerTest {
false,
new ClusterResources( 8, 4, new NodeResources(1, 2, 3, 4)),
new ClusterResources(12, 6, new NodeResources(3, 6, 21, 24)),
+ true,
Optional.empty(),
Optional.empty(),
List.of(),
@@ -42,6 +44,7 @@ public class ApplicationSerializerTest {
true,
new ClusterResources( 8, 4, minResources),
new ClusterResources(14, 7, new NodeResources(3, 6, 21, 24)),
+ false,
Optional.of(new Cluster.Suggestion(new ClusterResources(20, 10,
new NodeResources(0.5, 4, 14, 16)),
Instant.ofEpochMilli(1234L))),
@@ -72,6 +75,7 @@ public class ApplicationSerializerTest {
assertEquals(originalCluster.exclusive(), serializedCluster.exclusive());
assertEquals(originalCluster.minResources(), serializedCluster.minResources());
assertEquals(originalCluster.maxResources(), serializedCluster.maxResources());
+ assertEquals(originalCluster.required(), serializedCluster.required());
assertEquals(originalCluster.suggestedResources(), serializedCluster.suggestedResources());
assertEquals(originalCluster.targetResources(), serializedCluster.targetResources());
assertEquals(originalCluster.scalingEvents(), serializedCluster.scalingEvents());
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
index 20546cc5bd9..db165aae919 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
@@ -1015,10 +1015,10 @@ public class ProvisioningTest {
allHosts.addAll(content1);
Function<Integer, Capacity> capacity = count -> Capacity.from(new ClusterResources(count, 1, NodeResources.unspecified()), required, true);
- int expectedContainer0Size = tester.capacityPolicies().decideSize(container0Size, capacity.apply(container0Size), containerCluster0, application);
- int expectedContainer1Size = tester.capacityPolicies().decideSize(container1Size, capacity.apply(container1Size), containerCluster1, application);
- int expectedContent0Size = tester.capacityPolicies().decideSize(content0Size, capacity.apply(content0Size), contentCluster0, application);
- int expectedContent1Size = tester.capacityPolicies().decideSize(content1Size, capacity.apply(content1Size), contentCluster1, application);
+ int expectedContainer0Size = tester.decideSize(container0Size, capacity.apply(container0Size), containerCluster0, application);
+ int expectedContainer1Size = tester.decideSize(container1Size, capacity.apply(container1Size), containerCluster1, application);
+ int expectedContent0Size = tester.decideSize(content0Size, capacity.apply(content0Size), contentCluster0, application);
+ int expectedContent1Size = tester.decideSize(content1Size, capacity.apply(content1Size), contentCluster1, application);
assertEquals("Hosts in each group cluster is disjunct and the total number of unretired nodes is correct",
expectedContainer0Size + expectedContainer1Size + expectedContent0Size + expectedContent1Size,
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
index 6d525762ecc..6ca93671087 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
@@ -152,6 +152,10 @@ public class ProvisioningTester {
public NodeList getNodes(ApplicationId id, Node.State ... inState) { return nodeRepository.nodes().list(inState).owner(id); }
public InMemoryFlagSource flagSource() { return (InMemoryFlagSource) nodeRepository.flagSource(); }
+ public int decideSize(int size, Capacity capacity, ClusterSpec cluster, ApplicationId application) {
+ return capacityPolicies.decideSize(size, capacity.isRequired(), capacity.canFail(), application.instance().isTester(), cluster);
+ }
+
public Node patchNode(Node node, UnaryOperator<Node> patcher) {
return patchNodes(List.of(node), patcher).get(0);
}
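With the helper added above, ProvisioningTest no longer reaches into capacityPolicies() directly; the tester unpacks the Capacity into the flags the policy expects. A hedged usage sketch; the cluster and application names are taken from the test above, the rest is illustrative.

// Resolve the node count the capacity policies will actually grant for a requested size of 5.
Capacity requested = Capacity.from(new ClusterResources(5, 1, NodeResources.unspecified()),
                                   true,   // required
                                   true);  // canFail
int expectedSize = tester.decideSize(5, requested, containerCluster0, application);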
diff --git a/predicate-search-core/abi-spec.json b/predicate-search-core/abi-spec.json
new file mode 100644
index 00000000000..19dbb8a5a76
--- /dev/null
+++ b/predicate-search-core/abi-spec.json
@@ -0,0 +1,309 @@
+{
+ "com.yahoo.document.predicate.BinaryFormat": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public static byte[] encode(com.yahoo.document.predicate.Predicate)",
+ "public static com.yahoo.document.predicate.Predicate decode(byte[])"
+ ],
+ "fields": []
+ },
+ "com.yahoo.document.predicate.BooleanPredicate": {
+ "superClass": "com.yahoo.document.predicate.PredicateValue",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(boolean)",
+ "public boolean getValue()",
+ "public com.yahoo.document.predicate.BooleanPredicate setValue(boolean)",
+ "public com.yahoo.document.predicate.BooleanPredicate clone()",
+ "public int hashCode()",
+ "public boolean equals(java.lang.Object)",
+ "protected void appendTo(java.lang.StringBuilder)",
+ "public bridge synthetic com.yahoo.document.predicate.Predicate clone()",
+ "public bridge synthetic java.lang.Object clone()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.document.predicate.Conjunction": {
+ "superClass": "com.yahoo.document.predicate.PredicateOperator",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public varargs void <init>(com.yahoo.document.predicate.Predicate[])",
+ "public void <init>(java.util.List)",
+ "public com.yahoo.document.predicate.Conjunction addOperand(com.yahoo.document.predicate.Predicate)",
+ "public com.yahoo.document.predicate.Conjunction addOperands(java.util.Collection)",
+ "public com.yahoo.document.predicate.Conjunction setOperands(java.util.Collection)",
+ "public java.util.List getOperands()",
+ "public com.yahoo.document.predicate.Conjunction clone()",
+ "public int hashCode()",
+ "public boolean equals(java.lang.Object)",
+ "protected void appendTo(java.lang.StringBuilder)",
+ "public bridge synthetic com.yahoo.document.predicate.Predicate clone()",
+ "public bridge synthetic java.lang.Object clone()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.document.predicate.Disjunction": {
+ "superClass": "com.yahoo.document.predicate.PredicateOperator",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public varargs void <init>(com.yahoo.document.predicate.Predicate[])",
+ "public void <init>(java.util.List)",
+ "public com.yahoo.document.predicate.Disjunction addOperand(com.yahoo.document.predicate.Predicate)",
+ "public com.yahoo.document.predicate.Disjunction addOperands(java.util.Collection)",
+ "public com.yahoo.document.predicate.Disjunction setOperands(java.util.Collection)",
+ "public java.util.List getOperands()",
+ "public com.yahoo.document.predicate.Disjunction clone()",
+ "public int hashCode()",
+ "public boolean equals(java.lang.Object)",
+ "protected void appendTo(java.lang.StringBuilder)",
+ "public bridge synthetic com.yahoo.document.predicate.Predicate clone()",
+ "public bridge synthetic java.lang.Object clone()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.document.predicate.FeatureConjunction": {
+ "superClass": "com.yahoo.document.predicate.PredicateOperator",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(java.util.List)",
+ "public static boolean isValidFeatureConjunctionOperand(com.yahoo.document.predicate.Predicate)",
+ "public java.util.List getOperands()",
+ "protected void appendTo(java.lang.StringBuilder)",
+ "public com.yahoo.document.predicate.FeatureConjunction clone()",
+ "public int hashCode()",
+ "public boolean equals(java.lang.Object)",
+ "public bridge synthetic com.yahoo.document.predicate.Predicate clone()",
+ "public bridge synthetic java.lang.Object clone()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.document.predicate.FeatureRange": {
+ "superClass": "com.yahoo.document.predicate.PredicateValue",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(java.lang.String)",
+ "public void <init>(java.lang.String, java.lang.Long, java.lang.Long)",
+ "public com.yahoo.document.predicate.FeatureRange setKey(java.lang.String)",
+ "public java.lang.String getKey()",
+ "public com.yahoo.document.predicate.FeatureRange setFromInclusive(java.lang.Long)",
+ "public java.lang.Long getFromInclusive()",
+ "public com.yahoo.document.predicate.FeatureRange setToInclusive(java.lang.Long)",
+ "public java.lang.Long getToInclusive()",
+ "public void addPartition(com.yahoo.document.predicate.RangePartition)",
+ "public java.util.List getEdgePartitions()",
+ "public java.util.List getPartitions()",
+ "public void clearPartitions()",
+ "public com.yahoo.document.predicate.FeatureRange clone()",
+ "public int hashCode()",
+ "public boolean equals(java.lang.Object)",
+ "protected void appendTo(java.lang.StringBuilder)",
+ "public static com.yahoo.document.predicate.FeatureRange buildFromMixedIn(java.lang.String, java.util.List, int)",
+ "public bridge synthetic com.yahoo.document.predicate.Predicate clone()",
+ "public bridge synthetic java.lang.Object clone()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.document.predicate.FeatureSet": {
+ "superClass": "com.yahoo.document.predicate.PredicateValue",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public varargs void <init>(java.lang.String, java.lang.String[])",
+ "public void <init>(java.lang.String, java.util.Collection)",
+ "public com.yahoo.document.predicate.FeatureSet setKey(java.lang.String)",
+ "public java.lang.String getKey()",
+ "public com.yahoo.document.predicate.FeatureSet addValue(java.lang.String)",
+ "public com.yahoo.document.predicate.FeatureSet addValues(java.util.Collection)",
+ "public com.yahoo.document.predicate.FeatureSet setValues(java.util.Collection)",
+ "public java.util.Set getValues()",
+ "public com.yahoo.document.predicate.FeatureSet clone()",
+ "public int hashCode()",
+ "public boolean equals(java.lang.Object)",
+ "protected void appendTo(java.lang.StringBuilder)",
+ "protected void appendNegatedTo(java.lang.StringBuilder)",
+ "public bridge synthetic com.yahoo.document.predicate.Predicate clone()",
+ "public bridge synthetic java.lang.Object clone()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.document.predicate.Negation": {
+ "superClass": "com.yahoo.document.predicate.PredicateOperator",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(com.yahoo.document.predicate.Predicate)",
+ "public com.yahoo.document.predicate.Negation setOperand(com.yahoo.document.predicate.Predicate)",
+ "public com.yahoo.document.predicate.Predicate getOperand()",
+ "public java.util.List getOperands()",
+ "public com.yahoo.document.predicate.Negation clone()",
+ "public int hashCode()",
+ "public boolean equals(java.lang.Object)",
+ "protected void appendTo(java.lang.StringBuilder)",
+ "public bridge synthetic com.yahoo.document.predicate.Predicate clone()",
+ "public bridge synthetic java.lang.Object clone()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.document.predicate.Predicate": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "java.lang.Cloneable"
+ ],
+ "attributes": [
+ "public",
+ "abstract"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public com.yahoo.document.predicate.Predicate clone()",
+ "public final java.lang.String toString()",
+ "protected abstract void appendTo(java.lang.StringBuilder)",
+ "protected static void appendQuotedTo(java.lang.String, java.lang.StringBuilder)",
+ "public static java.lang.String asciiEncode(java.lang.String)",
+ "public static java.lang.String asciiDecode(java.lang.String)",
+ "public static com.yahoo.document.predicate.Predicate fromBinary(byte[])",
+ "public static com.yahoo.document.predicate.Predicate fromString(java.lang.String)",
+ "public bridge synthetic java.lang.Object clone()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.document.predicate.PredicateHash": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public static long hash64(java.lang.String)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.document.predicate.PredicateOperator": {
+ "superClass": "com.yahoo.document.predicate.Predicate",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "abstract"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public abstract java.util.List getOperands()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.document.predicate.Predicates$FeatureBuilder": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(java.lang.String)",
+ "public com.yahoo.document.predicate.FeatureRange lessThan(long)",
+ "public com.yahoo.document.predicate.FeatureRange lessThanOrEqualTo(long)",
+ "public com.yahoo.document.predicate.FeatureRange greaterThan(long)",
+ "public com.yahoo.document.predicate.FeatureRange greaterThanOrEqualTo(long)",
+ "public com.yahoo.document.predicate.FeatureRange inRange(long, long)",
+ "public com.yahoo.document.predicate.Negation notInRange(long, long)",
+ "public varargs com.yahoo.document.predicate.FeatureSet inSet(java.lang.String[])",
+ "public varargs com.yahoo.document.predicate.Negation notInSet(java.lang.String[])"
+ ],
+ "fields": []
+ },
+ "com.yahoo.document.predicate.Predicates": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public static varargs com.yahoo.document.predicate.Conjunction and(com.yahoo.document.predicate.Predicate[])",
+ "public static varargs com.yahoo.document.predicate.Disjunction or(com.yahoo.document.predicate.Predicate[])",
+ "public static com.yahoo.document.predicate.Negation not(com.yahoo.document.predicate.Predicate)",
+ "public static com.yahoo.document.predicate.BooleanPredicate value(boolean)",
+ "public static com.yahoo.document.predicate.Predicates$FeatureBuilder feature(java.lang.String)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.document.predicate.RangeEdgePartition": {
+ "superClass": "com.yahoo.document.predicate.RangePartition",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(java.lang.String, long, int, int)",
+ "public long getValue()",
+ "public int getLowerBound()",
+ "public int getUpperBound()",
+ "public com.yahoo.document.predicate.RangeEdgePartition clone()",
+ "public int hashCode()",
+ "public boolean equals(java.lang.Object)",
+ "protected void appendTo(java.lang.StringBuilder)",
+ "public long encodeBounds()",
+ "public bridge synthetic com.yahoo.document.predicate.RangePartition clone()",
+ "public bridge synthetic com.yahoo.document.predicate.Predicate clone()",
+ "public bridge synthetic java.lang.Object clone()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.document.predicate.RangePartition": {
+ "superClass": "com.yahoo.document.predicate.PredicateValue",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(java.lang.String)",
+ "public void <init>(java.lang.String, long, long, boolean)",
+ "public java.lang.String getLabel()",
+ "public com.yahoo.document.predicate.RangePartition clone()",
+ "public int hashCode()",
+ "public boolean equals(java.lang.Object)",
+ "protected void appendTo(java.lang.StringBuilder)",
+ "public bridge synthetic com.yahoo.document.predicate.Predicate clone()",
+ "public bridge synthetic java.lang.Object clone()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.document.predicate.SimplePredicates": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public static com.yahoo.document.predicate.Predicate newPredicate()",
+ "public static com.yahoo.document.predicate.Predicate newString(java.lang.String)",
+ "public static varargs java.util.List newStrings(java.lang.String[])"
+ ],
+ "fields": []
+ }
+}
\ No newline at end of file
diff --git a/predicate-search-core/pom.xml b/predicate-search-core/pom.xml
index 08354fd5826..c30cf4633a8 100644
--- a/predicate-search-core/pom.xml
+++ b/predicate-search-core/pom.xml
@@ -42,6 +42,10 @@
<build>
<plugins>
<plugin>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>abi-check-plugin</artifactId>
+ </plugin>
+ <plugin>
<groupId>org.antlr</groupId>
<artifactId>antlr3-maven-plugin</artifactId>
<executions>
diff --git a/predicate-search/abi-spec.json b/predicate-search/abi-spec.json
new file mode 100644
index 00000000000..c110ffe7a43
--- /dev/null
+++ b/predicate-search/abi-spec.json
@@ -0,0 +1,159 @@
+{
+ "com.yahoo.search.predicate.Config$Builder": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public com.yahoo.search.predicate.Config$Builder setArity(int)",
+ "public com.yahoo.search.predicate.Config$Builder setLowerBound(long)",
+ "public com.yahoo.search.predicate.Config$Builder setUpperBound(long)",
+ "public com.yahoo.search.predicate.Config$Builder setUseConjunctionAlgorithm(boolean)",
+ "public com.yahoo.search.predicate.Config build()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.search.predicate.Config": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void writeToOutputStream(java.io.DataOutputStream)",
+ "public static com.yahoo.search.predicate.Config fromInputStream(java.io.DataInputStream)"
+ ],
+ "fields": [
+ "public final int arity",
+ "public final long lowerBound",
+ "public final long upperBound",
+ "public final boolean useConjunctionAlgorithm"
+ ]
+ },
+ "com.yahoo.search.predicate.Hit": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "java.lang.Comparable"
+ ],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(int)",
+ "public void <init>(int, long)",
+ "public java.lang.String toString()",
+ "public boolean equals(java.lang.Object)",
+ "public int hashCode()",
+ "public int getDocId()",
+ "public long getSubquery()",
+ "public int compareTo(com.yahoo.search.predicate.Hit)",
+ "public bridge synthetic int compareTo(java.lang.Object)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.search.predicate.PredicateIndex$Searcher": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public java.util.stream.Stream search(com.yahoo.search.predicate.PredicateQuery)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.search.predicate.PredicateIndex": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void rebuildPostingListCache()",
+ "public com.yahoo.search.predicate.PredicateIndex$Searcher searcher()",
+ "public void writeToOutputStream(java.io.DataOutputStream)",
+ "public static com.yahoo.search.predicate.PredicateIndex fromInputStream(java.io.DataInputStream)"
+ ],
+ "fields": []
+ },
+ "com.yahoo.search.predicate.PredicateIndexBuilder$PredicateIndexStats": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(java.util.List, com.yahoo.search.predicate.index.SimpleIndex$Builder, com.yahoo.search.predicate.index.SimpleIndex$Builder, com.yahoo.search.predicate.index.PredicateIntervalStore$Builder, com.yahoo.search.predicate.index.conjunction.ConjunctionIndexBuilder, int, int)",
+ "public void putValues(java.util.Map)",
+ "public java.lang.String toString()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.search.predicate.PredicateIndexBuilder": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(int)",
+ "public void <init>(int, long, long)",
+ "public void <init>(com.yahoo.search.predicate.Config)",
+ "public void indexDocument(int, com.yahoo.document.predicate.Predicate)",
+ "public com.yahoo.search.predicate.PredicateIndex build()",
+ "public int getZeroConstraintDocCount()",
+ "public com.yahoo.search.predicate.PredicateIndexBuilder$PredicateIndexStats getStats()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.search.predicate.PredicateQuery$Feature": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(java.lang.String, java.lang.String, long)"
+ ],
+ "fields": [
+ "public final java.lang.String key",
+ "public final java.lang.String value",
+ "public final long subqueryBitmap",
+ "public final long featureHash"
+ ]
+ },
+ "com.yahoo.search.predicate.PredicateQuery$RangeFeature": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(java.lang.String, long, long)"
+ ],
+ "fields": [
+ "public final java.lang.String key",
+ "public final long value",
+ "public final long subqueryBitmap"
+ ]
+ },
+ "com.yahoo.search.predicate.PredicateQuery": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public void addFeature(java.lang.String, java.lang.String)",
+ "public void addFeature(java.lang.String, java.lang.String, long)",
+ "public void addRangeFeature(java.lang.String, long)",
+ "public void addRangeFeature(java.lang.String, long, long)",
+ "public java.util.List getFeatures()",
+ "public java.util.List getRangeFeatures()"
+ ],
+ "fields": []
+ }
+}
\ No newline at end of file
diff --git a/predicate-search/pom.xml b/predicate-search/pom.xml
index 02dd7d31ec1..3f0b8b3de4d 100644
--- a/predicate-search/pom.xml
+++ b/predicate-search/pom.xml
@@ -57,6 +57,10 @@
<build>
<plugins>
<plugin>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>abi-check-plugin</artifactId>
+ </plugin>
+ <plugin>
<artifactId>maven-assembly-plugin</artifactId>
<configuration>
<descriptorRefs>
diff --git a/screwdriver.yaml b/screwdriver.yaml
index 93fe3a934c3..597f5f2678e 100644
--- a/screwdriver.yaml
+++ b/screwdriver.yaml
@@ -139,9 +139,6 @@ jobs:
cd $WORKDIR/vespa
export FACTORY_VESPA_VERSION=$VESPA_VERSION
NUM_THREADS=$(( $(nproc) + 2 ))
- git --version
- which git
- go version
time make -C client/go BIN=$WORKDIR/vespa-install/opt/vespa/bin
time ./bootstrap.sh java
time mvn -T $NUM_THREADS $VESPA_MAVEN_EXTRA_OPTS install
diff --git a/searchcore/src/tests/proton/documentdb/combiningfeedview/combiningfeedview_test.cpp b/searchcore/src/tests/proton/documentdb/combiningfeedview/combiningfeedview_test.cpp
index dc51d0df8a1..a57787d417e 100644
--- a/searchcore/src/tests/proton/documentdb/combiningfeedview/combiningfeedview_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/combiningfeedview/combiningfeedview_test.cpp
@@ -23,18 +23,10 @@ using namespace proton;
typedef std::vector<IFeedView::SP> FeedViewVector;
-struct MyStreamHandler : public NewConfigOperation::IStreamHandler
-{
- void serializeConfig(SerialNum, vespalib::nbostream &) override {}
- void deserializeConfig(SerialNum, vespalib::nbostream &) override {}
-};
-
-
struct MyFeedView : public test::DummyFeedView
{
typedef std::shared_ptr<MyFeedView> SP;
DocumentMetaStore _metaStore;
- MyStreamHandler _streamHandler;
uint32_t _preparePut;
uint32_t _handlePut;
uint32_t _prepareRemove;
@@ -56,7 +48,6 @@ struct MyFeedView : public test::DummyFeedView
DocumentMetaStore::getFixedName(),
search::GrowStrategy(),
subDbType),
- _streamHandler(),
_preparePut(0),
_handlePut(0),
_prepareRemove(0),
@@ -82,11 +73,11 @@ struct MyFeedView : public test::DummyFeedView
void prepareRemove(RemoveOperation &) override { ++_prepareRemove; }
void handleRemove(FeedToken, const RemoveOperation &) override { ++_handleRemove; }
void prepareDeleteBucket(DeleteBucketOperation &) override { ++_prepareDeleteBucket; }
- void handleDeleteBucket(const DeleteBucketOperation &) override { ++_handleDeleteBucket; }
+ void handleDeleteBucket(const DeleteBucketOperation &, DoneCallback) override { ++_handleDeleteBucket; }
void prepareMove(MoveOperation &) override { ++_prepareMove; }
- void handleMove(const MoveOperation &, IDestructorCallback::SP) override { ++_handleMove; }
+ void handleMove(const MoveOperation &, DoneCallback) override { ++_handleMove; }
void heartBeat(SerialNum, DoneCallback) override { ++_heartBeat; }
- void handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &) override { ++_handlePrune; }
+ void handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &, DoneCallback) override { ++_handlePrune; }
void handleCompactLidSpace(const CompactLidSpaceOperation &op, DoneCallback) override {
_wantedLidLimit = op.getLidLimit();
}
@@ -362,7 +353,7 @@ TEST_F("require that delete bucket is sent to all feed views", Fixture)
EXPECT_EQUAL(1u, f._ready._view->_prepareDeleteBucket);
EXPECT_EQUAL(1u, f._removed._view->_prepareDeleteBucket);
EXPECT_EQUAL(1u, f._notReady._view->_prepareDeleteBucket);
- f._view.handleDeleteBucket(op);
+ f._view.handleDeleteBucket(op, IDestructorCallback::SP());
EXPECT_EQUAL(1u, f._ready._view->_handleDeleteBucket);
EXPECT_EQUAL(1u, f._removed._view->_handleDeleteBucket);
EXPECT_EQUAL(1u, f._notReady._view->_handleDeleteBucket);
@@ -381,7 +372,7 @@ TEST_F("require that heart beat is sent to all feed views", Fixture)
TEST_F("require that prune removed documents is sent to removed view", Fixture)
{
PruneRemovedDocumentsOperation op;
- f._view.handlePruneRemovedDocuments(op);
+ f._view.handlePruneRemovedDocuments(op, IDestructorCallback::SP());
EXPECT_EQUAL(0u, f._ready._view->_handlePrune);
EXPECT_EQUAL(1u, f._removed._view->_handlePrune);
EXPECT_EQUAL(0u, f._notReady._view->_handlePrune);
diff --git a/searchcore/src/tests/proton/documentdb/executor_threading_service/executor_threading_service_test.cpp b/searchcore/src/tests/proton/documentdb/executor_threading_service/executor_threading_service_test.cpp
index 4629ebec854..8d7e842bc89 100644
--- a/searchcore/src/tests/proton/documentdb/executor_threading_service/executor_threading_service_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/executor_threading_service/executor_threading_service_test.cpp
@@ -32,6 +32,7 @@ public:
void setup(uint32_t indexing_threads, SharedFieldWriterExecutor shared_field_writer) {
service = std::make_unique<ExecutorThreadingService>(shared_executor,
field_writer_executor.get(),
+ nullptr,
ThreadingServiceConfig::make(indexing_threads, shared_field_writer));
}
SequencedTaskExecutor* index_inverter() {
diff --git a/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp b/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp
index 1963e5e9dbc..904937a26da 100644
--- a/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp
@@ -236,7 +236,7 @@ struct MyFeedView : public test::DummyFeedView {
}
void handleMove(const MoveOperation &, DoneCallback) override { ++move_count; }
void heartBeat(SerialNum, DoneCallback) override { ++heartbeat_count; }
- void handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &) override { ++prune_removed_count; }
+ void handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &, DoneCallback) override { ++prune_removed_count; }
const ISimpleDocumentMetaStore *getDocumentMetaStorePtr() const override {
return nullptr;
}
diff --git a/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp b/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp
index 4c566f6053b..824a9273404 100644
--- a/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp
@@ -515,11 +515,6 @@ struct FixtureBase
}
template <typename FunctionType>
- void runInMasterAndSyncAll(FunctionType func) {
- test::runInMaster(_writeService, func);
- _writeServiceReal.sync_all_executors();
- }
- template <typename FunctionType>
void runInMaster(FunctionType func) {
test::runInMaster(_writeService, func);
}
@@ -617,10 +612,10 @@ struct FixtureBase
gate.await();
}
- void performDeleteBucket(DeleteBucketOperation &op) {
+ void performDeleteBucket(DeleteBucketOperation &op, IDestructorCallback::SP onDone) {
getFeedView().prepareDeleteBucket(op);
op.setSerialNum(++serial);
- getFeedView().handleDeleteBucket(op);
+ getFeedView().handleDeleteBucket(op, onDone);
}
void performForceCommit(IDestructorCallback::SP onDone) {
@@ -656,7 +651,7 @@ struct FixtureBase
auto &fv = getFeedView();
CompactLidSpaceOperation op(0, wantedLidLimit);
op.setSerialNum(++serial);
- fv.handleCompactLidSpace(op, std::move(onDone));
+ fv.handleCompactLidSpace(op, onDone);
}
void compactLidSpaceAndWait(uint32_t wantedLidLimit) {
Gate gate;
@@ -664,6 +659,7 @@ struct FixtureBase
performCompactLidSpace(wantedLidLimit, std::make_shared<GateCallback>(gate));
});
gate.await();
+ _writeService.master().sync();
}
void assertChangeHandler(document::GlobalId expGid, uint32_t expLid, uint32_t expChanges) {
_gidToLidChangeHandler->assertChanges(expGid, expLid, expChanges);
@@ -950,7 +946,11 @@ TEST_F("require that handleDeleteBucket() removes documents", SearchableFeedView
// delete bucket for user 1
DeleteBucketOperation op(docs[0].bid);
- f.runInMasterAndSyncAll([&]() { f.performDeleteBucket(op); });
+ vespalib::Gate gate;
+ f.runInMaster([&, onDone=std::make_shared<GateCallback>(gate)]() {
+ f.performDeleteBucket(op, std::move(onDone));
+ });
+ gate.await();
f.dms_commit();
EXPECT_EQUAL(0u, f.getBucketDB()->get(docs[0].bid).getDocumentCount());
@@ -1174,6 +1174,7 @@ TEST_F("require that compactLidSpace() doesn't propagate to "
f.fv.handleCompactLidSpace(op, std::move(onDone));
});
gate.await();
+ f._writeService.master().sync();
// Delayed holdUnblockShrinkLidSpace() in index thread, then master thread
EXPECT_TRUE(assertThreadObserver(6, 6, 5, f.writeServiceObserver()));
EXPECT_EQUAL(0u, f.metaStoreObserver()._compactLidSpaceLidLimit);
diff --git a/searchcore/src/tests/proton/documentdb/storeonlyfeedview/storeonlyfeedview_test.cpp b/searchcore/src/tests/proton/documentdb/storeonlyfeedview/storeonlyfeedview_test.cpp
index fb609b1829e..1c2d903fead 100644
--- a/searchcore/src/tests/proton/documentdb/storeonlyfeedview/storeonlyfeedview_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/storeonlyfeedview/storeonlyfeedview_test.cpp
@@ -114,11 +114,11 @@ struct MyMinimalFeedView : public MyMinimalFeedViewBase, public StoreOnlyFeedVie
StoreOnlyFeedView::removeIndexedFields(s, l, onWriteDone);
++removeMultiIndexFieldsCount;
}
- void heartBeatIndexedFields(SerialNum s, IDestructorCallbackSP onDone) override {
+ void heartBeatIndexedFields(SerialNum s, DoneCallback onDone) override {
StoreOnlyFeedView::heartBeatIndexedFields(s, onDone);
++heartBeatIndexedFieldsCount;
}
- void heartBeatAttributes(SerialNum s, IDestructorCallbackSP onDone) override {
+ void heartBeatAttributes(SerialNum s, DoneCallback onDone) override {
StoreOnlyFeedView::heartBeatAttributes(s, onDone);
++heartBeatAttributesCount;
}
@@ -383,7 +383,11 @@ TEST_F("require that prune removed documents removes documents",
PruneRemovedDocumentsOperation op(lids->getDocIdLimit(), subdb_id);
op.setLidsToRemove(lids);
op.setSerialNum(1); // allows use of meta store.
- f.runInMasterAndSyncAll([&]() { f.feedview->handlePruneRemovedDocuments(op); });
+ vespalib::Gate gate;
+ f.runInMaster([&, onDone=std::make_shared<vespalib::GateCallback>(gate)]() {
+ f.feedview->handlePruneRemovedDocuments(op, std::move(onDone));
+ });
+ gate.await();
EXPECT_EQUAL(2, f.removeCount);
EXPECT_FALSE(f.metaStore->validLid(1));
diff --git a/searchcore/src/vespa/searchcore/proton/server/combiningfeedview.cpp b/searchcore/src/vespa/searchcore/proton/server/combiningfeedview.cpp
index 23ca17c7656..297a9b9254f 100644
--- a/searchcore/src/vespa/searchcore/proton/server/combiningfeedview.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/combiningfeedview.cpp
@@ -187,10 +187,10 @@ CombiningFeedView::prepareDeleteBucket(DeleteBucketOperation &delOp)
}
void
-CombiningFeedView::handleDeleteBucket(const DeleteBucketOperation &delOp)
+CombiningFeedView::handleDeleteBucket(const DeleteBucketOperation &delOp, DoneCallback onDone)
{
for (const auto &view : _views) {
- view->handleDeleteBucket(delOp);
+ view->handleDeleteBucket(delOp, onDone);
}
}
@@ -203,7 +203,7 @@ CombiningFeedView::prepareMove(MoveOperation &moveOp)
}
void
-CombiningFeedView::handleMove(const MoveOperation &moveOp, IDestructorCallback::SP moveDoneCtx)
+CombiningFeedView::handleMove(const MoveOperation &moveOp, DoneCallback moveDoneCtx)
{
assert(moveOp.getValidDbdId());
uint32_t subDbId = moveOp.getSubDbId();
@@ -235,9 +235,9 @@ CombiningFeedView::forceCommit(const CommitParam & param, DoneCallback onDone)
void
CombiningFeedView::
-handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &pruneOp)
+handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &pruneOp, DoneCallback onDone)
{
- getRemFeedView()->handlePruneRemovedDocuments(pruneOp);
+ getRemFeedView()->handlePruneRemovedDocuments(pruneOp, onDone);
}
void
@@ -245,7 +245,7 @@ CombiningFeedView::handleCompactLidSpace(const CompactLidSpaceOperation &op, Don
{
uint32_t subDbId = op.getSubDbId();
assert(subDbId < _views.size());
- _views[subDbId]->handleCompactLidSpace(op, std::move(onDone));
+ _views[subDbId]->handleCompactLidSpace(op, onDone);
}
void
diff --git a/searchcore/src/vespa/searchcore/proton/server/combiningfeedview.h b/searchcore/src/vespa/searchcore/proton/server/combiningfeedview.h
index 81a4f4f9588..ff640a32887 100644
--- a/searchcore/src/vespa/searchcore/proton/server/combiningfeedview.h
+++ b/searchcore/src/vespa/searchcore/proton/server/combiningfeedview.h
@@ -72,11 +72,11 @@ public:
void prepareRemove(RemoveOperation &rmOp) override;
void handleRemove(FeedToken token, const RemoveOperation &rmOp) override;
void prepareDeleteBucket(DeleteBucketOperation &delOp) override;
- void handleDeleteBucket(const DeleteBucketOperation &delOp) override;
void prepareMove(MoveOperation &putOp) override;
- void handleMove(const MoveOperation &moveOp, std::shared_ptr<vespalib::IDestructorCallback> moveDoneCtx) override;
+ void handleDeleteBucket(const DeleteBucketOperation &delOp, DoneCallback onDone) override;
+ void handleMove(const MoveOperation &moveOp, DoneCallback onDone) override;
void heartBeat(search::SerialNum serialNum, DoneCallback onDone) override;
- void handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &pruneOp) override;
+ void handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &pruneOp, DoneCallback onDone) override;
void handleCompactLidSpace(const CompactLidSpaceOperation &op, DoneCallback onDone) override;
// Called by document db executor
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp b/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp
index 2b2f2422221..53bdc356015 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp
@@ -175,7 +175,7 @@ DocumentDB::DocumentDB(const vespalib::string &baseDir,
_baseDir(baseDir + "/" + _docTypeName.toString()),
// Only one thread per executor, or performDropFeedView() will fail.
_writeServiceConfig(configSnapshot->get_threading_service_config()),
- _writeService(shared_service.shared(), shared_service.field_writer(), _writeServiceConfig, indexing_thread_stack_size),
+ _writeService(shared_service.shared(), shared_service.field_writer(), &shared_service.invokeService(), _writeServiceConfig, indexing_thread_stack_size),
_initializeThreads(std::move(initializeThreads)),
_initConfigSnapshot(),
_initConfigSerialNum(0u),
diff --git a/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.cpp b/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.cpp
index 52da92ed568..bca8e89d69e 100644
--- a/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.cpp
@@ -39,11 +39,12 @@ VESPA_THREAD_STACK_TAG(field_writer_executor)
}
ExecutorThreadingService::ExecutorThreadingService(vespalib::ThreadExecutor &sharedExecutor, uint32_t num_treads)
- : ExecutorThreadingService(sharedExecutor, nullptr, ThreadingServiceConfig::make(num_treads))
+ : ExecutorThreadingService(sharedExecutor, nullptr, nullptr, ThreadingServiceConfig::make(num_treads))
{}
ExecutorThreadingService::ExecutorThreadingService(vespalib::ThreadExecutor& sharedExecutor,
vespalib::ISequencedTaskExecutor* field_writer,
+ vespalib::InvokeService * invokerService,
const ThreadingServiceConfig& cfg,
uint32_t stackSize)
@@ -61,12 +62,20 @@ ExecutorThreadingService::ExecutorThreadingService(vespalib::ThreadExecutor& sha
_field_writer(),
_index_field_inverter_ptr(),
_index_field_writer_ptr(),
- _attribute_field_writer_ptr()
+ _attribute_field_writer_ptr(),
+ _invokeRegistrations()
{
+ if (cfg.optimize() == vespalib::Executor::OptimizeFor::THROUGHPUT && invokerService) {
+ _invokeRegistrations.push_back(invokerService->registerInvoke([executor=_indexExecutor.get()](){ executor->wakeup();}));
+ _invokeRegistrations.push_back(invokerService->registerInvoke([executor=_summaryExecutor.get()](){ executor->wakeup();}));
+ }
if (_shared_field_writer == SharedFieldWriterExecutor::INDEX) {
_field_writer = SequencedTaskExecutor::create(field_writer_executor, cfg.indexingThreads() * 2, cfg.defaultTaskLimit());
_attributeFieldWriter = SequencedTaskExecutor::create(attribute_field_writer_executor, cfg.indexingThreads(), cfg.defaultTaskLimit(),
cfg.optimize(), cfg.kindOfwatermark(), cfg.reactionTime());
+ if (cfg.optimize() == vespalib::Executor::OptimizeFor::THROUGHPUT && invokerService) {
+ _invokeRegistrations.push_back(invokerService->registerInvoke([executor=_attributeFieldWriter.get()](){ executor->wakeup();}));
+ }
_index_field_inverter_ptr = _field_writer.get();
_index_field_writer_ptr = _field_writer.get();
_attribute_field_writer_ptr = _attributeFieldWriter.get();
@@ -74,6 +83,9 @@ ExecutorThreadingService::ExecutorThreadingService(vespalib::ThreadExecutor& sha
} else if (_shared_field_writer == SharedFieldWriterExecutor::INDEX_AND_ATTRIBUTE) {
_field_writer = SequencedTaskExecutor::create(field_writer_executor, cfg.indexingThreads() * 3, cfg.defaultTaskLimit(),
cfg.optimize(), cfg.kindOfwatermark(), cfg.reactionTime());
+ if (cfg.optimize() == vespalib::Executor::OptimizeFor::THROUGHPUT && invokerService) {
+ _invokeRegistrations.push_back(invokerService->registerInvoke([executor=_field_writer.get()](){ executor->wakeup();}));
+ }
_index_field_inverter_ptr = _field_writer.get();
_index_field_writer_ptr = _field_writer.get();
_attribute_field_writer_ptr = _field_writer.get();
@@ -87,6 +99,9 @@ ExecutorThreadingService::ExecutorThreadingService(vespalib::ThreadExecutor& sha
_indexFieldWriter = SequencedTaskExecutor::create(index_field_writer_executor, cfg.indexingThreads(), cfg.defaultTaskLimit());
_attributeFieldWriter = SequencedTaskExecutor::create(attribute_field_writer_executor, cfg.indexingThreads(), cfg.defaultTaskLimit(),
cfg.optimize(), cfg.kindOfwatermark(), cfg.reactionTime());
+ if (cfg.optimize() == vespalib::Executor::OptimizeFor::THROUGHPUT && invokerService) {
+ _invokeRegistrations.push_back(invokerService->registerInvoke([executor=_attributeFieldWriter.get()](){ executor->wakeup();}));
+ }
_index_field_inverter_ptr = _indexFieldInverter.get();
_index_field_writer_ptr = _indexFieldWriter.get();
_attribute_field_writer_ptr = _attributeFieldWriter.get();
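
Executors configured for throughput nap between task batches, so the constructor above registers a periodic wakeup with the InvokeService and keeps the returned handle in _invokeRegistrations; dropping a handle cancels the corresponding wakeups. A hedged sketch of that wiring (the wire_wakeup helper and the 10ms interval are illustrative, not part of the change):

    #include <vespa/vespalib/util/invokeserviceimpl.h>
    #include <vespa/vespalib/util/isequencedtaskexecutor.h>
    #include <chrono>

    // Illustrative wiring: wake `executor` at the invoker's interval for as long
    // as `registration` is alive; resetting it (or leaving scope) stops the calls.
    void wire_wakeup(vespalib::ISequencedTaskExecutor &executor) {
        vespalib::InvokeServiceImpl invoker(std::chrono::milliseconds(10));
        auto registration = invoker.registerInvoke([&executor]() { executor.wakeup(); });
        // ... run the workload; `registration` is destroyed before `invoker`.
    }

The registrations are guarded by cfg.optimize() == OptimizeFor::THROUGHPUT, so executors that are not configured to nap between batches are left alone.
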
diff --git a/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.h b/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.h
index 972e0de0ec0..e55e95c6745 100644
--- a/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.h
+++ b/searchcore/src/vespa/searchcore/proton/server/executorthreadingservice.h
@@ -5,6 +5,7 @@
#include "threading_service_config.h"
#include <vespa/searchcorespi/index/ithreadingservice.h>
#include <vespa/vespalib/util/threadstackexecutor.h>
+#include <vespa/vespalib/util/invokeservice.h>
namespace proton {
@@ -17,6 +18,7 @@ class ExecutorThreadingServiceStats;
class ExecutorThreadingService : public searchcorespi::index::IThreadingService
{
private:
+ using Registration = std::unique_ptr<vespalib::IDestructorCallback>;
vespalib::ThreadExecutor & _sharedExecutor;
vespalib::ThreadStackExecutor _masterExecutor;
ThreadingServiceConfig::SharedFieldWriterExecutor _shared_field_writer;
@@ -32,6 +34,7 @@ private:
vespalib::ISequencedTaskExecutor* _index_field_inverter_ptr;
vespalib::ISequencedTaskExecutor* _index_field_writer_ptr;
vespalib::ISequencedTaskExecutor* _attribute_field_writer_ptr;
+ std::vector<Registration> _invokeRegistrations;
void syncOnce();
public:
@@ -43,6 +46,7 @@ public:
ExecutorThreadingService(vespalib::ThreadExecutor& sharedExecutor,
vespalib::ISequencedTaskExecutor* field_writer,
+ vespalib::InvokeService * invokeService,
const ThreadingServiceConfig& cfg,
uint32_t stackSize = 128 * 1024);
~ExecutorThreadingService() override;
diff --git a/searchcore/src/vespa/searchcore/proton/server/feedhandler.cpp b/searchcore/src/vespa/searchcore/proton/server/feedhandler.cpp
index dc49aeb8f88..0ac91870eab 100644
--- a/searchcore/src/vespa/searchcore/proton/server/feedhandler.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/feedhandler.cpp
@@ -281,7 +281,7 @@ FeedHandler::performDeleteBucket(FeedToken token, DeleteBucketOperation &op) {
_activeFeedView->prepareDeleteBucket(op);
appendOperation(op, token);
// Delete documents in bucket
- _activeFeedView->handleDeleteBucket(op);
+ _activeFeedView->handleDeleteBucket(op, token);
// Delete bucket itself, should no longer have documents.
_bucketDBHandler->handleDeleteBucket(op.getBucketId());
@@ -375,7 +375,9 @@ FeedHandler::changeFeedState(FeedStateSP newState)
if (_writeService.master().isCurrentThread()) {
doChangeFeedState(std::move(newState));
} else {
- _writeService.master().execute(makeLambdaTask([this, newState=std::move(newState)] () { doChangeFeedState(std::move(newState));}));
+ _writeService.master().execute(makeLambdaTask([this, newState=std::move(newState)] () {
+ doChangeFeedState(std::move(newState));
+ }));
_writeService.master().sync();
}
}
@@ -793,13 +795,13 @@ FeedHandler::eof()
}
void
-FeedHandler::
-performPruneRemovedDocuments(PruneRemovedDocumentsOperation &pruneOp)
+FeedHandler::performPruneRemovedDocuments(PruneRemovedDocumentsOperation &pruneOp)
{
const LidVectorContext::SP lids_to_remove = pruneOp.getLidsToRemove();
+ vespalib::IDestructorCallback::SP onDone;
if (lids_to_remove && lids_to_remove->getNumLids() != 0) {
- appendOperation(pruneOp, DoneCallback());
- _activeFeedView->handlePruneRemovedDocuments(pruneOp);
+ appendOperation(pruneOp, onDone);
+ _activeFeedView->handlePruneRemovedDocuments(pruneOp, onDone);
}
}
diff --git a/searchcore/src/vespa/searchcore/proton/server/feedstates.cpp b/searchcore/src/vespa/searchcore/proton/server/feedstates.cpp
index 8de727d664f..d2626a0d9f4 100644
--- a/searchcore/src/vespa/searchcore/proton/server/feedstates.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/feedstates.cpp
@@ -20,6 +20,7 @@ using search::transactionlog::client::RPC;
using search::SerialNum;
using vespalib::Executor;
using vespalib::makeLambdaTask;
+using vespalib::IDestructorCallback;
using vespalib::make_string;
using proton::bucketdb::IBucketDBHandler;
@@ -83,7 +84,7 @@ public:
}
void replay(const DeleteBucketOperation &op) override {
- _feed_view_ptr->handleDeleteBucket(op);
+ _feed_view_ptr->handleDeleteBucket(op, IDestructorCallback::SP());
}
void replay(const SplitBucketOperation &op) override {
_bucketDBHandler.handleSplit(op.getSerialNum(), op.getSource(),
@@ -94,15 +95,15 @@ public:
op.getSource2(), op.getTarget());
}
void replay(const PruneRemovedDocumentsOperation &op) override {
- _feed_view_ptr->handlePruneRemovedDocuments(op);
+ _feed_view_ptr->handlePruneRemovedDocuments(op, IDestructorCallback::SP());
}
void replay(const MoveOperation &op) override {
- _feed_view_ptr->handleMove(op, vespalib::IDestructorCallback::SP());
+ _feed_view_ptr->handleMove(op, IDestructorCallback::SP());
}
void replay(const CreateBucketOperation &) override {
}
void replay(const CompactLidSpaceOperation &op) override {
- _feed_view_ptr->handleCompactLidSpace(op, vespalib::IDestructorCallback::SP());
+ _feed_view_ptr->handleCompactLidSpace(op, IDestructorCallback::SP());
}
NewConfigOperation::IStreamHandler &getNewConfigStreamHandler() override {
return _config_store;
diff --git a/searchcore/src/vespa/searchcore/proton/server/i_shared_threading_service.h b/searchcore/src/vespa/searchcore/proton/server/i_shared_threading_service.h
index 9cd19012223..dfa48cb8d1a 100644
--- a/searchcore/src/vespa/searchcore/proton/server/i_shared_threading_service.h
+++ b/searchcore/src/vespa/searchcore/proton/server/i_shared_threading_service.h
@@ -4,6 +4,7 @@
namespace vespalib {
class ISequencedTaskExecutor;
class ThreadExecutor;
+class InvokeService;
}
namespace proton {
@@ -38,6 +39,11 @@ public:
* TODO: Make this a reference when it is always shared.
*/
virtual vespalib::ISequencedTaskExecutor* field_writer() = 0;
+
+ /**
+ * Returns an InvokeService intended for regular wakeup calls.
+ */
+ virtual vespalib::InvokeService & invokeService() = 0;
};
}
diff --git a/searchcore/src/vespa/searchcore/proton/server/ifeedview.h b/searchcore/src/vespa/searchcore/proton/server/ifeedview.h
index e0c9fe58c5b..83a91520e5d 100644
--- a/searchcore/src/vespa/searchcore/proton/server/ifeedview.h
+++ b/searchcore/src/vespa/searchcore/proton/server/ifeedview.h
@@ -29,7 +29,8 @@ protected:
IFeedView() = default;
public:
using SP = std::shared_ptr<IFeedView>;
- using DoneCallback = std::shared_ptr<vespalib::IDestructorCallback>;
+ using DoneCallback = const std::shared_ptr<vespalib::IDestructorCallback> &;
+ using IDestructorCallbackSP = std::shared_ptr<vespalib::IDestructorCallback>;
using CommitParam = search::CommitParam;
IFeedView(const IFeedView &) = delete;
@@ -55,16 +56,16 @@ public:
virtual void prepareRemove(RemoveOperation &rmOp) = 0;
virtual void handleRemove(FeedToken token, const RemoveOperation &rmOp) = 0;
virtual void prepareDeleteBucket(DeleteBucketOperation &delOp) = 0;
- virtual void handleDeleteBucket(const DeleteBucketOperation &delOp) = 0;
virtual void prepareMove(MoveOperation &putOp) = 0;
+ virtual void handleDeleteBucket(const DeleteBucketOperation &delOp, DoneCallback onDone) = 0;
virtual void handleMove(const MoveOperation &putOp, DoneCallback onDone) = 0;
virtual void heartBeat(search::SerialNum serialNum, DoneCallback onDone) = 0;
virtual void forceCommit(const CommitParam & param, DoneCallback onDone) = 0;
- void forceCommit(CommitParam param) { forceCommit(param, DoneCallback()); }
+ virtual void handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation & pruneOp, DoneCallback onDone) = 0;
+ virtual void handleCompactLidSpace(const CompactLidSpaceOperation &op, DoneCallback onDone) = 0;
+ void forceCommit(CommitParam param) { forceCommit(param, IDestructorCallbackSP()); }
void forceCommit(search::SerialNum serialNum) { forceCommit(CommitParam(serialNum)); }
void forceCommitAndWait(CommitParam param);
- virtual void handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation & pruneOp) = 0;
- virtual void handleCompactLidSpace(const CompactLidSpaceOperation &op, DoneCallback onDone) = 0;
};
} // namespace proton
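
DoneCallback is now a const reference to a shared IDestructorCallback, so a single completion token can be fanned out to several handlers (as CombiningFeedView::handleDeleteBucket does above) and the callback runs once, when the last copy is released. A small sketch of that contract; fan_out and the sub-task names are placeholders, not feed-view API:

    #include <vespa/vespalib/util/idestructorcallback.h>
    #include <memory>

    using DoneCallback = const std::shared_ptr<vespalib::IDestructorCallback> &;

    // Placeholder handler: each sub-task keeps its own copy of the token; the
    // destructor of the callback runs exactly once, after the last copy is gone.
    void fan_out(DoneCallback onDone) {
        auto forSubTaskA = onDone;
        auto forSubTaskB = onDone;
        // ... hand forSubTaskA / forSubTaskB to asynchronous work ...
        (void) forSubTaskA; (void) forSubTaskB;
    }
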
diff --git a/searchcore/src/vespa/searchcore/proton/server/proton.cpp b/searchcore/src/vespa/searchcore/proton/server/proton.cpp
index 30a23aaa3d5..0bcbbc14650 100644
--- a/searchcore/src/vespa/searchcore/proton/server/proton.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/proton.cpp
@@ -40,6 +40,7 @@
#include <vespa/vespalib/util/random.h>
#include <vespa/vespalib/util/sequencedtaskexecutor.h>
#include <vespa/vespalib/util/size_literals.h>
+#include <vespa/vespalib/util/invokeserviceimpl.h>
#ifdef __linux__
#include <malloc.h>
#endif
diff --git a/searchcore/src/vespa/searchcore/proton/server/proton.h b/searchcore/src/vespa/searchcore/proton/server/proton.h
index 91635dc7497..c18737f22b5 100644
--- a/searchcore/src/vespa/searchcore/proton/server/proton.h
+++ b/searchcore/src/vespa/searchcore/proton/server/proton.h
@@ -29,7 +29,9 @@
#include <mutex>
#include <shared_mutex>
-namespace vespalib { class StateServer; }
+namespace vespalib {
+ class StateServer;
+}
namespace search::transactionlog { class TransLogServerApp; }
namespace metrics { class MetricLockGuard; }
namespace storage::spi { struct PersistenceProvider; }
@@ -61,6 +63,7 @@ private:
using DocumentDBMap = std::map<DocTypeName, DocumentDB::SP>;
using InitializeThreads = std::shared_ptr<vespalib::ThreadExecutor>;
using BucketSpace = document::BucketSpace;
+ using InvokeService = vespalib::InvokeService;
class ProtonFileHeaderContext : public search::common::FileHeaderContext
{
diff --git a/searchcore/src/vespa/searchcore/proton/server/searchable_feed_view.cpp b/searchcore/src/vespa/searchcore/proton/server/searchable_feed_view.cpp
index dc7b48f0f02..66b1ba1ae2e 100644
--- a/searchcore/src/vespa/searchcore/proton/server/searchable_feed_view.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/searchable_feed_view.cpp
@@ -157,9 +157,9 @@ SearchableFeedView::removeIndexedFields(SerialNum serialNum, const LidVector &li
}
void
-SearchableFeedView::internalDeleteBucket(const DeleteBucketOperation &delOp)
+SearchableFeedView::internalDeleteBucket(const DeleteBucketOperation &delOp, DoneCallback onDone)
{
- Parent::internalDeleteBucket(delOp);
+ Parent::internalDeleteBucket(delOp, onDone);
}
void
diff --git a/searchcore/src/vespa/searchcore/proton/server/searchable_feed_view.h b/searchcore/src/vespa/searchcore/proton/server/searchable_feed_view.h
index 7dd7879a136..70b02c4a39f 100644
--- a/searchcore/src/vespa/searchcore/proton/server/searchable_feed_view.h
+++ b/searchcore/src/vespa/searchcore/proton/server/searchable_feed_view.h
@@ -41,7 +41,7 @@ private:
void performIndexRemove(SerialNum serialNum, const LidVector &lidsToRemove, OnWriteDoneType onWriteDone);
void performIndexHeartBeat(SerialNum serialNum);
- void internalDeleteBucket(const DeleteBucketOperation &delOp) override;
+ void internalDeleteBucket(const DeleteBucketOperation &delOp, DoneCallback onDone) override;
void heartBeatIndexedFields(SerialNum serialNum, DoneCallback onDone) override;
void putIndexedFields(SerialNum serialNum, search::DocumentIdT lid, const DocumentSP &newDoc, OnOperationDoneType onWriteDone) override;
diff --git a/searchcore/src/vespa/searchcore/proton/server/shared_threading_service.cpp b/searchcore/src/vespa/searchcore/proton/server/shared_threading_service.cpp
index c4fc79c43fd..fa4771bee1d 100644
--- a/searchcore/src/vespa/searchcore/proton/server/shared_threading_service.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/shared_threading_service.cpp
@@ -5,6 +5,7 @@
#include <vespa/vespalib/util/isequencedtaskexecutor.h>
#include <vespa/vespalib/util/sequencedtaskexecutor.h>
#include <vespa/vespalib/util/size_literals.h>
+#include <vespa/vespalib/util/invokeserviceimpl.h>
VESPA_THREAD_STACK_TAG(proton_field_writer_executor)
VESPA_THREAD_STACK_TAG(proton_shared_executor)
@@ -18,7 +19,9 @@ SharedThreadingService::SharedThreadingService(const SharedThreadingServiceConfi
: _warmup(cfg.warmup_threads(), 128_Ki, proton_warmup_executor),
_shared(std::make_shared<vespalib::BlockingThreadStackExecutor>(cfg.shared_threads(), 128_Ki,
cfg.shared_task_limit(), proton_shared_executor)),
- _field_writer()
+ _field_writer(),
+ _invokeService(5ms),
+ _invokeRegistrations()
{
const auto& fw_cfg = cfg.field_writer_config();
if (fw_cfg.shared_field_writer() == SharedFieldWriterExecutor::DOCUMENT_DB) {
@@ -28,6 +31,11 @@ SharedThreadingService::SharedThreadingService(const SharedThreadingServiceConfi
fw_cfg.optimize(),
fw_cfg.kindOfwatermark(),
fw_cfg.reactionTime());
+ if (fw_cfg.optimize() == vespalib::Executor::OptimizeFor::THROUGHPUT) {
+ _invokeRegistrations.push_back(_invokeService.registerInvoke([executor = _field_writer.get()]() {
+ executor->wakeup();
+ }));
+ }
}
}
diff --git a/searchcore/src/vespa/searchcore/proton/server/shared_threading_service.h b/searchcore/src/vespa/searchcore/proton/server/shared_threading_service.h
index 6006d484c97..cd0e6d71402 100644
--- a/searchcore/src/vespa/searchcore/proton/server/shared_threading_service.h
+++ b/searchcore/src/vespa/searchcore/proton/server/shared_threading_service.h
@@ -5,6 +5,7 @@
#include "shared_threading_service_config.h"
#include <vespa/vespalib/util/threadstackexecutor.h>
#include <vespa/vespalib/util/syncable.h>
+#include <vespa/vespalib/util/invokeserviceimpl.h>
#include <memory>
namespace proton {
@@ -14,9 +15,12 @@ namespace proton {
*/
class SharedThreadingService : public ISharedThreadingService {
private:
+ using Registration = std::unique_ptr<vespalib::IDestructorCallback>;
vespalib::ThreadStackExecutor _warmup;
std::shared_ptr<vespalib::SyncableThreadExecutor> _shared;
std::unique_ptr<vespalib::ISequencedTaskExecutor> _field_writer;
+ vespalib::InvokeServiceImpl _invokeService;
+ std::vector<Registration> _invokeRegistrations;
public:
SharedThreadingService(const SharedThreadingServiceConfig& cfg);
@@ -28,6 +32,7 @@ public:
vespalib::ThreadExecutor& warmup() override { return _warmup; }
vespalib::ThreadExecutor& shared() override { return *_shared; }
vespalib::ISequencedTaskExecutor* field_writer() override { return _field_writer.get(); }
+ vespalib::InvokeService & invokeService() override { return _invokeService; }
};
}
diff --git a/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.cpp b/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.cpp
index 8ca0f232287..97bd940b403 100644
--- a/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.cpp
@@ -16,7 +16,6 @@
#include <vespa/searchcore/proton/reference/i_gid_to_lid_change_handler.h>
#include <vespa/searchcore/proton/reference/i_pending_gid_to_lid_changes.h>
#include <vespa/vespalib/util/destructor_callbacks.h>
-#include <vespa/searchlib/common/scheduletaskcallback.h>
#include <vespa/vespalib/util/isequencedtaskexecutor.h>
#include <vespa/vespalib/util/exceptions.h>
@@ -180,7 +179,7 @@ StoreOnlyFeedView::forceCommit(const CommitParam & param, DoneCallback onDone)
internalForceCommit(param, std::make_shared<ForceCommitContext>(_writeService.master(), _metaStore,
_pendingLidsForCommit->produceSnapshot(),
_gidToLidChangeHandler.grab_pending_changes(),
- std::move(onDone)));
+ onDone));
}
void
@@ -615,7 +614,7 @@ void
StoreOnlyFeedView::removeIndexedFields(SerialNum , const LidVector &, OnWriteDoneType ) {}
size_t
-StoreOnlyFeedView::removeDocuments(const RemoveDocumentsOperation &op, bool remove_index_and_attributes)
+StoreOnlyFeedView::removeDocuments(const RemoveDocumentsOperation &op, bool remove_index_and_attributes, DoneCallback onWriteDone)
{
const SerialNum serialNum = op.getSerialNum();
const LidVectorContext::SP &ctx = op.getLidsToRemove(_params._subDbId);
@@ -632,10 +631,7 @@ StoreOnlyFeedView::removeDocuments(const RemoveDocumentsOperation &op, bool remo
_metaStore.removeBatch(lidsToRemove, ctx->getDocIdLimit());
_lidReuseDelayer.delayReuse(lidsToRemove);
}
- std::shared_ptr<vespalib::IDestructorCallback> onWriteDone;
- vespalib::Executor::Task::UP removeBatchDoneTask;
- removeBatchDoneTask = makeLambdaTask([]() {});
- onWriteDone = std::make_shared<search::ScheduleTaskCallback>(_writeService.master(), std::move(removeBatchDoneTask));
+
if (remove_index_and_attributes) {
removeIndexedFields(serialNum, lidsToRemove, onWriteDone);
removeAttributes(serialNum, lidsToRemove, onWriteDone);
@@ -661,15 +657,15 @@ StoreOnlyFeedView::prepareDeleteBucket(DeleteBucketOperation &delOp)
}
void
-StoreOnlyFeedView::handleDeleteBucket(const DeleteBucketOperation &delOp)
+StoreOnlyFeedView::handleDeleteBucket(const DeleteBucketOperation &delOp, DoneCallback onDone)
{
- internalDeleteBucket(delOp);
+ internalDeleteBucket(delOp, onDone);
}
void
-StoreOnlyFeedView::internalDeleteBucket(const DeleteBucketOperation &delOp)
+StoreOnlyFeedView::internalDeleteBucket(const DeleteBucketOperation &delOp, DoneCallback onDone)
{
- size_t rm_count = removeDocuments(delOp, true);
+ size_t rm_count = removeDocuments(delOp, true, onDone);
LOG(debug, "internalDeleteBucket(): docType(%s), bucket(%s), lidsToRemove(%zu)",
_params._docTypeName.toString().c_str(), delOp.getBucketId().toString().c_str(), rm_count);
}
@@ -687,7 +683,7 @@ StoreOnlyFeedView::prepareMove(MoveOperation &moveOp)
// CombiningFeedView calls this for both source and target subdb.
void
-StoreOnlyFeedView::handleMove(const MoveOperation &moveOp, IDestructorCallback::SP doneCtx)
+StoreOnlyFeedView::handleMove(const MoveOperation &moveOp, DoneCallback doneCtx)
{
assert(moveOp.getValidDbdId());
assert(moveOp.getValidPrevDbdId());
@@ -716,7 +712,7 @@ StoreOnlyFeedView::handleMove(const MoveOperation &moveOp, IDestructorCallback::
putIndexedFields(serialNum, moveOp.getLid(), doc, onWriteDone);
}
if (docAlreadyExists && moveOp.changedDbdId()) {
- internalRemove(std::move(doneCtx), _pendingLidsForCommit->produce(moveOp.getPrevLid()), serialNum, moveOp.getPrevLid());
+ internalRemove(doneCtx, _pendingLidsForCommit->produce(moveOp.getPrevLid()), serialNum, moveOp.getPrevLid());
}
}
@@ -734,11 +730,11 @@ StoreOnlyFeedView::heartBeat(SerialNum serialNum, DoneCallback onDone)
// CombiningFeedView calls this only for the removed subdb.
void
StoreOnlyFeedView::
-handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &pruneOp)
+handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &pruneOp, DoneCallback onDone)
{
assert(_params._subDbType == SubDbType::REMOVED);
assert(pruneOp.getSubDbId() == _params._subDbId);
- uint32_t rm_count = removeDocuments(pruneOp, false);
+ uint32_t rm_count = removeDocuments(pruneOp, false, onDone);
LOG(debug, "MinimalFeedView::handlePruneRemovedDocuments called, doctype(%s) %u lids pruned, limit %u",
_params._docTypeName.toString().c_str(), rm_count,
diff --git a/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.h b/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.h
index 4b4082a8aa5..c25accaf4a4 100644
--- a/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.h
+++ b/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.h
@@ -53,7 +53,7 @@ public:
using LidVector = LidVectorContext::LidVector;
using Document = document::Document;
using DocumentUpdate = document::DocumentUpdate;
- using OnWriteDoneType = const std::shared_ptr<vespalib::IDestructorCallback> &;
+ using OnWriteDoneType = DoneCallback;
using OnForceCommitDoneType =const std::shared_ptr<ForceCommitContext> &;
using OnOperationDoneType = const std::shared_ptr<OperationDoneContext> &;
using OnPutDoneType = const std::shared_ptr<PutDoneContext> &;
@@ -65,7 +65,6 @@ public:
using DocumentSP = std::shared_ptr<Document>;
using DocumentUpdateSP = std::shared_ptr<DocumentUpdate>;
using LidReuseDelayer = documentmetastore::LidReuseDelayer;
- using IDestructorCallbackSP = std::shared_ptr<vespalib::IDestructorCallback>;
using Lid = search::DocumentIdT;
@@ -179,7 +178,7 @@ private:
// Removes documents from meta store and document store.
// returns the number of documents removed.
- size_t removeDocuments(const RemoveDocumentsOperation &op, bool remove_index_and_attribute_fields);
+ size_t removeDocuments(const RemoveDocumentsOperation &op, bool remove_index_and_attribute_fields, DoneCallback onDone);
void internalRemove(IDestructorCallbackSP token, IPendingLidTracker::Token uncommitted, SerialNum serialNum, Lid lid);
@@ -189,7 +188,7 @@ private:
PromisedDoc promisedDoc, PromisedStream promisedStream);
protected:
- virtual void internalDeleteBucket(const DeleteBucketOperation &delOp);
+ virtual void internalDeleteBucket(const DeleteBucketOperation &delOp, DoneCallback onDone);
virtual void heartBeatIndexedFields(SerialNum serialNum, DoneCallback onDone);
virtual void heartBeatAttributes(SerialNum serialNum, DoneCallback onDone);
@@ -231,9 +230,9 @@ public:
void prepareRemove(RemoveOperation &rmOp) override;
void handleRemove(FeedToken token, const RemoveOperation &rmOp) override;
void prepareDeleteBucket(DeleteBucketOperation &delOp) override;
- void handleDeleteBucket(const DeleteBucketOperation &delOp) override;
+ void handleDeleteBucket(const DeleteBucketOperation &delOp, DoneCallback onDone) override;
void prepareMove(MoveOperation &putOp) override;
- void handleMove(const MoveOperation &putOp, std::shared_ptr<vespalib::IDestructorCallback> doneCtx) override;
+ void handleMove(const MoveOperation &putOp, DoneCallback doneCtx) override;
void heartBeat(search::SerialNum serialNum, DoneCallback onDone) override;
void forceCommit(const CommitParam & param, DoneCallback onDone) override;
@@ -243,7 +242,7 @@ public:
*
* Called by writer thread.
*/
- void handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &pruneOp) override;
+ void handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &pruneOp, DoneCallback onDone) override;
void handleCompactLidSpace(const CompactLidSpaceOperation &op, DoneCallback onDone) override;
std::shared_ptr<PendingLidTrackerBase> getUncommittedLidTracker() { return _pendingLidsForCommit; }
};
diff --git a/searchcore/src/vespa/searchcore/proton/test/dummy_feed_view.h b/searchcore/src/vespa/searchcore/proton/test/dummy_feed_view.h
index 7f11bf9ec51..51bb3ebc807 100644
--- a/searchcore/src/vespa/searchcore/proton/test/dummy_feed_view.h
+++ b/searchcore/src/vespa/searchcore/proton/test/dummy_feed_view.h
@@ -25,11 +25,11 @@ struct DummyFeedView : public IFeedView
void prepareRemove(RemoveOperation &) override {}
void handleRemove(FeedToken, const RemoveOperation &) override {}
void prepareDeleteBucket(DeleteBucketOperation &) override {}
- void handleDeleteBucket(const DeleteBucketOperation &) override {}
+ void handleDeleteBucket(const DeleteBucketOperation &, DoneCallback) override {}
void prepareMove(MoveOperation &) override {}
- void handleMove(const MoveOperation &, std::shared_ptr<vespalib::IDestructorCallback>) override {}
+ void handleMove(const MoveOperation &, DoneCallback) override {}
void heartBeat(search::SerialNum, DoneCallback) override {}
- void handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &) override {}
+ void handlePruneRemovedDocuments(const PruneRemovedDocumentsOperation &, DoneCallback) override {}
void handleCompactLidSpace(const CompactLidSpaceOperation &, DoneCallback) override {}
void forceCommit(const CommitParam &, DoneCallback) override { }
};
diff --git a/searchcore/src/vespa/searchcore/proton/test/mock_shared_threading_service.h b/searchcore/src/vespa/searchcore/proton/test/mock_shared_threading_service.h
index 976d75f2571..74965c15cd4 100644
--- a/searchcore/src/vespa/searchcore/proton/test/mock_shared_threading_service.h
+++ b/searchcore/src/vespa/searchcore/proton/test/mock_shared_threading_service.h
@@ -2,6 +2,7 @@
#pragma once
#include <vespa/searchcore/proton/server/i_shared_threading_service.h>
+#include <vespa/vespalib/util/invokeserviceimpl.h>
namespace proton {
@@ -9,16 +10,19 @@ class MockSharedThreadingService : public ISharedThreadingService {
private:
vespalib::ThreadExecutor& _warmup;
vespalib::ThreadExecutor& _shared;
+ vespalib::InvokeServiceImpl _invokeService;
public:
MockSharedThreadingService(vespalib::ThreadExecutor& warmup_in,
vespalib::ThreadExecutor& shared_in)
: _warmup(warmup_in),
- _shared(shared_in)
+ _shared(shared_in),
+ _invokeService(10ms)
{}
vespalib::ThreadExecutor& warmup() override { return _warmup; }
vespalib::ThreadExecutor& shared() override { return _shared; }
vespalib::ISequencedTaskExecutor* field_writer() override { return nullptr; }
+ vespalib::InvokeService & invokeService() override { return _invokeService; }
};
}
diff --git a/staging_vespalib/src/vespa/vespalib/util/isequencedtaskexecutor.h b/staging_vespalib/src/vespa/vespalib/util/isequencedtaskexecutor.h
index 0e931838279..3fe6fb5d678 100644
--- a/staging_vespalib/src/vespa/vespalib/util/isequencedtaskexecutor.h
+++ b/staging_vespalib/src/vespa/vespalib/util/isequencedtaskexecutor.h
@@ -14,7 +14,7 @@ namespace vespalib {
 * Interface class to run multiple tasks in parallel, but tasks with the same
 * id have to be run in sequence.
*/
-class ISequencedTaskExecutor
+class ISequencedTaskExecutor : public vespalib::IWakeup
{
public:
class ExecutorId {
@@ -62,7 +62,7 @@ public:
/**
* Call this one to ensure you get the attention of the workers.
*/
- virtual void wakeup() { }
+ void wakeup() override { }
/**
* Wrap lambda function into a task and schedule it to be run.
diff --git a/storage/src/tests/distributor/mergeoperationtest.cpp b/storage/src/tests/distributor/mergeoperationtest.cpp
index 54bd06c98e0..e2b733c0a29 100644
--- a/storage/src/tests/distributor/mergeoperationtest.cpp
+++ b/storage/src/tests/distributor/mergeoperationtest.cpp
@@ -68,6 +68,7 @@ MergeOperationTest::setup_simple_merge_op(const std::vector<uint16_t>& nodes)
auto op = std::make_shared<MergeOperation>(BucketAndNodes(makeDocumentBucket(document::BucketId(16, 1)), nodes));
op->setIdealStateManager(&getIdealStateManager());
+ op->setPriority(api::StorageMessage::Priority(125));
op->start(_sender, framework::MilliSecTime(0));
return op;
}
@@ -603,4 +604,14 @@ TEST_F(MergeOperationTest, unordered_merges_only_sent_iff_config_enabled_and_all
_sender.getLastCommand(true));
}
+TEST_F(MergeOperationTest, delete_bucket_inherits_merge_priority) {
+ auto op = setup_simple_merge_op();
+ ASSERT_NO_FATAL_FAILURE(assert_simple_merge_bucket_command());
+ sendReply(*op);
+ ASSERT_NO_FATAL_FAILURE(assert_simple_delete_bucket_command());
+ auto del_cmd = std::dynamic_pointer_cast<api::DeleteBucketCommand>(_sender.commands().back());
+ ASSERT_TRUE(del_cmd);
+ EXPECT_EQ(int(del_cmd->getPriority()), int(op->getPriority()));
+}
+
} // storage::distributor
diff --git a/storage/src/tests/distributor/statecheckerstest.cpp b/storage/src/tests/distributor/statecheckerstest.cpp
index f5531a134d0..d481370b2c1 100644
--- a/storage/src/tests/distributor/statecheckerstest.cpp
+++ b/storage/src/tests/distributor/statecheckerstest.cpp
@@ -858,7 +858,7 @@ TEST_F(StateCheckersTest, delete_extra_copies) {
EXPECT_EQ("[Removing all copies since bucket is empty:node(idx=0,crc=0x0,"
"docs=0/0,bytes=0/0,trusted=false,active=false,ready=false)]"
- " (pri 100)",
+ " (pri 120)",
testDeleteExtraCopies("0=0", 2, PendingMessage(), "", true)) << "Remove empty buckets";
EXPECT_EQ("[Removing redundant in-sync copy from node 2]",
diff --git a/storage/src/vespa/storage/config/stor-distributormanager.def b/storage/src/vespa/storage/config/stor-distributormanager.def
index 8021075faa3..5162e337f24 100644
--- a/storage/src/vespa/storage/config/stor-distributormanager.def
+++ b/storage/src/vespa/storage/config/stor-distributormanager.def
@@ -103,7 +103,7 @@ priority_activate_no_existing_active int default=100
priority_activate_with_existing_active int default=100
## Deletion of bucket copy.
-priority_delete_bucket_copy int default=100
+priority_delete_bucket_copy int default=120
## Joining caused by bucket siblings getting sufficiently small to fit into a
## single bucket.
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp
index d220a71966f..6624ca7c8c6 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp
@@ -253,6 +253,7 @@ MergeOperation::deleteSourceOnlyNodes(
return;
}
_removeOperation->setIdealStateManager(_manager);
+ _removeOperation->setPriority(getPriority());
if (_removeOperation->onStartInternal(sender)) {
_ok = _removeOperation->ok();
diff --git a/vespalib/CMakeLists.txt b/vespalib/CMakeLists.txt
index 2f657b1bed0..d4c7bc881a5 100644
--- a/vespalib/CMakeLists.txt
+++ b/vespalib/CMakeLists.txt
@@ -150,6 +150,7 @@ vespa_define_module(
src/tests/util/size_literals
src/tests/valgrind
src/tests/visit_ranges
+ src/tests/invokeservice
src/tests/wakeup
src/tests/websocket
src/tests/zcurve
diff --git a/vespalib/src/tests/invokeservice/CMakeLists.txt b/vespalib/src/tests/invokeservice/CMakeLists.txt
new file mode 100644
index 00000000000..a7d7dca806e
--- /dev/null
+++ b/vespalib/src/tests/invokeservice/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_invokeservice_test_app TEST
+ SOURCES
+ invokeservice_test.cpp
+ DEPENDS
+ vespalib
+)
+vespa_add_test(NAME vespalib_invokeservice_test_app COMMAND vespalib_invokeservice_test_app)
+
diff --git a/vespalib/src/tests/invokeservice/invokeservice_test.cpp b/vespalib/src/tests/invokeservice/invokeservice_test.cpp
new file mode 100644
index 00000000000..22063281950
--- /dev/null
+++ b/vespalib/src/tests/invokeservice/invokeservice_test.cpp
@@ -0,0 +1,62 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/vespalib/util/invokeserviceimpl.h>
+
+using namespace vespalib;
+
+struct InvokeCounter {
+ InvokeCounter() : _count(0) {}
+ void inc() noexcept { _count++; }
+ void wait_for_atleast(uint64_t n) {
+ while (_count <= n) {
+ std::this_thread::sleep_for(1ms);
+ }
+ }
+ std::atomic<uint64_t> _count;
+};
+
+TEST("require that wakeup is called") {
+ InvokeCounter a;
+ InvokeServiceImpl service(1ms);
+ EXPECT_EQUAL(0u, a._count);
+ auto ra = service.registerInvoke([&a]() noexcept { a.inc(); });
+ EXPECT_TRUE(ra);
+ a.wait_for_atleast(1);
+ ra.reset();
+ uint64_t countAtStop = a._count;
+ std::this_thread::sleep_for(1s);
+ EXPECT_EQUAL(countAtStop, a._count);
+}
+
+TEST("require that same wakeup can be registered multiple times.") {
+ InvokeCounter a;
+ InvokeCounter b;
+ InvokeCounter c;
+ InvokeServiceImpl service(1ms);
+ EXPECT_EQUAL(0u, a._count);
+ auto ra1 = service.registerInvoke([&a]() noexcept { a.inc(); });
+ EXPECT_TRUE(ra1);
+ auto rb = service.registerInvoke([&b]() noexcept { b.inc(); });
+ EXPECT_TRUE(rb);
+ auto rc = service.registerInvoke([&c]() noexcept { c.inc(); });
+ EXPECT_TRUE(rc);
+ a.wait_for_atleast(1);
+ b.wait_for_atleast(1);
+ c.wait_for_atleast(1);
+ auto ra2 = service.registerInvoke([&a]() noexcept { a.inc(); });
+ EXPECT_TRUE(ra2);
+
+ rb.reset();
+ uint64_t countAtStop = b._count;
+ uint64_t a_count = a._count;
+ uint64_t c_count = c._count;
+ std::this_thread::sleep_for(1s);
+ EXPECT_EQUAL(countAtStop, b._count);
+
+ uint64_t diff_c = c._count - c_count;
+ uint64_t diff_a = a._count - a_count;
+ EXPECT_LESS((diff_c*3)/2, diff_a); // diff_c*3/2 should still be smaller than diff_a(2x)
+}
+
+
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/vespalib/src/vespa/vespalib/util/CMakeLists.txt b/vespalib/src/vespa/vespalib/util/CMakeLists.txt
index 75ea02d448e..58f6a93babc 100644
--- a/vespalib/src/vespa/vespalib/util/CMakeLists.txt
+++ b/vespalib/src/vespa/vespalib/util/CMakeLists.txt
@@ -30,6 +30,7 @@ vespa_add_library(vespalib_vespalib_util OBJECT
generationholder.cpp
hdr_abort.cpp
host_name.cpp
+ invokeserviceimpl.cpp
issue.cpp
joinable.cpp
latch.cpp
diff --git a/vespalib/src/vespa/vespalib/util/executor.h b/vespalib/src/vespa/vespalib/util/executor.h
index 6ef8f182ec4..6346b51c2ab 100644
--- a/vespalib/src/vespa/vespalib/util/executor.h
+++ b/vespalib/src/vespa/vespalib/util/executor.h
@@ -7,11 +7,23 @@
namespace vespalib {
/**
+ * Interface for components that can benefit from regular wakeup calls.
+ */
+class IWakeup {
+public:
+ virtual ~IWakeup() = default;
+ /**
+ * In case you have a lazy executor that naps in between.
+ **/
+ virtual void wakeup() = 0;
+};
+
+/**
* An executor decouples the execution of a task from the request of
* executing that task. Also, tasks are typically executed
* concurrently in multiple threads.
**/
-class Executor
+class Executor : public IWakeup
{
public:
/**
@@ -37,10 +49,6 @@ public:
**/
virtual Task::UP execute(Task::UP task) = 0;
- /**
- * In case you have a lazy executor that naps inbetween.
- **/
- virtual void wakeup() = 0;
virtual ~Executor() = default;
};
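
Lifting wakeup() out of Executor into the new IWakeup interface lets the same wakeup wiring target both plain executors and sequenced task executors, since ISequencedTaskExecutor now derives from IWakeup as well. A minimal sketch; register_wakeup is a hypothetical helper, not an API introduced by this change:

    #include <vespa/vespalib/util/executor.h>        // vespalib::IWakeup
    #include <vespa/vespalib/util/invokeservice.h>
    #include <memory>

    // Hypothetical helper: one piece of wiring for anything that can be woken up.
    std::unique_ptr<vespalib::IDestructorCallback>
    register_wakeup(vespalib::InvokeService &invoker, vespalib::IWakeup &target) {
        return invoker.registerInvoke([&target]() { target.wakeup(); });
    }
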
diff --git a/vespalib/src/vespa/vespalib/util/invokeservice.h b/vespalib/src/vespa/vespalib/util/invokeservice.h
new file mode 100644
index 00000000000..3e3973234d1
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/util/invokeservice.h
@@ -0,0 +1,20 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "idestructorcallback.h"
+#include <functional>
+
+namespace vespalib {
+
+/**
+ * Interface to register for receiving regular invoke calls.
+ * The registration will last as long as the returned object is kept alive.
+ **/
+class InvokeService {
+public:
+ virtual ~InvokeService() = default;
+ virtual std::unique_ptr<IDestructorCallback> registerInvoke(std::function<void()> func) = 0;
+};
+
+}
diff --git a/vespalib/src/vespa/vespalib/util/invokeserviceimpl.cpp b/vespalib/src/vespa/vespalib/util/invokeserviceimpl.cpp
new file mode 100644
index 00000000000..ffa0825c950
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/util/invokeserviceimpl.cpp
@@ -0,0 +1,86 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "invokeserviceimpl.h"
+#include <cassert>
+
+namespace vespalib {
+
+InvokeServiceImpl::InvokeServiceImpl(duration napTime)
+ : _naptime(napTime),
+ _lock(),
+ _currId(0),
+ _closed(false),
+ _toInvoke(),
+ _thread()
+{
+}
+
+InvokeServiceImpl::~InvokeServiceImpl()
+{
+ {
+ std::lock_guard guard(_lock);
+ assert(_toInvoke.empty());
+ _closed = true;
+ }
+ if (_thread) {
+ _thread->join();
+ }
+}
+
+class InvokeServiceImpl::Registration : public IDestructorCallback {
+public:
+ Registration(InvokeServiceImpl * service, uint64_t id) noexcept
+ : _service(service),
+ _id(id)
+ { }
+ Registration(const Registration &) = delete;
+ Registration & operator=(const Registration &) = delete;
+ ~Registration() override{
+ _service->unregister(_id);
+ }
+private:
+ InvokeServiceImpl * _service;
+ uint64_t _id;
+};
+
+std::unique_ptr<IDestructorCallback>
+InvokeServiceImpl::registerInvoke(VoidFunc func) {
+ std::lock_guard guard(_lock);
+ uint64_t id = _currId++;
+ _toInvoke.emplace_back(id, std::move(func));
+ if ( ! _thread) {
+ _thread = std::make_unique<std::thread>([this]() { runLoop(); });
+ }
+ return std::make_unique<Registration>(this, id);
+}
+
+void
+InvokeServiceImpl::unregister(uint64_t id) {
+ std::lock_guard guard(_lock);
+ auto found = std::find_if(_toInvoke.begin(), _toInvoke.end(), [id](const std::pair<uint64_t, VoidFunc> & a) {
+ return id == a.first;
+ });
+ assert (found != _toInvoke.end());
+ _toInvoke.erase(found);
+}
+
+void
+InvokeServiceImpl::runLoop() {
+ bool done = false;
+ while ( ! done ) {
+ {
+ std::lock_guard guard(_lock);
+ for (auto & func: _toInvoke) {
+ func.second();
+ }
+ done = _closed;
+ }
+ if ( ! done) {
+ std::this_thread::sleep_for(_naptime);
+ }
+ }
+
+}
+
+}
+
diff --git a/vespalib/src/vespa/vespalib/util/invokeserviceimpl.h b/vespalib/src/vespa/vespalib/util/invokeserviceimpl.h
new file mode 100644
index 00000000000..3b0c7690731
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/util/invokeserviceimpl.h
@@ -0,0 +1,36 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "invokeservice.h"
+#include "time.h"
+#include <mutex>
+#include <vector>
+#include <thread>
+
+namespace vespalib {
+
+/**
+ * An invoke service that will invoke the registered functions at a specified frequency.
+ */
+class InvokeServiceImpl : public InvokeService {
+ using VoidFunc = std::function<void()>;
+public:
+ InvokeServiceImpl(duration napTime);
+ InvokeServiceImpl(const InvokeServiceImpl &) = delete;
+ InvokeServiceImpl & operator=(const InvokeServiceImpl &) = delete;
+ ~InvokeServiceImpl() override;
+ std::unique_ptr<IDestructorCallback> registerInvoke(VoidFunc func) override;
+private:
+ class Registration;
+ void unregister(uint64_t id);
+ void runLoop();
+ duration _naptime;
+ std::mutex _lock;
+ uint64_t _currId;
+ bool _closed;
+ std::vector<std::pair<uint64_t, VoidFunc>> _toInvoke;
+ std::unique_ptr<std::thread> _thread;
+};
+
+}