summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--bundle-plugin/src/test/java/com/yahoo/container/plugin/classanalysis/AnalyzeClassTest.java13
-rw-r--r--config-application-package/src/main/java/com/yahoo/config/model/application/provider/ApplicationPackageXmlFilesValidator.java4
-rw-r--r--config-application-package/src/main/java/com/yahoo/config/model/application/provider/SchemaValidator.java6
-rw-r--r--config-application-package/src/main/java/com/yahoo/config/model/application/provider/SchemaValidators.java28
-rw-r--r--config-application-package/src/main/java/com/yahoo/config/model/application/provider/SimpleApplicationValidator.java2
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java1
-rw-r--r--config-model/src/main/java/com/yahoo/config/model/deploy/DeployProperties.java17
-rw-r--r--config-model/src/main/java/com/yahoo/config/model/test/TestDriver.java3
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/DocumentOnlySearch.java (renamed from config-model/src/main/java/com/yahoo/searchdefinition/UnproperSearch.java)13
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/UnprocessingSearchBuilder.java44
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/derived/Deriver.java33
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java1
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV4Builder.java11
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/ContainerModelEvaluation.java15
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/search/IndexedSearchCluster.java4
-rw-r--r--config-model/src/main/javacc/SDParser.jj4
-rw-r--r--config-model/src/test/derived/documentderiver/compression_body.sd20
-rw-r--r--config-model/src/test/derived/documentderiver/compression_both.sd26
-rw-r--r--config-model/src/test/derived/documentderiver/compression_header.sd20
-rw-r--r--config-model/src/test/derived/documentderiver/documentmanager.cfg394
-rw-r--r--config-model/src/test/derived/documentderiver/mail.sd112
-rw-r--r--config-model/src/test/derived/documentderiver/music.sd44
-rw-r--r--config-model/src/test/derived/documentderiver/newsarticle.sd126
-rw-r--r--config-model/src/test/derived/documentderiver/newssummary.sd165
-rw-r--r--config-model/src/test/derived/documentderiver/sombrero.sd36
-rw-r--r--config-model/src/test/derived/documentderiver/vsmfields.cfg390
-rw-r--r--config-model/src/test/derived/documentderiver/vsmsummary.cfg4
-rw-r--r--config-model/src/test/derived/inheritancebadtypes/child.sd8
-rw-r--r--config-model/src/test/derived/inheritancebadtypes/parent.sd8
-rw-r--r--config-model/src/test/examples/attributeindex.sd24
-rw-r--r--config-model/src/test/examples/attributeproperties1.sd21
-rw-r--r--config-model/src/test/examples/attributeproperties2.sd27
-rw-r--r--config-model/src/test/java/com/yahoo/config/model/ApplicationDeployTest.java4
-rw-r--r--config-model/src/test/java/com/yahoo/config/model/MockModelContext.java3
-rw-r--r--config-model/src/test/java/com/yahoo/config/model/application/provider/SchemaValidatorTest.java3
-rw-r--r--config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java11
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/ArraysTestCase.java3
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/ReservedWordsAsFieldNamesTestCase.java2
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/SearchImporterTestCase.java22
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/StemmingSettingTestCase.java2
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/derived/DocumentDeriverTestCase.java114
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/derived/InheritanceTestCase.java14
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/derived/MailTestCase.java32
-rwxr-xr-xconfig-model/src/test/java/com/yahoo/searchdefinition/derived/StreamingStructTestCase.java11
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/processing/AttributeIndexTestCase.java34
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/processing/AttributePropertiesTestCase.java40
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/processing/BoldingTestCase.java4
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/processing/IntegerIndex2AttributeTestCase.java5
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/processing/SummaryFieldsMustHaveValidSourceTestCase.java16
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/VespaModelFactoryTest.java3
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTester.java7
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/test/utils/VespaModelCreatorWithFilePkg.java3
-rw-r--r--config-provisioning/src/main/java/com/yahoo/config/provision/NodeType.java5
-rw-r--r--config/src/main/java/com/yahoo/vespa/config/ConfigVerification.java3
-rw-r--r--config/src/test/java/com/yahoo/vespa/config/classes/app.1.def7
-rw-r--r--config/src/test/java/com/yahoo/vespa/config/classes/qr-templates.3.def141
-rw-r--r--config/src/test/java/com/yahoo/vespa/config/classes/ranges.1.def4
-rw-r--r--config/src/test/java/com/yahoo/vespa/config/classes/testfoobar.12.def918
-rw-r--r--config/src/test/java/com/yahoo/vespa/config/configsglobal/qr-templates.3.cfg111
-rw-r--r--config/src/test/java/com/yahoo/vespa/config/configsglobal/testfoobar.12.cfg105
-rw-r--r--configdefinitions/src/vespa/configserver.def1
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java8
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ConfigServerMaintenance.java7
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ActivatedModelsBuilder.java13
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java18
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/PreparedModelsBuilder.java17
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/provision/StaticProvisioner.java8
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java3
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantListener.java4
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/ModelContextImplTest.java4
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/TestComponentRegistry.java27
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/SessionActiveHandlerTest.java7
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/provision/StaticProvisionerTest.java2
-rw-r--r--container-core/src/main/java/com/yahoo/container/handler/LogHandler.java11
-rw-r--r--container-core/src/main/java/com/yahoo/container/handler/LogReader.java10
-rw-r--r--container-core/src/test/java/com/yahoo/container/handler/LogHandlerTest.java54
-rw-r--r--container-core/src/test/java/com/yahoo/container/handler/LogReaderTest.java8
-rw-r--r--container-di/src/main/java/com/yahoo/container/di/componentgraph/core/ComponentNode.java1
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/fastsearch/FS4CloseableChannel.java137
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/fastsearch/FastSearcher.java44
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/fastsearch/VespaBackEndSearcher.java17
-rw-r--r--container-search/src/main/java/com/yahoo/search/dispatch/CloseableChannel.java28
-rw-r--r--container-search/src/main/java/com/yahoo/search/dispatch/Dispatcher.java39
-rw-r--r--container-search/src/main/java/com/yahoo/search/dispatch/InterleavedCloseableChannel.java98
-rw-r--r--container-search/src/main/java/com/yahoo/search/dispatch/LoadBalancer.java5
-rw-r--r--container-search/src/test/java/com/yahoo/prelude/templates/test/qr-templates.cfg104
-rw-r--r--container-search/src/test/java/com/yahoo/search/dispatch/LoadBalancerTest.java15
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/MetricsService.java14
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/MockOrganization.java12
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/routing/GlobalRoutingService.java16
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/routing/MemoryGlobalRoutingService.java20
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Application.java90
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java17
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java13
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/LockedApplication.java80
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Deployment.java2
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/DeploymentMetrics.java12
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/RotationStatus.java20
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmer.java46
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporter.java21
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java105
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java38
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java57
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/filter/ControllerAuthorizationFilter.java3
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java2
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java25
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java4
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/MetricsServiceMock.java34
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmerTest.java7
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java64
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java85
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java19
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ControllerContainerTest.java1
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java130
-rw-r--r--docker-api/pom.xml6
-rw-r--r--docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/ContainerResources.java12
-rw-r--r--docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/ContainerStats.java (renamed from docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/ContainerStatsImpl.java)6
-rw-r--r--docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/CreateContainerCommandImpl.java16
-rw-r--r--docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/Docker.java22
-rw-r--r--docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImageGarbageCollector.java189
-rw-r--r--docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImpl.java165
-rw-r--r--docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/exception/ContainerNotFoundException.java13
-rw-r--r--docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/exception/DockerException.java (renamed from docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerException.java)4
-rw-r--r--docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/exception/DockerExecTimeoutException.java (renamed from docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerExecTimeoutException.java)6
-rw-r--r--docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/exception/package-info.java5
-rw-r--r--docker-api/src/main/resources/configdefinitions/docker.def4
-rw-r--r--docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/DockerImageGarbageCollectionTest.java184
-rw-r--r--docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/DockerImplTest.java13
-rw-r--r--fnet/src/tests/frt/rpc/invoke.cpp907
-rw-r--r--model-evaluation/src/main/java/ai/vespa/models/handler/ModelsEvaluationHandler.java131
-rw-r--r--model-evaluation/src/main/java/ai/vespa/models/handler/package-info.java4
-rw-r--r--model-evaluation/src/test/java/ai/vespa/models/handler/ModelsEvaluationHandlerTest.java10
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperations.java9
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImpl.java51
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java6
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java44
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerMock.java18
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java15
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InfrastructureProvisioner.java5
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InfrastructureVersions.java10
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java20
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java14
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/NodesResponse.java1
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InfrastructureProvisionerTest.java54
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/RestApiTest.java20
-rw-r--r--parent/pom.xml2
-rw-r--r--searchcore/src/tests/proton/documentdb/documentdb_test.cpp4
-rw-r--r--searchcore/src/vespa/searchcore/config/proton.def4
-rw-r--r--searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.cpp19
-rw-r--r--searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.h14
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/CMakeLists.txt1
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/document_db_explorer.cpp9
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/document_meta_store_read_guards.cpp2
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/document_meta_store_read_guards.h8
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/documentdb.cpp324
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/documentdb.h11
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/documentdb_metrics_updater.cpp415
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/documentdb_metrics_updater.h61
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/idocumentsubdb.h1
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h1
-rw-r--r--searchcore/src/vespa/searchcore/proton/test/dummy_document_sub_db.h1
-rw-r--r--searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/integration/ml/BatchNormImportTestCase.java2
-rw-r--r--searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/integration/ml/DropoutImportTestCase.java2
-rw-r--r--searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/integration/ml/OnnxMnistSoftmaxImportTestCase.java2
-rw-r--r--searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/integration/ml/TensorFlowMnistSoftmaxImportTestCase.java2
-rw-r--r--searchlib/src/tests/docstore/document_store/document_store_test.cpp67
-rw-r--r--searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp18
-rw-r--r--searchlib/src/vespa/searchlib/docstore/CMakeLists.txt1
-rw-r--r--searchlib/src/vespa/searchlib/docstore/documentstore.cpp225
-rw-r--r--searchlib/src/vespa/searchlib/docstore/documentstore.h24
-rw-r--r--searchlib/src/vespa/searchlib/docstore/value.cpp75
-rw-r--r--searchlib/src/vespa/searchlib/docstore/value.h57
-rw-r--r--searchlib/src/vespa/searchlib/memoryindex/featurestore.cpp15
-rw-r--r--searchlib/src/vespa/searchlib/memoryindex/featurestore.h98
-rw-r--r--searchlib/src/vespa/searchlib/memoryindex/iordereddocumentinserter.h7
-rw-r--r--searchlib/src/vespa/searchlib/memoryindex/ordereddocumentinserter.cpp1
-rw-r--r--searchlib/src/vespa/searchlib/memoryindex/ordereddocumentinserter.h21
-rw-r--r--service-monitor/src/main/java/com/yahoo/vespa/service/monitor/application/ControllerApplication.java18
-rw-r--r--service-monitor/src/main/java/com/yahoo/vespa/service/monitor/application/HostedVespaApplication.java6
-rw-r--r--vespalib/CMakeLists.txt1
-rw-r--r--vespalib/src/tests/net/tls/direct_buffer_bio/CMakeLists.txt10
-rw-r--r--vespalib/src/tests/net/tls/direct_buffer_bio/direct_buffer_bio_test.cpp138
-rw-r--r--vespalib/src/vespa/vespalib/net/crypto_engine.cpp5
-rw-r--r--vespalib/src/vespa/vespalib/net/tls/impl/CMakeLists.txt1
-rw-r--r--vespalib/src/vespa/vespalib/net/tls/impl/direct_buffer_bio.cpp418
-rw-r--r--vespalib/src/vespa/vespalib/net/tls/impl/direct_buffer_bio.h90
-rw-r--r--vespalib/src/vespa/vespalib/net/tls/impl/openssl_crypto_codec_impl.cpp213
-rw-r--r--vespalib/src/vespa/vespalib/net/tls/impl/openssl_crypto_codec_impl.h16
188 files changed, 3533 insertions, 5719 deletions
diff --git a/bundle-plugin/src/test/java/com/yahoo/container/plugin/classanalysis/AnalyzeClassTest.java b/bundle-plugin/src/test/java/com/yahoo/container/plugin/classanalysis/AnalyzeClassTest.java
index aba6e8f14e8..e9df42f24f4 100644
--- a/bundle-plugin/src/test/java/com/yahoo/container/plugin/classanalysis/AnalyzeClassTest.java
+++ b/bundle-plugin/src/test/java/com/yahoo/container/plugin/classanalysis/AnalyzeClassTest.java
@@ -12,6 +12,8 @@ import com.yahoo.container.plugin.classanalysis.sampleclasses.MethodAnnotation;
import com.yahoo.container.plugin.classanalysis.sampleclasses.MethodInvocation;
import com.yahoo.osgi.annotation.ExportPackage;
import com.yahoo.osgi.annotation.Version;
+import org.hamcrest.Matcher;
+import org.hamcrest.Matchers;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
@@ -27,7 +29,6 @@ import static com.yahoo.container.plugin.classanalysis.TestUtilities.classFile;
import static com.yahoo.container.plugin.classanalysis.TestUtilities.name;
import static com.yahoo.container.plugin.classanalysis.TestUtilities.throwableMessage;
import static org.hamcrest.Matchers.allOf;
-import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.is;
@@ -41,6 +42,7 @@ import static org.junit.Assert.assertThat;
* @author ollivir
*/
public class AnalyzeClassTest {
+
@Test
public void require_that_full_class_name_is_returned() {
assertThat(analyzeClass(Base.class).getName(), is(name(Base.class)));
@@ -82,12 +84,14 @@ public class AnalyzeClassTest {
@Test
public void require_that_basic_types_ignored() {
- assertThat(analyzeClass(Interface1.class).getReferencedClasses(), not(anyOf(hasItem("int"), hasItem("float"))));
+ assertThat(analyzeClass(Interface1.class).getReferencedClasses(),
+ not(Matchers.<Iterable<? super String>>anyOf(hasItem("int"), hasItem("float"))));
}
@Test
public void require_that_arrays_of_basic_types_ignored() {
- assertThat(analyzeClass(Interface1.class).getReferencedClasses(), not(anyOf(hasItem("int[]"), hasItem("int[][]"))));
+ assertThat(analyzeClass(Interface1.class).getReferencedClasses(),
+ not(Matchers.<Iterable<? super String>>anyOf(hasItem("int[]"), hasItem("int[][]"))));
}
@Test
@@ -118,7 +122,8 @@ public class AnalyzeClassTest {
@Test
public void require_that_export_package_annotations_are_ignored() {
assertThat(Analyze.analyzeClass(classFile("com.yahoo.container.plugin.classanalysis.sampleclasses.package-info"))
- .getReferencedClasses(), not(anyOf(hasItem(name(ExportPackage.class)), hasItem(name(Version.class)))));
+ .getReferencedClasses(), not(Matchers.<Iterable<? super String>>anyOf(
+ hasItem(name(ExportPackage.class)), hasItem(name(Version.class)))));
}
@Test
diff --git a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/ApplicationPackageXmlFilesValidator.java b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/ApplicationPackageXmlFilesValidator.java
index 8e9c5c0b509..74ade9d8e14 100644
--- a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/ApplicationPackageXmlFilesValidator.java
+++ b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/ApplicationPackageXmlFilesValidator.java
@@ -26,9 +26,9 @@ public class ApplicationPackageXmlFilesValidator {
private static final FilenameFilter xmlFilter = (dir, name) -> name.endsWith(".xml");
- public ApplicationPackageXmlFilesValidator(AppSubDirs appDirs, Version vespaVersion) {
+ private ApplicationPackageXmlFilesValidator(AppSubDirs appDirs, Version vespaVersion) {
this.appDirs = appDirs;
- this.validators = new SchemaValidators(vespaVersion, new BaseDeployLogger());
+ this.validators = new SchemaValidators(vespaVersion);
}
public static ApplicationPackageXmlFilesValidator create(File appDir, Version vespaVersion) {
diff --git a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/SchemaValidator.java b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/SchemaValidator.java
index 50268fc1e08..16469bb13ae 100644
--- a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/SchemaValidator.java
+++ b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/SchemaValidator.java
@@ -82,15 +82,15 @@ public class SchemaValidator {
private class CustomErrorHandler implements ErrorHandler {
volatile String fileName;
- public void warning(SAXParseException e) throws SAXException {
+ public void warning(SAXParseException e) {
deployLogger.log(Level.WARNING, message(e));
}
- public void error(SAXParseException e) throws SAXException {
+ public void error(SAXParseException e) {
throw new IllegalArgumentException(message(e));
}
- public void fatalError(SAXParseException e) throws SAXException {
+ public void fatalError(SAXParseException e) {
throw new IllegalArgumentException(message(e));
}
diff --git a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/SchemaValidators.java b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/SchemaValidators.java
index 783f7361ad5..a28f771c37b 100644
--- a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/SchemaValidators.java
+++ b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/SchemaValidators.java
@@ -2,7 +2,6 @@
package com.yahoo.config.model.application.provider;
import com.yahoo.component.Version;
-import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.io.IOUtils;
import com.yahoo.log.LogLevel;
import org.osgi.framework.Bundle;
@@ -40,8 +39,6 @@ public class SchemaValidators {
private static final String routingStandaloneXmlSchemaName = "routing-standalone.rnc";
- private final DeployLogger deployLogger;
-
private final SchemaValidator servicesXmlValidator;
private final SchemaValidator hostsXmlValidator;
private final SchemaValidator deploymentXmlValidator;
@@ -54,8 +51,7 @@ public class SchemaValidators {
*
* @param vespaVersion the version of Vespa we should validate against
*/
- public SchemaValidators(Version vespaVersion, DeployLogger logger) {
- this.deployLogger = logger;
+ public SchemaValidators(Version vespaVersion) {
File schemaDir = null;
try {
schemaDir = saveSchemasFromJar(new File(SchemaValidators.schemaDirBase), vespaVersion);
@@ -75,15 +71,6 @@ public class SchemaValidators {
}
}
- /**
- * Initializes the validator by using the given file as schema file
- *
- * @param vespaVersion the version of Vespa we should validate against
- */
- public SchemaValidators(Version vespaVersion) {
- this(vespaVersion, new BaseDeployLogger());
- }
-
public SchemaValidator servicesXmlValidator() {
return servicesXmlValidator;
}
@@ -104,7 +91,7 @@ public class SchemaValidators {
return containerIncludeXmlValidator;
}
- public SchemaValidator routingStandaloneXmlValidator() {
+ SchemaValidator routingStandaloneXmlValidator() {
return routingStandaloneXmlValidator;
}
@@ -114,22 +101,20 @@ public class SchemaValidators {
* @return the directory the schema files are stored in
* @throws IOException if it is not possible to read schema files
*/
- File saveSchemasFromJar(File tmpBase, Version vespaVersion) throws IOException {
+ private File saveSchemasFromJar(File tmpBase, Version vespaVersion) throws IOException {
final Class<? extends SchemaValidators> schemaValidatorClass = this.getClass();
final ClassLoader classLoader = schemaValidatorClass.getClassLoader();
Enumeration<URL> uris = classLoader.getResources("schema");
if (uris == null) return null;
File tmpDir = java.nio.file.Files.createTempDirectory(tmpBase.toPath(), "vespa").toFile();
- log.log(LogLevel.DEBUG, "Will save all XML schemas to " + tmpDir);
+ log.log(LogLevel.DEBUG, "Will save all XML schemas found in jar to " + tmpDir);
while (uris.hasMoreElements()) {
URL u = uris.nextElement();
log.log(LogLevel.DEBUG, "uri for resource 'schema'=" + u.toString());
if ("jar".equals(u.getProtocol())) {
JarURLConnection jarConnection = (JarURLConnection) u.openConnection();
JarFile jarFile = jarConnection.getJarFile();
- for (Enumeration<JarEntry> entries = jarFile.entries();
- entries.hasMoreElements(); ) {
-
+ for (Enumeration<JarEntry> entries = jarFile.entries(); entries.hasMoreElements(); ) {
JarEntry je = entries.nextElement();
if (je.getName().startsWith("schema/") && je.getName().endsWith(".rnc")) {
writeContentsToFile(tmpDir, je.getName(), jarFile.getInputStream(je));
@@ -168,7 +153,6 @@ public class SchemaValidators {
return tmpDir;
}
- // TODO: This only copies schema for services.xml. Why?
private static void copySchemas(File from, File to) throws IOException {
// TODO: only copy .rnc files.
if (! from.exists()) throw new IOException("Could not find schema source directory '" + from + "'");
@@ -187,7 +171,7 @@ public class SchemaValidators {
private SchemaValidator createValidator(File schemaDir, String schemaFile) {
try {
File file = new File(schemaDir + File.separator + "schema" + File.separator + schemaFile);
- return new SchemaValidator(file, deployLogger);
+ return new SchemaValidator(file, new BaseDeployLogger());
} catch (SAXException e) {
throw new RuntimeException("Invalid schema '" + schemaFile + "'", e);
} catch (IOException e) {
diff --git a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/SimpleApplicationValidator.java b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/SimpleApplicationValidator.java
index 9db254bc742..1284d315058 100644
--- a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/SimpleApplicationValidator.java
+++ b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/SimpleApplicationValidator.java
@@ -14,6 +14,6 @@ import java.io.Reader;
public class SimpleApplicationValidator {
public static void checkServices(Reader reader, Version version) throws IOException {
- new SchemaValidators(version, new BaseDeployLogger()).servicesXmlValidator().validate(reader);
+ new SchemaValidators(version).servicesXmlValidator().validate(reader);
}
}
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
index c75174cd999..6d5804ab700 100644
--- a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
@@ -51,6 +51,7 @@ public interface ModelContext {
Set<Rotation> rotations();
boolean isBootstrap();
boolean isFirstTimeDeployment();
+ boolean useDedicatedNodeForLogserver();
}
}
diff --git a/config-model/src/main/java/com/yahoo/config/model/deploy/DeployProperties.java b/config-model/src/main/java/com/yahoo/config/model/deploy/DeployProperties.java
index 53c70399e94..9d9a19bfbd6 100644
--- a/config-model/src/main/java/com/yahoo/config/model/deploy/DeployProperties.java
+++ b/config-model/src/main/java/com/yahoo/config/model/deploy/DeployProperties.java
@@ -27,6 +27,8 @@ public class DeployProperties {
private final Version vespaVersion;
private final boolean isBootstrap;
private final boolean isFirstTimeDeployment;
+ private final boolean useDedicatedNodeForLogserver;
+
private DeployProperties(boolean multitenant,
ApplicationId applicationId,
@@ -37,7 +39,8 @@ public class DeployProperties {
String athenzDnsSuffix,
Version vespaVersion,
boolean isBootstrap,
- boolean isFirstTimeDeployment) {
+ boolean isFirstTimeDeployment,
+ boolean useDedicatedNodeForLogserver) {
this.loadBalancerName = loadBalancerName;
this.ztsUrl = ztsUrl;
this.athenzDnsSuffix = athenzDnsSuffix;
@@ -48,6 +51,7 @@ public class DeployProperties {
this.hostedVespa = hostedVespa;
this.isBootstrap = isBootstrap;
this.isFirstTimeDeployment = isFirstTimeDeployment;
+ this.useDedicatedNodeForLogserver = useDedicatedNodeForLogserver;
}
public boolean multitenant() {
@@ -89,6 +93,8 @@ public class DeployProperties {
/** Returns whether this is the first deployment for this application (used during *prepare*, not set on activate) */
public boolean isFirstTimeDeployment() { return isFirstTimeDeployment; }
+ public boolean useDedicatedNodeForLogserver() { return useDedicatedNodeForLogserver; }
+
public static class Builder {
private ApplicationId applicationId = ApplicationId.defaultId();
@@ -101,6 +107,7 @@ public class DeployProperties {
private Version vespaVersion = Version.fromIntValues(1, 0, 0);
private boolean isBootstrap = false;
private boolean isFirstTimeDeployment = false;
+ private boolean useDedicatedNodeForLogserver = false;
public Builder applicationId(ApplicationId applicationId) {
this.applicationId = applicationId;
@@ -152,9 +159,15 @@ public class DeployProperties {
return this;
}
+ public Builder useDedicatedNodeForLogserver(boolean useDedicatedNodeForLogserver) {
+ this.useDedicatedNodeForLogserver = useDedicatedNodeForLogserver;
+ return this;
+ }
+
public DeployProperties build() {
return new DeployProperties(multitenant, applicationId, configServerSpecs, loadBalancerName, hostedVespa,
- ztsUrl, athenzDnsSuffix, vespaVersion, isBootstrap, isFirstTimeDeployment);
+ ztsUrl, athenzDnsSuffix, vespaVersion, isBootstrap, isFirstTimeDeployment,
+ useDedicatedNodeForLogserver);
}
}
diff --git a/config-model/src/main/java/com/yahoo/config/model/test/TestDriver.java b/config-model/src/main/java/com/yahoo/config/model/test/TestDriver.java
index b538468d0bc..e0047aba2db 100644
--- a/config-model/src/main/java/com/yahoo/config/model/test/TestDriver.java
+++ b/config-model/src/main/java/com/yahoo/config/model/test/TestDriver.java
@@ -9,6 +9,7 @@ import com.yahoo.config.model.application.provider.BaseDeployLogger;
import com.yahoo.config.model.application.provider.SchemaValidators;
import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.config.model.builder.xml.ConfigModelBuilder;
+import com.yahoo.vespa.config.VespaVersion;
import com.yahoo.vespa.model.VespaModel;
import org.xml.sax.SAXException;
@@ -103,7 +104,7 @@ public class TestDriver {
if (!validate) {
return;
}
- SchemaValidators schemaValidators = new SchemaValidators(new Version(6), new BaseDeployLogger());
+ SchemaValidators schemaValidators = new SchemaValidators(new Version(VespaVersion.major));
if (appPkg.getHosts() != null) {
schemaValidators.hostsXmlValidator().validate(appPkg.getHosts());
}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/UnproperSearch.java b/config-model/src/main/java/com/yahoo/searchdefinition/DocumentOnlySearch.java
index 71d3db6616b..5940f908be8 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/UnproperSearch.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/DocumentOnlySearch.java
@@ -5,19 +5,13 @@ import com.yahoo.searchdefinition.document.SDDocumentType;
/**
* A search that was derived from an sd file containing no search element(s), only
- * document specifications.
+ * document specifications, so the name of this is decided by parsing and adding the document instance.
*
* @author vegardh
- *
*/
- // Award for best class name goes to ...
-public class UnproperSearch extends Search {
- // This class exists because the parser accepts SD files without search { ... , and
- // there are unit tests using it too, BUT there are many nullpointer bugs if you try to
- // deploy such a file. Using this class to try to catch those.
- // TODO: Throw away this when we properly support doc-only SD files.
+public class DocumentOnlySearch extends Search {
- public UnproperSearch() {
+ public DocumentOnlySearch() {
// empty
}
@@ -28,4 +22,5 @@ public class UnproperSearch extends Search {
}
super.addDocument(docType);
}
+
}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/UnprocessingSearchBuilder.java b/config-model/src/main/java/com/yahoo/searchdefinition/UnprocessingSearchBuilder.java
deleted file mode 100644
index 6c12c6c94d1..00000000000
--- a/config-model/src/main/java/com/yahoo/searchdefinition/UnprocessingSearchBuilder.java
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.searchdefinition;
-
-import com.yahoo.search.query.profile.QueryProfileRegistry;
-import com.yahoo.searchdefinition.parser.ParseException;
-import com.yahoo.config.application.api.ApplicationPackage;
-import com.yahoo.config.application.api.DeployLogger;
-import com.yahoo.vespa.model.container.search.QueryProfiles;
-
-import java.io.IOException;
-
-/**
- * A SearchBuilder that does not run the processing chain for searches
- */
-public class UnprocessingSearchBuilder extends SearchBuilder {
-
- public UnprocessingSearchBuilder(ApplicationPackage app,
- RankProfileRegistry rankProfileRegistry,
- QueryProfileRegistry queryProfileRegistry) {
- super(app, rankProfileRegistry, queryProfileRegistry);
- }
-
- public UnprocessingSearchBuilder() {
- super();
- }
-
- public UnprocessingSearchBuilder(RankProfileRegistry rankProfileRegistry,
- QueryProfileRegistry queryProfileRegistry) {
- super(rankProfileRegistry, queryProfileRegistry);
- }
-
- @Override
- public void process(Search search, DeployLogger deployLogger, QueryProfiles queryProfiles, boolean validate) {
- // empty
- }
-
- public static Search buildUnprocessedFromFile(String fileName) throws IOException, ParseException {
- SearchBuilder builder = new UnprocessingSearchBuilder();
- builder.importFile(fileName);
- builder.build();
- return builder.getSearch();
- }
-
-}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/derived/Deriver.java b/config-model/src/main/java/com/yahoo/searchdefinition/derived/Deriver.java
index e6b6c58cb38..133adb45dd9 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/derived/Deriver.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/derived/Deriver.java
@@ -3,7 +3,6 @@ package com.yahoo.searchdefinition.derived;
import com.yahoo.document.DocumenttypesConfig;
import com.yahoo.document.config.DocumentmanagerConfig;
import com.yahoo.searchdefinition.SearchBuilder;
-import com.yahoo.searchdefinition.UnprocessingSearchBuilder;
import com.yahoo.searchdefinition.parser.ParseException;
import com.yahoo.vespa.configmodel.producers.DocumentManager;
import com.yahoo.vespa.configmodel.producers.DocumentTypes;
@@ -18,25 +17,6 @@ import java.util.List;
*/
public class Deriver {
- /**
- * Derives only document manager.
- *
- *
- * @param sdFileNames The name of the search definition files to derive from.
- * @param toDir The directory to write configuration to.
- * @return The list of Search objects, possibly "unproper ones", from sd files containing only document
- */
- public static SearchBuilder deriveDocuments(List<String> sdFileNames, String toDir) {
- SearchBuilder builder = getUnprocessingSearchBuilder(sdFileNames);
- DocumentmanagerConfig.Builder documentManagerCfg = new DocumentManager().produce(builder.getModel(), new DocumentmanagerConfig.Builder());
- try {
- DerivedConfiguration.exportDocuments(documentManagerCfg, toDir);
- } catch (IOException e) {
- throw new IllegalArgumentException(e);
- }
- return builder;
- }
-
public static SearchBuilder getSearchBuilder(List<String> sds) {
SearchBuilder builder = new SearchBuilder();
try {
@@ -50,19 +30,6 @@ public class Deriver {
return builder;
}
- public static SearchBuilder getUnprocessingSearchBuilder(List<String> sds) {
- SearchBuilder builder = new UnprocessingSearchBuilder();
- try {
- for (String s : sds) {
- builder.importFile(s);
- }
- } catch (ParseException | IOException e) {
- throw new IllegalArgumentException(e);
- }
- builder.build();
- return builder;
- }
-
public static DocumentmanagerConfig.Builder getDocumentManagerConfig(String sd) {
return getDocumentManagerConfig(Collections.singletonList(sd));
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java b/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java
index 75f70d03fcc..2af9b297e9e 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java
@@ -156,6 +156,7 @@ public class VespaModelFactory implements ModelFactory {
.vespaVersion(getVersion())
.isBootstrap(properties.isBootstrap())
.isFirstTimeDeployment(properties.isFirstTimeDeployment())
+ .useDedicatedNodeForLogserver(properties.useDedicatedNodeForLogserver())
.build();
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV4Builder.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV4Builder.java
index ea943f069cb..5cf55be5534 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV4Builder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV4Builder.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.model.builder.xml.dom;
import com.yahoo.config.model.ConfigModelContext;
import com.yahoo.config.model.api.ConfigServerSpec;
+import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.SystemName;
@@ -93,9 +94,13 @@ public class DomAdminV4Builder extends DomAdminBuilderBase {
private NodesSpecification createNodesSpecificationForLogserver() {
// TODO: Enable for main system as well
- //if (context.getDeployState().isHosted() && context.getDeployState().zone().system() == SystemName.cd)
- // return NodesSpecification.dedicated(1, context);
- //else
+ DeployState deployState = context.getDeployState();
+ if (deployState.getProperties().useDedicatedNodeForLogserver() &&
+ context.getApplicationType() == ConfigModelContext.ApplicationType.DEFAULT &&
+ deployState.isHosted() &&
+ deployState.zone().system() == SystemName.cd)
+ return NodesSpecification.dedicated(1, context);
+ else
return NodesSpecification.nonDedicated(1, context);
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerModelEvaluation.java b/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerModelEvaluation.java
index 11736256d1b..812c38db2fd 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerModelEvaluation.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/ContainerModelEvaluation.java
@@ -2,7 +2,6 @@
package com.yahoo.vespa.model.container;
import ai.vespa.models.evaluation.ModelsEvaluator;
-import ai.vespa.models.handler.ModelsEvaluationHandler;
import com.yahoo.osgi.provider.model.ComponentModel;
import com.yahoo.searchdefinition.derived.RankProfileList;
import com.yahoo.vespa.config.search.RankProfilesConfig;
@@ -19,9 +18,10 @@ import java.util.Objects;
*/
public class ContainerModelEvaluation implements RankProfilesConfig.Producer, RankingConstantsConfig.Producer {
- private final static String EVALUATOR_NAME = ModelsEvaluator.class.getName();
- private final static String REST_HANDLER_NAME = ModelsEvaluationHandler.class.getName();
private final static String BUNDLE_NAME = "model-evaluation";
+ private final static String EVALUATOR_NAME = ModelsEvaluator.class.getName();
+ private final static String REST_HANDLER_NAME = "ai.vespa.models.handler.ModelsEvaluationHandler";
+ private final static String REST_BINDING = "model-evaluation/v1";
/** Global rank profiles, aka models */
private final RankProfileList rankProfileList;
@@ -48,11 +48,10 @@ public class ContainerModelEvaluation implements RankProfilesConfig.Producer, Ra
public static Handler<?> getHandler() {
Handler<?> handler = new Handler<>(new ComponentModel(REST_HANDLER_NAME, null, BUNDLE_NAME));
- String binding = ModelsEvaluationHandler.API_ROOT + "/" + ModelsEvaluationHandler.VERSION_V1;
- handler.addServerBindings("http://*/" + binding,
- "https://*/" + binding,
- "http://*/" + binding + "/*",
- "https://*/" + binding + "/*");
+ handler.addServerBindings("http://*/" + REST_BINDING,
+ "https://*/" + REST_BINDING,
+ "http://*/" + REST_BINDING + "/*",
+ "https://*/" + REST_BINDING + "/*");
return handler;
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/search/IndexedSearchCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/search/IndexedSearchCluster.java
index c7762f09851..c46a662b682 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/search/IndexedSearchCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/search/IndexedSearchCluster.java
@@ -9,7 +9,7 @@ import com.yahoo.vespa.config.search.RankProfilesConfig;
import com.yahoo.config.model.producer.AbstractConfigProducer;
import com.yahoo.prelude.fastsearch.DocumentdbInfoConfig;
import com.yahoo.search.config.IndexInfoConfig;
-import com.yahoo.searchdefinition.UnproperSearch;
+import com.yahoo.searchdefinition.DocumentOnlySearch;
import com.yahoo.searchdefinition.derived.DerivedConfiguration;
import com.yahoo.vespa.configdefinition.IlscriptsConfig;
import com.yahoo.vespa.model.HostResource;
@@ -286,7 +286,7 @@ public class IndexedSearchCluster extends SearchCluster
List<com.yahoo.searchdefinition.Search> globalSearches) {
for (SearchDefinitionSpec spec : localSearches) {
com.yahoo.searchdefinition.Search search = spec.getSearchDefinition().getSearch();
- if ( ! (search instanceof UnproperSearch)) {
+ if ( ! (search instanceof DocumentOnlySearch)) {
DocumentDatabase db = new DocumentDatabase(this,
search.getName(),
new DerivedConfiguration(search,
diff --git a/config-model/src/main/javacc/SDParser.jj b/config-model/src/main/javacc/SDParser.jj
index 813d1d47533..c702651b005 100644
--- a/config-model/src/main/javacc/SDParser.jj
+++ b/config-model/src/main/javacc/SDParser.jj
@@ -40,7 +40,7 @@ import com.yahoo.searchdefinition.RankProfileRegistry;
import com.yahoo.searchdefinition.RankProfile.MatchPhaseSettings;
import com.yahoo.searchdefinition.RankProfile.DiversitySettings;
import com.yahoo.searchdefinition.Search;
-import com.yahoo.searchdefinition.UnproperSearch;
+import com.yahoo.searchdefinition.DocumentOnlySearch;
import com.yahoo.searchdefinition.UnrankedRankProfile;
import com.yahoo.searchdefinition.fieldoperation.*;
import com.yahoo.searchlib.rankingexpression.FeatureList;
@@ -457,7 +457,7 @@ Object rootSearchItem(Search search) : { }
*/
Search rootDocument(String dir) :
{
- Search search = new UnproperSearch();
+ Search search = new DocumentOnlySearch();
}
{
( (rootDocumentItem(search) (<NL>)*)*<EOF> )
diff --git a/config-model/src/test/derived/documentderiver/compression_body.sd b/config-model/src/test/derived/documentderiver/compression_body.sd
deleted file mode 100644
index cd114b24017..00000000000
--- a/config-model/src/test/derived/documentderiver/compression_body.sd
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-search compressed_body {
-
- document compressed_body {
- body {
- compression {
- level:6
- }
- }
-
- field from type string {
- }
-
- field content type string {
- body
- }
- }
-
-}
-
diff --git a/config-model/src/test/derived/documentderiver/compression_both.sd b/config-model/src/test/derived/documentderiver/compression_both.sd
deleted file mode 100644
index 577aef702d3..00000000000
--- a/config-model/src/test/derived/documentderiver/compression_both.sd
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-search compressed_both {
-
- document compressed_both {
- compression {
- threshold:90
- level:9
- }
-
- header {
- compression {
- threshold:50
- level:6
- }
- }
-
- field from type string {
- }
-
- field content type string {
- body
- }
- }
-
-}
-
diff --git a/config-model/src/test/derived/documentderiver/compression_header.sd b/config-model/src/test/derived/documentderiver/compression_header.sd
deleted file mode 100644
index 6ff951b34cb..00000000000
--- a/config-model/src/test/derived/documentderiver/compression_header.sd
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-search compressed_header {
-
- document compressed_header {
- header {
- compression {
- level:9
- }
- }
-
- field from type string {
- }
-
- field content type string {
- body
- }
- }
-
-}
-
diff --git a/config-model/src/test/derived/documentderiver/documentmanager.cfg b/config-model/src/test/derived/documentderiver/documentmanager.cfg
deleted file mode 100644
index 2ab99b75aef..00000000000
--- a/config-model/src/test/derived/documentderiver/documentmanager.cfg
+++ /dev/null
@@ -1,394 +0,0 @@
-enablecompression false
-datatype[].id 1381038251
-datatype[].structtype[].name "position"
-datatype[].structtype[].version 0
-datatype[].structtype[].compresstype NONE
-datatype[].structtype[].compresslevel 0
-datatype[].structtype[].compressthreshold 95
-datatype[].structtype[].compressminsize 800
-datatype[].structtype[].field[].name "x"
-datatype[].structtype[].field[].datatype 0
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "y"
-datatype[].structtype[].field[].datatype 0
-datatype[].structtype[].field[].detailedtype ""
-datatype[].id -843666531
-datatype[].structtype[].name "compressed_body.header"
-datatype[].structtype[].version 0
-datatype[].structtype[].compresstype NONE
-datatype[].structtype[].compresslevel 0
-datatype[].structtype[].compressthreshold 95
-datatype[].structtype[].compressminsize 800
-datatype[].structtype[].field[].name "from"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].id 1704844530
-datatype[].structtype[].name "compressed_body.body"
-datatype[].structtype[].version 0
-datatype[].structtype[].compresstype LZ4
-datatype[].structtype[].compresslevel 6
-datatype[].structtype[].compressthreshold 95
-datatype[].structtype[].compressminsize 0
-datatype[].structtype[].field[].name "content"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].id 1417245026
-datatype[].documenttype[].name "compressed_body"
-datatype[].documenttype[].version 0
-datatype[].documenttype[].inherits[].name "document"
-datatype[].documenttype[].inherits[].version 0
-datatype[].documenttype[].headerstruct -843666531
-datatype[].documenttype[].bodystruct 1704844530
-datatype[].id -484354914
-datatype[].structtype[].name "compressed_both.header"
-datatype[].structtype[].version 0
-datatype[].structtype[].compresstype LZ4
-datatype[].structtype[].compresslevel 6
-datatype[].structtype[].compressthreshold 50
-datatype[].structtype[].compressminsize 0
-datatype[].structtype[].field[].name "from"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].id -1007627725
-datatype[].structtype[].name "compressed_both.body"
-datatype[].structtype[].version 0
-datatype[].structtype[].compresstype LZ4
-datatype[].structtype[].compresslevel 9
-datatype[].structtype[].compressthreshold 90
-datatype[].structtype[].compressminsize 0
-datatype[].structtype[].field[].name "content"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].id 1417705345
-datatype[].documenttype[].name "compressed_both"
-datatype[].documenttype[].version 0
-datatype[].documenttype[].inherits[].name "document"
-datatype[].documenttype[].inherits[].version 0
-datatype[].documenttype[].headerstruct -484354914
-datatype[].documenttype[].bodystruct -1007627725
-datatype[].id -940182894
-datatype[].structtype[].name "compressed_header.header"
-datatype[].structtype[].version 0
-datatype[].structtype[].compresstype LZ4
-datatype[].structtype[].compresslevel 9
-datatype[].structtype[].compressthreshold 95
-datatype[].structtype[].compressminsize 0
-datatype[].structtype[].field[].name "from"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].id -579052249
-datatype[].structtype[].name "compressed_header.body"
-datatype[].structtype[].version 0
-datatype[].structtype[].compresstype NONE
-datatype[].structtype[].compresslevel 0
-datatype[].structtype[].compressthreshold 95
-datatype[].structtype[].compressminsize 800
-datatype[].structtype[].field[].name "content"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].id 1946084365
-datatype[].documenttype[].name "compressed_header"
-datatype[].documenttype[].version 0
-datatype[].documenttype[].inherits[].name "document"
-datatype[].documenttype[].inherits[].version 0
-datatype[].documenttype[].headerstruct -940182894
-datatype[].documenttype[].bodystruct -579052249
-datatype[].id -88808602
-datatype[].structtype[].name "mail.header"
-datatype[].structtype[].version 0
-datatype[].structtype[].compresstype NONE
-datatype[].structtype[].compresslevel 0
-datatype[].structtype[].compressthreshold 95
-datatype[].structtype[].compressminsize 800
-datatype[].structtype[].field[].name "URI"
-datatype[].structtype[].field[].datatype 10
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "mailid"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "date"
-datatype[].structtype[].field[].datatype 0
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "from"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "replyto"
-datatype[].structtype[].field[].datatype 3
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "to"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "cc"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "bcc"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "subject"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].id -1244861287
-datatype[].arraytype[].datatype 3
-datatype[].id -953584901
-datatype[].structtype[].name "mail.body"
-datatype[].structtype[].version 0
-datatype[].structtype[].compresstype NONE
-datatype[].structtype[].compresslevel 0
-datatype[].structtype[].compressthreshold 95
-datatype[].structtype[].compressminsize 800
-datatype[].structtype[].field[].name "mailbody"
-datatype[].structtype[].field[].datatype 3
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "attachmentcount"
-datatype[].structtype[].field[].datatype 0
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "attachmentnames"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "attachmenttypes"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "attachmentlanguages"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "attachmentcontent"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "attachments"
-datatype[].structtype[].field[].datatype -1244861287
-datatype[].structtype[].field[].detailedtype ""
-datatype[].id -1081574983
-datatype[].documenttype[].name "mail"
-datatype[].documenttype[].version 0
-datatype[].documenttype[].inherits[].name "document"
-datatype[].documenttype[].inherits[].version 0
-datatype[].documenttype[].headerstruct -88808602
-datatype[].documenttype[].bodystruct -953584901
-datatype[].id -1486737430
-datatype[].arraytype[].datatype 2
-datatype[].id 519906144
-datatype[].weightedsettype[].datatype 0
-datatype[].weightedsettype[].createifnonexistant false
-datatype[].weightedsettype[].removeifzero false
-datatype[].id 363959257
-datatype[].weightedsettype[].datatype 0
-datatype[].weightedsettype[].createifnonexistant true
-datatype[].weightedsettype[].removeifzero true
-datatype[].id -1910204744
-datatype[].structtype[].name "music.header"
-datatype[].structtype[].version 0
-datatype[].structtype[].compresstype NONE
-datatype[].structtype[].compresslevel 0
-datatype[].structtype[].compressthreshold 95
-datatype[].structtype[].compressminsize 800
-datatype[].structtype[].field[].name "url"
-datatype[].structtype[].field[].datatype 10
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "title"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "artist"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "year"
-datatype[].structtype[].field[].datatype 0
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "description"
-datatype[].structtype[].field[].datatype 3
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "tracks"
-datatype[].structtype[].field[].datatype -1486737430
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "popularity"
-datatype[].structtype[].field[].datatype 519906144
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "popularity2"
-datatype[].structtype[].field[].datatype 363959257
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "popularity3"
-datatype[].structtype[].field[].datatype 363959257
-datatype[].structtype[].field[].detailedtype ""
-datatype[].id 993120973
-datatype[].structtype[].name "music.body"
-datatype[].structtype[].version 0
-datatype[].structtype[].compresstype NONE
-datatype[].structtype[].compresslevel 0
-datatype[].structtype[].compressthreshold 95
-datatype[].structtype[].compressminsize 800
-datatype[].id 1412693671
-datatype[].documenttype[].name "music"
-datatype[].documenttype[].version 0
-datatype[].documenttype[].inherits[].name "document"
-datatype[].documenttype[].inherits[].version 0
-datatype[].documenttype[].headerstruct -1910204744
-datatype[].documenttype[].bodystruct 993120973
-datatype[].id 2006483754
-datatype[].structtype[].name "newssummary.header"
-datatype[].structtype[].version 0
-datatype[].structtype[].compresstype NONE
-datatype[].structtype[].compresslevel 0
-datatype[].structtype[].compressthreshold 95
-datatype[].structtype[].compressminsize 800
-datatype[].structtype[].field[].name "title"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "abstract"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "sourcename"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "providername"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "thumburl"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "thumbwidth"
-datatype[].structtype[].field[].datatype 0
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "thumbheight"
-datatype[].structtype[].field[].datatype 0
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "language"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "crawldocid"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "url"
-datatype[].structtype[].field[].datatype 10
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "sourceurl"
-datatype[].structtype[].field[].datatype 10
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "categories"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "pubdate"
-datatype[].structtype[].field[].datatype 4
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "expdate"
-datatype[].structtype[].field[].datatype 4
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "fingerprint"
-datatype[].structtype[].field[].datatype 0
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "debug"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "attributes"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "searchcluster"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "eustaticrank"
-datatype[].structtype[].field[].datatype 0
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "usstaticrank"
-datatype[].structtype[].field[].datatype 0
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "asiastaticrank"
-datatype[].structtype[].field[].datatype 0
-datatype[].structtype[].field[].detailedtype ""
-datatype[].id -2059783233
-datatype[].structtype[].name "newssummary.body"
-datatype[].structtype[].version 0
-datatype[].structtype[].compresstype NONE
-datatype[].structtype[].compresslevel 0
-datatype[].structtype[].compressthreshold 95
-datatype[].structtype[].compressminsize 800
-datatype[].id -756330891
-datatype[].documenttype[].name "newssummary"
-datatype[].documenttype[].version 0
-datatype[].documenttype[].inherits[].name "document"
-datatype[].documenttype[].inherits[].version 0
-datatype[].documenttype[].headerstruct 2006483754
-datatype[].documenttype[].bodystruct -2059783233
-datatype[].id 2098419674
-datatype[].structtype[].name "newsarticle.header"
-datatype[].structtype[].version 0
-datatype[].structtype[].compresstype NONE
-datatype[].structtype[].compresslevel 0
-datatype[].structtype[].compressthreshold 95
-datatype[].structtype[].compressminsize 800
-datatype[].structtype[].field[].name "dynabstract"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "othersourcenames"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "author"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "otherlanguages"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "charset"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "mimetype"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "referrerurl"
-datatype[].structtype[].field[].datatype 10
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "sourcelocation"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "sourcecountry"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "sourcelocale"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "sourcecontinent"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "articlecountry"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "articlelocale"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "articlecontinent"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "sourcerank"
-datatype[].structtype[].field[].datatype 0
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "crawldate"
-datatype[].structtype[].field[].datatype 4
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "indexdate"
-datatype[].structtype[].field[].datatype 4
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "procdate"
-datatype[].structtype[].field[].datatype 4
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "sourceid"
-datatype[].structtype[].field[].datatype 0
-datatype[].structtype[].field[].detailedtype ""
-datatype[].structtype[].field[].name "sourcefeedid"
-datatype[].structtype[].field[].datatype 0
-datatype[].structtype[].field[].detailedtype ""
-datatype[].id 197293167
-datatype[].structtype[].name "newsarticle.body"
-datatype[].structtype[].version 0
-datatype[].structtype[].compresstype NONE
-datatype[].structtype[].compresslevel 0
-datatype[].structtype[].compressthreshold 95
-datatype[].structtype[].compressminsize 800
-datatype[].structtype[].field[].name "body"
-datatype[].structtype[].field[].datatype 2
-datatype[].structtype[].field[].detailedtype ""
-datatype[].id -1710661691
-datatype[].documenttype[].name "newsarticle"
-datatype[].documenttype[].version 0
-datatype[].documenttype[].inherits[].name "document"
-datatype[].documenttype[].inherits[].version 0
-datatype[].documenttype[].inherits[].name "newssummary"
-datatype[].documenttype[].inherits[].version 0
-datatype[].documenttype[].headerstruct 2098419674
-datatype[].documenttype[].bodystruct 197293167
diff --git a/config-model/src/test/derived/documentderiver/mail.sd b/config-model/src/test/derived/documentderiver/mail.sd
deleted file mode 100644
index 87bf8bada45..00000000000
--- a/config-model/src/test/derived/documentderiver/mail.sd
+++ /dev/null
@@ -1,112 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-search mail {
-
- stemming: none
-
- document mail {
-
- field URI type uri {
- indexing: summary
- summary-to: default, mailid
- }
-
- field mailid type string {
- indexing: summary | index
- match: prefix
- summary-to: default, mailid
- }
-
- field date type int {
- indexing: summary | attribute | index
- match: prefix
- }
-
- field from type string {
- indexing: summary | index
- # index-to: from, sender, address, header, default, all
- match: prefix
- }
-
- field replyto type raw {
- indexing: summary | index
- # index-to: replyto
- match: prefix
- }
-
- field to type string {
- indexing: summary | index
- # index-to: to, recipient, address, header, default, all
- match: prefix
- }
-
- field cc type string {
- indexing: index
- # index-to: cc, recipient, address, header, default, all
- match: prefix
- }
-
- field bcc type string {
- indexing: index
- # index-to: bcc
- match: prefix
- }
-
- field subject type string {
- indexing: summary | index
- # index-to: subject, header, default, all
- match: prefix
- }
-
- field mailbody type raw {
- indexing: summary | index
- # index-to: mailbody, default, all
- match: substring
- body
- }
-
- field attachmentcount type int {
- indexing: summary | index
- body
- }
-
- field attachmentnames type string {
- indexing: index
- # index-to: attachmentname, all
- body
- }
-
- field attachmenttypes type string {
- indexing: index
- # index-to: attachmenttype, all
- body
- }
-
- field attachmentlanguages type string {
- indexing: index
- match: prefix
- body
- }
-
- field attachmentcontent type string {
- indexing: summary | index
- # index-to: attachment, all
- match: prefix
- body
- }
-
- field attachments type raw[] {
- body
- }
-
- }
-
- document-summary default {
- summary snippet type string {
- dynamic
- source: body, attachmentcontent
- }
-
- }
-
-}
-
diff --git a/config-model/src/test/derived/documentderiver/music.sd b/config-model/src/test/derived/documentderiver/music.sd
deleted file mode 100644
index a412d510861..00000000000
--- a/config-model/src/test/derived/documentderiver/music.sd
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-search music {
-
- document music {
-
- # Link to album main page
- field url type uri { }
-
- # Title of album
- field title type string { }
-
- # Album artist
- field artist type string { }
-
- # Album production year
- field year type int { }
-
- # Album description - about the album
- field description type raw { }
-
- # Names of the album tracks
- field tracks type array<string> { }
-
- # How many have given this album the grade 0/1/2/3
- field popularity type weightedset<int> { }
-
- # How many have given this album the grade 0/1/2/3
- field popularity2 type weightedset<int> {
- weightedset: create-if-nonexistent
- weightedset: remove-if-zero
- }
-
- # How many have given this album the grade 0/1/2/3
- field popularity3 type weightedset<int> {
- weightedset {
- create-if-nonexistent
- remove-if-zero
- }
- }
-
- }
-
-}
-
diff --git a/config-model/src/test/derived/documentderiver/newsarticle.sd b/config-model/src/test/derived/documentderiver/newsarticle.sd
deleted file mode 100644
index d31c309726e..00000000000
--- a/config-model/src/test/derived/documentderiver/newsarticle.sd
+++ /dev/null
@@ -1,126 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-search newsarticle {
-
- document newsarticle inherits newssummary {
-
- field dynabstract type string {
- indexing: summary
- }
-
- field body type string {
- body
- indexing: summary | index
- # index-to: body, default
- stemming: none
- }
-
- field othersourcenames type string {
- indexing: summary | index
- # index-to: othersourcenames, source
- stemming: none
- }
-
- field author type string {
- indexing: summary | index
- stemming: none
- }
-
- field otherlanguages type string {
- indexing: summary | index
- # index-to: languages
- stemming: none
- }
-
- field charset type string {
- indexing: summary
- stemming: none
- }
-
- field mimetype type string {
- indexing: summary
- stemming: none
- }
-
- field referrerurl type uri {
- indexing: summary | lowercase | tokenize | index
- stemming: none
- }
-
- field sourcelocation type string {
- indexing: summary | index
- stemming: none
- alias: location
- }
-
- field sourcecountry type string {
- indexing: summary | index
- stemming: none
- # index-to: sourcecountry, sourcelocation
- }
-
- field sourcelocale type string {
- indexing: summary | index
- stemming: none
- # index-to: sourcelocale, sourcelocation
- }
-
- field sourcecontinent type string {
- indexing: summary | index
- stemming: none
- # index-to: sourcecontinent, sourcelocation
- }
-
- field articlecountry type string {
- indexing: summary | index
- stemming: none
- }
-
- field articlelocale type string {
- indexing: summary | index
- stemming: none
- }
-
- field articlecontinent type string {
- indexing: summary | index
- stemming: none
- }
-
- field sourcerank type int {
- indexing: summary | index | set_var tmpsourcerank
- }
-
- field crawldate type long {
- indexing: summary | index
- }
-
- field indexdate type long {
- indexing: now | summary | index
- }
-
- field procdate type long {
- indexing: summary | index
- }
-
- field sourceid type int {
- indexing: summary | index
- }
-
- field sourcefeedid type int {
- indexing: summary | index
- }
-
- }
-
- rank-profile date {
- }
-
- rank-profile usrank inherits default {
- }
-
- rank-profile eurank inherits default {
- }
-
- rank-profile asiarank inherits default {
- }
-
-}
diff --git a/config-model/src/test/derived/documentderiver/newssummary.sd b/config-model/src/test/derived/documentderiver/newssummary.sd
deleted file mode 100644
index c0fcf8c2d85..00000000000
--- a/config-model/src/test/derived/documentderiver/newssummary.sd
+++ /dev/null
@@ -1,165 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-search newssummary {
-
- document newssummary {
-
- field title type string {
- indexing: summary | index
- # index-to: title, titleabstract, default
- stemming: none
- alias: headline
- }
-
- field abstract type string {
- indexing: summary | index
- # index-to: abstract, titleabstract, default
- stemming: none
- }
-
- field sourcename type string {
- indexing: summary | index
- # index-to: sourcename, source
- stemming: none
- }
-
- field providername type string {
- indexing: summary | index
- # index-to: providername, source
- stemming: none
- alias: provider
- }
-
- field thumburl type string {
- indexing: summary | lowercase | tokenize | index
- stemming: none
- }
-
- field thumbwidth type int {
- indexing: summary | index
- }
-
- field thumbheight type int {
- indexing: summary | index
- }
-
- field language type string {
- indexing: summary | index
- # index-to: language, languages
- stemming: none
- }
-
- field crawldocid type string {
- indexing: summary
- stemming: none
- }
-
- field url type uri {
- indexing: summary | lowercase | tokenize | index
- stemming: none
- }
-
- field sourceurl type uri {
- indexing: summary | lowercase | tokenize | index
- stemming: none
- }
-
- field categories type string {
- indexing: summary | index
- stemming: none
- alias: category
- alias: cat
- }
-
- field pubdate type long {
- indexing: summary | index | attribute pubdate | set_var tmppubdate
- alias: date
- }
-
- field expdate type long {
- indexing: summary | index
- }
-
- field fingerprint type int {
- indexing: summary | index
- }
-
- field debug type string {
- indexing {
-
- # Initialize variables used for superduper ranking
- 0 | set_var superduperus | set_var superdupereu | set_var superduperasia;
-
- input debug | lowercase | summary | normalize | tokenize | index;
- input debug | lowercase | split ";" | for_each {
- # Loop through each token in debug string
- switch {
- case "superduperus": 10 | set_var superduperus;
- case "superdupereu": 10 | set_var superdupereu;
- case "superduperasia": 10 | set_var superduperasia;
- }
- };
- }
- indexing-rewrite: none
- stemming: none
- }
-
- field attributes type string {
- indexing {
-
- # Initialize variables used for superduper ranking
- 1 | set_var superdupermod;
-
- input attributes | lowercase | summary | normalize | tokenize | index;
- input attributes | lowercase | split ";" | for_each {
- # Loop through each token in attributes string
- switch {
-
- # De-rank PR articles using the following rules:
- # 1. Set editedstaticrank to '1'
- # 2. Subtract 2.5 hours (9000 seconds) from timestamp used in ranking
- # 3. No superduper rank
- case "typepr": 1 | set_var tmpsourcerank | get_var tmppubdate - 9000 | set_var tmppubdate | 0 | set_var superdupermod;
- }
- };
- }
- indexing-rewrite: none
- stemming: none
- }
-
- field searchcluster type string {
- indexing: summary
- stemming: none
- }
-
- field eustaticrank type int {
- indexing {
- get_var tmpsourcerank * 4000 + get_var superdupereu * get_var superdupermod * 1000 + get_var tmppubdate * 0.5 | summary | index | attribute eustaticrank;
- }
- }
-
- field usstaticrank type int {
- indexing {
- get_var tmpsourcerank * 4000 + get_var superduperus * get_var superdupermod * 1000 + get_var tmppubdate * 0.5 | summary | index | attribute usstaticrank;
- }
- }
-
- field asiastaticrank type int {
- indexing {
- get_var tmpsourcerank * 4000 + get_var superduperasia * get_var superdupermod * 1000 + get_var tmppubdate * 0.5 | summary | index | attribute asiastaticrank;
- }
- }
- }
-
- rank-profile date {
- }
-
- rank-profile usrank inherits default {
- }
-
- rank-profile eurank inherits default {
- }
-
- rank-profile asiarank inherits default {
- }
-
-}
diff --git a/config-model/src/test/derived/documentderiver/sombrero.sd b/config-model/src/test/derived/documentderiver/sombrero.sd
deleted file mode 100644
index 00bd80bc42a..00000000000
--- a/config-model/src/test/derived/documentderiver/sombrero.sd
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-search webdoc {
- document webdoc {
- #
- # a simple key-value pair
- #
- struct keyvalue {
- field key type string {}
- field value type string {}
- }
-
- #
- # tags have a name and an array of attributes
- #
- struct tagvalue {
- field name type string {}
- # todo: this should be a map of attributes, not an array
- field attributes type array<keyvalue> {}
- }
-
- #
- # wordforms are (kind, form, weight) triplets
- # todo: "kind" should be an enum; check how enums are used.
- #
- struct wordform {
- field kind type int {}
- field form type string {}
- field weight type float {}
- }
-
- #
- # web documents have zero or more HTML source strings
- #
- field html type string {}
- }
-}
diff --git a/config-model/src/test/derived/documentderiver/vsmfields.cfg b/config-model/src/test/derived/documentderiver/vsmfields.cfg
deleted file mode 100644
index cd8bc659398..00000000000
--- a/config-model/src/test/derived/documentderiver/vsmfields.cfg
+++ /dev/null
@@ -1,390 +0,0 @@
-fieldspec[].name sddocname
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 "exact"
-fieldspec[].name title
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name abstract
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name sourcename
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name providername
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name thumburl
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name thumbwidth
-fieldspec[].searchmethod INT32
-fieldspec[].arg1 ""
-fieldspec[].name thumbheight
-fieldspec[].searchmethod INT32
-fieldspec[].arg1 ""
-fieldspec[].name language
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name crawldocid
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name url
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name sourceurl
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name categories
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name pubdate
-fieldspec[].searchmethod INT64
-fieldspec[].arg1 ""
-fieldspec[].name expdate
-fieldspec[].searchmethod INT64
-fieldspec[].arg1 ""
-fieldspec[].name fingerprint
-fieldspec[].searchmethod INT32
-fieldspec[].arg1 ""
-fieldspec[].name debug
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name attributes
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name searchcluster
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name eustaticrank
-fieldspec[].searchmethod INT32
-fieldspec[].arg1 ""
-fieldspec[].name usstaticrank
-fieldspec[].searchmethod INT32
-fieldspec[].arg1 ""
-fieldspec[].name asiastaticrank
-fieldspec[].searchmethod INT32
-fieldspec[].arg1 ""
-fieldspec[].name dynabstract
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name body
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name othersourcenames
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name author
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name otherlanguages
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name charset
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name mimetype
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name referrerurl
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name sourcelocation
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name sourcecountry
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name sourcelocale
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name sourcecontinent
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name articlecountry
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name articlelocale
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name articlecontinent
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name sourcerank
-fieldspec[].searchmethod INT32
-fieldspec[].arg1 ""
-fieldspec[].name crawldate
-fieldspec[].searchmethod INT64
-fieldspec[].arg1 ""
-fieldspec[].name indexdate
-fieldspec[].searchmethod INT64
-fieldspec[].arg1 ""
-fieldspec[].name procdate
-fieldspec[].searchmethod INT64
-fieldspec[].arg1 ""
-fieldspec[].name sourceid
-fieldspec[].searchmethod INT32
-fieldspec[].arg1 ""
-fieldspec[].name sourcefeedid
-fieldspec[].searchmethod INT32
-fieldspec[].arg1 ""
-fieldspec[].name URI
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name mailid
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 "prefix"
-fieldspec[].name date
-fieldspec[].searchmethod INT32
-fieldspec[].arg1 ""
-fieldspec[].name from
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 "prefix"
-fieldspec[].name replyto
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 "prefix"
-fieldspec[].name to
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 "prefix"
-fieldspec[].name cc
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 "prefix"
-fieldspec[].name bcc
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 "prefix"
-fieldspec[].name subject
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 "prefix"
-fieldspec[].name mailbody
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 "substring"
-fieldspec[].name attachmentcount
-fieldspec[].searchmethod INT32
-fieldspec[].arg1 ""
-fieldspec[].name attachmentnames
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name attachmenttypes
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 ""
-fieldspec[].name attachmentlanguages
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 "prefix"
-fieldspec[].name attachmentcontent
-fieldspec[].searchmethod AUTOUTF8
-fieldspec[].arg1 "prefix"
-documenttype[].name newssummary
-documenttype[].index[].name sddocname
-documenttype[].index[].field[].name sddocname
-documenttype[].index[].name title
-documenttype[].index[].field[].name title
-documenttype[].index[].name titleabstract
-documenttype[].index[].field[].name title
-documenttype[].index[].field[].name abstract
-documenttype[].index[].name default
-documenttype[].index[].field[].name title
-documenttype[].index[].field[].name abstract
-documenttype[].index[].name abstract
-documenttype[].index[].field[].name abstract
-documenttype[].index[].name sourcename
-documenttype[].index[].field[].name sourcename
-documenttype[].index[].name source
-documenttype[].index[].field[].name sourcename
-documenttype[].index[].field[].name providername
-documenttype[].index[].name providername
-documenttype[].index[].field[].name providername
-documenttype[].index[].name thumburl
-documenttype[].index[].field[].name thumburl
-documenttype[].index[].name thumbwidth
-documenttype[].index[].field[].name thumbwidth
-documenttype[].index[].name thumbheight
-documenttype[].index[].field[].name thumbheight
-documenttype[].index[].name language
-documenttype[].index[].field[].name language
-documenttype[].index[].name languages
-documenttype[].index[].field[].name language
-documenttype[].index[].name url
-documenttype[].index[].field[].name url
-documenttype[].index[].name sourceurl
-documenttype[].index[].field[].name sourceurl
-documenttype[].index[].name categories
-documenttype[].index[].field[].name categories
-documenttype[].index[].name pubdate
-documenttype[].index[].field[].name pubdate
-documenttype[].index[].name expdate
-documenttype[].index[].field[].name expdate
-documenttype[].index[].name fingerprint
-documenttype[].index[].field[].name fingerprint
-documenttype[].index[].name debug
-documenttype[].index[].field[].name debug
-documenttype[].index[].name attributes
-documenttype[].index[].field[].name attributes
-documenttype[].index[].name eustaticrank
-documenttype[].index[].field[].name eustaticrank
-documenttype[].index[].name usstaticrank
-documenttype[].index[].field[].name usstaticrank
-documenttype[].index[].name asiastaticrank
-documenttype[].index[].field[].name asiastaticrank
-documenttype[].name newsarticle
-documenttype[].index[].name sddocname
-documenttype[].index[].field[].name sddocname
-documenttype[].index[].name title
-documenttype[].index[].field[].name title
-documenttype[].index[].name titleabstract
-documenttype[].index[].field[].name title
-documenttype[].index[].field[].name abstract
-documenttype[].index[].name default
-documenttype[].index[].field[].name title
-documenttype[].index[].field[].name abstract
-documenttype[].index[].field[].name body
-documenttype[].index[].name abstract
-documenttype[].index[].field[].name abstract
-documenttype[].index[].name sourcename
-documenttype[].index[].field[].name sourcename
-documenttype[].index[].name source
-documenttype[].index[].field[].name sourcename
-documenttype[].index[].field[].name providername
-documenttype[].index[].field[].name othersourcenames
-documenttype[].index[].name providername
-documenttype[].index[].field[].name providername
-documenttype[].index[].name thumburl
-documenttype[].index[].field[].name thumburl
-documenttype[].index[].name thumbwidth
-documenttype[].index[].field[].name thumbwidth
-documenttype[].index[].name thumbheight
-documenttype[].index[].field[].name thumbheight
-documenttype[].index[].name language
-documenttype[].index[].field[].name language
-documenttype[].index[].name languages
-documenttype[].index[].field[].name language
-documenttype[].index[].field[].name otherlanguages
-documenttype[].index[].name url
-documenttype[].index[].field[].name url
-documenttype[].index[].name sourceurl
-documenttype[].index[].field[].name sourceurl
-documenttype[].index[].name categories
-documenttype[].index[].field[].name categories
-documenttype[].index[].name pubdate
-documenttype[].index[].field[].name pubdate
-documenttype[].index[].name expdate
-documenttype[].index[].field[].name expdate
-documenttype[].index[].name fingerprint
-documenttype[].index[].field[].name fingerprint
-documenttype[].index[].name debug
-documenttype[].index[].field[].name debug
-documenttype[].index[].name attributes
-documenttype[].index[].field[].name attributes
-documenttype[].index[].name eustaticrank
-documenttype[].index[].field[].name eustaticrank
-documenttype[].index[].name usstaticrank
-documenttype[].index[].field[].name usstaticrank
-documenttype[].index[].name asiastaticrank
-documenttype[].index[].field[].name asiastaticrank
-documenttype[].index[].name body
-documenttype[].index[].field[].name body
-documenttype[].index[].name othersourcenames
-documenttype[].index[].field[].name othersourcenames
-documenttype[].index[].name author
-documenttype[].index[].field[].name author
-documenttype[].index[].name referrerurl
-documenttype[].index[].field[].name referrerurl
-documenttype[].index[].name sourcelocation
-documenttype[].index[].field[].name sourcelocation
-documenttype[].index[].field[].name sourcecountry
-documenttype[].index[].field[].name sourcelocale
-documenttype[].index[].field[].name sourcecontinent
-documenttype[].index[].name sourcecountry
-documenttype[].index[].field[].name sourcecountry
-documenttype[].index[].name sourcelocale
-documenttype[].index[].field[].name sourcelocale
-documenttype[].index[].name sourcecontinent
-documenttype[].index[].field[].name sourcecontinent
-documenttype[].index[].name articlecountry
-documenttype[].index[].field[].name articlecountry
-documenttype[].index[].name articlelocale
-documenttype[].index[].field[].name articlelocale
-documenttype[].index[].name articlecontinent
-documenttype[].index[].field[].name articlecontinent
-documenttype[].index[].name sourcerank
-documenttype[].index[].field[].name sourcerank
-documenttype[].index[].name crawldate
-documenttype[].index[].field[].name crawldate
-documenttype[].index[].name indexdate
-documenttype[].index[].field[].name indexdate
-documenttype[].index[].name procdate
-documenttype[].index[].field[].name procdate
-documenttype[].index[].name sourceid
-documenttype[].index[].field[].name sourceid
-documenttype[].index[].name sourcefeedid
-documenttype[].index[].field[].name sourcefeedid
-documenttype[].name music
-documenttype[].index[].name sddocname
-documenttype[].index[].field[].name sddocname
-documenttype[].name mail
-documenttype[].index[].name sddocname
-documenttype[].index[].field[].name sddocname
-documenttype[].index[].name mailid
-documenttype[].index[].field[].name mailid
-documenttype[].index[].name date
-documenttype[].index[].field[].name date
-documenttype[].index[].name from
-documenttype[].index[].field[].name from
-documenttype[].index[].name sender
-documenttype[].index[].field[].name from
-documenttype[].index[].name address
-documenttype[].index[].field[].name from
-documenttype[].index[].field[].name to
-documenttype[].index[].field[].name cc
-documenttype[].index[].name header
-documenttype[].index[].field[].name from
-documenttype[].index[].field[].name to
-documenttype[].index[].field[].name cc
-documenttype[].index[].field[].name subject
-documenttype[].index[].name default
-documenttype[].index[].field[].name from
-documenttype[].index[].field[].name to
-documenttype[].index[].field[].name cc
-documenttype[].index[].field[].name subject
-documenttype[].index[].field[].name mailbody
-documenttype[].index[].name all
-documenttype[].index[].field[].name from
-documenttype[].index[].field[].name to
-documenttype[].index[].field[].name cc
-documenttype[].index[].field[].name subject
-documenttype[].index[].field[].name mailbody
-documenttype[].index[].field[].name attachmentnames
-documenttype[].index[].field[].name attachmenttypes
-documenttype[].index[].field[].name attachmentcontent
-documenttype[].index[].name replyto
-documenttype[].index[].field[].name replyto
-documenttype[].index[].name to
-documenttype[].index[].field[].name to
-documenttype[].index[].name recipient
-documenttype[].index[].field[].name to
-documenttype[].index[].field[].name cc
-documenttype[].index[].name cc
-documenttype[].index[].field[].name cc
-documenttype[].index[].name bcc
-documenttype[].index[].field[].name bcc
-documenttype[].index[].name subject
-documenttype[].index[].field[].name subject
-documenttype[].index[].name mailbody
-documenttype[].index[].field[].name mailbody
-documenttype[].index[].name attachmentcount
-documenttype[].index[].field[].name attachmentcount
-documenttype[].index[].name attachmentname
-documenttype[].index[].field[].name attachmentnames
-documenttype[].index[].name attachmenttype
-documenttype[].index[].field[].name attachmenttypes
-documenttype[].index[].name attachmentlanguages
-documenttype[].index[].field[].name attachmentlanguages
-documenttype[].index[].name attachment
-documenttype[].index[].field[].name attachmentcontent
-documenttype[].name compressed_header
-documenttype[].index[].name sddocname
-documenttype[].index[].field[].name sddocname
-documenttype[].name compressed_both
-documenttype[].index[].name sddocname
-documenttype[].index[].field[].name sddocname
-documenttype[].name compressed_body
-documenttype[].index[].name sddocname
-documenttype[].index[].field[].name sddocname
diff --git a/config-model/src/test/derived/documentderiver/vsmsummary.cfg b/config-model/src/test/derived/documentderiver/vsmsummary.cfg
deleted file mode 100644
index 03513244430..00000000000
--- a/config-model/src/test/derived/documentderiver/vsmsummary.cfg
+++ /dev/null
@@ -1,4 +0,0 @@
-fieldmap[].summary snippet
-fieldmap[].document[].field body
-fieldmap[].document[].field attachmentcontent
-fieldmap[].command FLATTENJUNIPER
diff --git a/config-model/src/test/derived/inheritancebadtypes/child.sd b/config-model/src/test/derived/inheritancebadtypes/child.sd
deleted file mode 100644
index 3a204e95154..00000000000
--- a/config-model/src/test/derived/inheritancebadtypes/child.sd
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-search child {
-document child inherits parent {
- field a type int {
- indexing: index
- }
-}
-}
diff --git a/config-model/src/test/derived/inheritancebadtypes/parent.sd b/config-model/src/test/derived/inheritancebadtypes/parent.sd
deleted file mode 100644
index 4fec1b4179f..00000000000
--- a/config-model/src/test/derived/inheritancebadtypes/parent.sd
+++ /dev/null
@@ -1,8 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-search parent {
-document parent {
- field a type string {
- indexing: index
- }
-}
-}
diff --git a/config-model/src/test/examples/attributeindex.sd b/config-model/src/test/examples/attributeindex.sd
deleted file mode 100644
index a35be556467..00000000000
--- a/config-model/src/test/examples/attributeindex.sd
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-search attributeindex {
- document attributeindex {
-
- field nosettings type string {
- indexing: summary | attribute | index
- }
-
- # Attribute and index have different names
- field specifyname type string {
- indexing: summary | attribute newname | index
- }
-
- # # index-to: with same name as attribute
- field specifyname2 type string {
- indexing: summary | attribute newname2 | index
- # index-to: newname2
- }
-
- field withstaticrankname type string {
- indexing: summary | attribute | index | attribute someothername
- }
- }
-}
diff --git a/config-model/src/test/examples/attributeproperties1.sd b/config-model/src/test/examples/attributeproperties1.sd
deleted file mode 100644
index 233e6656866..00000000000
--- a/config-model/src/test/examples/attributeproperties1.sd
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-search bolding {
- document test {
-
- # Setting attribute properties for a non-existent attribute should fail
- field batchid type int {
- indexing: summary | index
- attribute {
- prefetch
- }
- }
-
- # ... but this is OK
- field anotherbatchid type int {
- indexing: summary | index | attribute
- attribute {
- prefetch
- }
- }
- }
-}
diff --git a/config-model/src/test/examples/attributeproperties2.sd b/config-model/src/test/examples/attributeproperties2.sd
deleted file mode 100644
index 9c5e3e1a07f..00000000000
--- a/config-model/src/test/examples/attributeproperties2.sd
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-search bolding {
- document bolding {
-
- # This is how it usually should be
- field anotherbatchid type int {
- indexing: summary | index | attribute
- attribute {
- prefetch
- }
- attribute: huge
- }
-
- # The attribute is created in the next field
- field bar type int {
- indexing: summary | index
- attribute {
- prefetch
- }
- }
-
- # Creates attribute for the previous field
- field foo type int {
- indexing: input bar | attribute bar
- }
- }
-}
diff --git a/config-model/src/test/java/com/yahoo/config/model/ApplicationDeployTest.java b/config-model/src/test/java/com/yahoo/config/model/ApplicationDeployTest.java
index 8331ada2271..528393e4135 100644
--- a/config-model/src/test/java/com/yahoo/config/model/ApplicationDeployTest.java
+++ b/config-model/src/test/java/com/yahoo/config/model/ApplicationDeployTest.java
@@ -14,7 +14,7 @@ import com.yahoo.document.DataType;
import com.yahoo.document.config.DocumentmanagerConfig;
import com.yahoo.io.IOUtils;
import com.yahoo.searchdefinition.Search;
-import com.yahoo.searchdefinition.UnproperSearch;
+import com.yahoo.searchdefinition.DocumentOnlySearch;
import com.yahoo.vespa.config.ConfigDefinition;
import com.yahoo.vespa.config.ConfigDefinitionKey;
import com.yahoo.vespa.model.VespaModel;
@@ -72,7 +72,7 @@ public class ApplicationDeployTest {
case "sock":
break;
case "product":
- assertTrue(s instanceof UnproperSearch);
+ assertTrue(s instanceof DocumentOnlySearch);
assertEquals(s.getDocument().getField("title").getDataType(), DataType.STRING);
break;
default:
diff --git a/config-model/src/test/java/com/yahoo/config/model/MockModelContext.java b/config-model/src/test/java/com/yahoo/config/model/MockModelContext.java
index 5bd95334396..948c62c2343 100644
--- a/config-model/src/test/java/com/yahoo/config/model/MockModelContext.java
+++ b/config-model/src/test/java/com/yahoo/config/model/MockModelContext.java
@@ -133,6 +133,9 @@ public class MockModelContext implements ModelContext {
@Override
public boolean isFirstTimeDeployment() { return false; }
+
+ @Override
+ public boolean useDedicatedNodeForLogserver() { return false; }
};
}
}
diff --git a/config-model/src/test/java/com/yahoo/config/model/application/provider/SchemaValidatorTest.java b/config-model/src/test/java/com/yahoo/config/model/application/provider/SchemaValidatorTest.java
index fda230e22ab..5c4b51ca3fa 100644
--- a/config-model/src/test/java/com/yahoo/config/model/application/provider/SchemaValidatorTest.java
+++ b/config-model/src/test/java/com/yahoo/config/model/application/provider/SchemaValidatorTest.java
@@ -2,6 +2,7 @@
package com.yahoo.config.model.application.provider;
import com.yahoo.component.Version;
+import com.yahoo.vespa.config.VespaVersion;
import org.junit.Test;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
@@ -65,6 +66,6 @@ public class SchemaValidatorTest {
}
private SchemaValidator createValidator() throws IOException {
- return new SchemaValidators(new Version(6)).servicesXmlValidator();
+ return new SchemaValidators(new Version(VespaVersion.major)).servicesXmlValidator();
}
}
diff --git a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
index dd28d45ef4d..32a881633b8 100644
--- a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
+++ b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
@@ -48,6 +48,7 @@ import java.util.stream.Collectors;
import static com.yahoo.config.model.test.TestUtil.joinLines;
import static com.yahoo.vespa.defaults.Defaults.getDefaults;
+import static org.hamcrest.CoreMatchers.both;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.collection.IsIn.isIn;
import static org.hamcrest.core.Every.everyItem;
@@ -947,11 +948,11 @@ public class ModelProvisioningTest {
" <nodes count='1'/>" +
" </container>" +
"</services>";
- testContainerOnLogserverHost(services);
+ boolean useDedicatedNodeForLogserver = false;
+ testContainerOnLogserverHost(services, useDedicatedNodeForLogserver);
}
@Test
- @Ignore // Ignore until we create container on logserver implicitly
public void testImplicitLogserverContainer() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
@@ -960,7 +961,8 @@ public class ModelProvisioningTest {
" <nodes count='1'/>" +
" </container>" +
"</services>";
- testContainerOnLogserverHost(services);
+ boolean useDedicatedNodeForLogserver = true;
+ testContainerOnLogserverHost(services, useDedicatedNodeForLogserver);
}
@Test
@@ -1751,9 +1753,10 @@ public class ModelProvisioningTest {
// Tests that a container is allocated on logserver host and that
// it is able to get config
- private void testContainerOnLogserverHost(String services) {
+ private void testContainerOnLogserverHost(String services, boolean useDedicatedNodeForLogserver) {
int numberOfHosts = 2;
VespaModelTester tester = new VespaModelTester();
+ tester.useDedicatedNodeForLogserver(useDedicatedNodeForLogserver);
tester.addHosts(numberOfHosts);
Zone zone = new Zone(SystemName.cd, Environment.prod, RegionName.defaultName());
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/ArraysTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/ArraysTestCase.java
index 76ea16e47aa..846166ae93c 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/ArraysTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/ArraysTestCase.java
@@ -12,6 +12,7 @@ import java.io.IOException;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
+
/**
* tests importing of document containing array type fields
*
@@ -21,7 +22,7 @@ public class ArraysTestCase extends SearchDefinitionTestCase {
@Test
public void testArrayImporting() throws IOException, ParseException {
- Search search = UnprocessingSearchBuilder.buildUnprocessedFromFile("src/test/examples/arrays.sd");
+ Search search = SearchBuilder.buildFromFile("src/test/examples/arrays.sd");
SDField tags = (SDField)search.getDocument().getField("tags");
assertEquals(DataType.STRING, ((CollectionDataType)tags.getDataType()).getNestedType());
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/ReservedWordsAsFieldNamesTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/ReservedWordsAsFieldNamesTestCase.java
index 376832c1649..5a5fc1cc312 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/ReservedWordsAsFieldNamesTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/ReservedWordsAsFieldNamesTestCase.java
@@ -15,7 +15,7 @@ public class ReservedWordsAsFieldNamesTestCase extends SearchDefinitionTestCase
@Test
public void testIt() throws IOException, ParseException {
- Search search = UnprocessingSearchBuilder.buildUnprocessedFromFile("src/test/examples/reserved_words_as_field_names.sd");
+ Search search = SearchBuilder.buildFromFile("src/test/examples/reserved_words_as_field_names.sd");
assertNotNull(search.getDocument().getField("inline"));
assertNotNull(search.getDocument().getField("constants"));
assertNotNull(search.getDocument().getField("reference"));
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/SearchImporterTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/SearchImporterTestCase.java
index 82c03c02f61..127ed7a528b 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/SearchImporterTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/SearchImporterTestCase.java
@@ -5,7 +5,11 @@ import com.yahoo.config.model.application.provider.BaseDeployLogger;
import com.yahoo.document.DataType;
import com.yahoo.document.Document;
import com.yahoo.search.query.profile.QueryProfileRegistry;
-import com.yahoo.searchdefinition.document.*;
+import com.yahoo.searchdefinition.document.Attribute;
+import com.yahoo.searchdefinition.document.RankType;
+import com.yahoo.searchdefinition.document.SDDocumentType;
+import com.yahoo.searchdefinition.document.SDField;
+import com.yahoo.searchdefinition.document.Stemming;
import com.yahoo.searchdefinition.parser.ParseException;
import com.yahoo.searchdefinition.processing.MakeAliases;
import com.yahoo.vespa.documentmodel.SummaryTransform;
@@ -27,7 +31,7 @@ public class SearchImporterTestCase extends SearchDefinitionTestCase {
@Test
public void testSimpleImporting() throws IOException, ParseException {
RankProfileRegistry rankProfileRegistry = new RankProfileRegistry();
- SearchBuilder sb = new UnprocessingSearchBuilder(rankProfileRegistry, new QueryProfileRegistry());
+ SearchBuilder sb = new SearchBuilder(rankProfileRegistry, new QueryProfileRegistry());
sb.importFile("src/test/examples/simple.sd");
sb.build();
Search search = sb.getSearch();
@@ -36,7 +40,7 @@ public class SearchImporterTestCase extends SearchDefinitionTestCase {
SDDocumentType document = search.getDocument();
assertEquals("simple", document.getName());
- assertEquals(12, document.getFieldCount());
+ assertEquals(25, document.getFieldCount());
SDField field;
Attribute attribute;
@@ -46,7 +50,7 @@ public class SearchImporterTestCase extends SearchDefinitionTestCase {
// First field
field=(SDField) document.getField("title");
assertEquals(DataType.STRING,field.getDataType());
- assertEquals("{ summary | index; }", field.getIndexingScript().toString());
+ assertEquals("{ input title | tokenize normalize stem:\"SHORTEST\" | summary title | index title; }", field.getIndexingScript().toString());
assertTrue(!search.getIndex("default").isPrefix());
assertTrue(search.getIndex("title").isPrefix());
Iterator<String> titleAliases=search.getIndex("title").aliasIterator();
@@ -85,7 +89,7 @@ public class SearchImporterTestCase extends SearchDefinitionTestCase {
// Fifth field
field=(SDField) document.getField("popularity");
- assertEquals("{ attribute; }",
+ assertEquals("{ input popularity | attribute popularity; }",
field.getIndexingScript().toString());
// Sixth field
@@ -96,19 +100,19 @@ public class SearchImporterTestCase extends SearchDefinitionTestCase {
// Seventh field
field= search.getConcreteField("categories");
- assertEquals("{ input categories_src | lowercase | normalize | index; }",
+ assertEquals("{ input categories_src | lowercase | normalize | tokenize normalize stem:\"SHORTEST\" | index categories; }",
field.getIndexingScript().toString());
assertTrue(!field.isHeader());
// Eight field
field= search.getConcreteField("categoriesagain");
- assertEquals("{ input categoriesagain_src | lowercase | normalize | index; }",
+ assertEquals("{ input categoriesagain_src | lowercase | normalize | tokenize normalize stem:\"SHORTEST\" | index categoriesagain; }",
field.getIndexingScript().toString());
assertTrue(field.isHeader());
// Ninth field
field= search.getConcreteField("exactemento");
- assertEquals("{ input exactemento_src | lowercase | index | summary; }",
+ assertEquals("{ input exactemento_src | lowercase | tokenize normalize stem:\"SHORTEST\" | index exactemento | summary exactemento; }",
field.getIndexingScript().toString());
// Tenth field
@@ -153,7 +157,7 @@ public class SearchImporterTestCase extends SearchDefinitionTestCase {
assertEquals("exact",exact.getName());
assertEquals(Stemming.NONE,exact.getStemming());
assertTrue(!exact.getNormalizing().doRemoveAccents());
- assertEquals("{ input title . \" \" . input category | summary | index; }",
+ assertEquals("{ input title . \" \" . input category | tokenize | summary exact | index exact; }",
exact.getIndexingScript().toString());
assertEquals(RankType.IDENTITY, exact.getRankType());
}
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/StemmingSettingTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/StemmingSettingTestCase.java
index 5b95789bea4..defff99d51e 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/StemmingSettingTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/StemmingSettingTestCase.java
@@ -20,7 +20,7 @@ public class StemmingSettingTestCase extends SearchDefinitionTestCase {
@Test
public void testStemmingSettings() throws IOException, ParseException {
- Search search = UnprocessingSearchBuilder.buildUnprocessedFromFile("src/test/examples/stemmingsetting.sd");
+ Search search = SearchBuilder.buildFromFile("src/test/examples/stemmingsetting.sd");
SDField artist = (SDField)search.getDocument().getField("artist");
assertEquals(Stemming.SHORTEST, artist.getStemming(search));
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/derived/DocumentDeriverTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/derived/DocumentDeriverTestCase.java
deleted file mode 100644
index 8e0f77c941c..00000000000
--- a/config-model/src/test/java/com/yahoo/searchdefinition/derived/DocumentDeriverTestCase.java
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.searchdefinition.derived;
-
-import com.yahoo.document.*;
-import com.yahoo.searchdefinition.SearchBuilder;
-import com.yahoo.searchdefinition.document.SDDocumentType;
-import org.junit.Test;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.junit.Assert.*;
-
-/**
- * Tests deriving of documentmanager
- *
- * @author <a href="mailto:mathiasm@yahoo-inc.com">Mathias Moelster Lidal</a>
- */
-public class DocumentDeriverTestCase extends AbstractExportingTestCase {
- @Test
- public void testDocumentDeriving() {
- String root = "src/test/derived/documentderiver/";
-
- List<String> files = new ArrayList<>();
- files.add(root + "newsarticle.sd");
- files.add(root + "newssummary.sd");
- files.add(root + "music.sd");
- files.add(root + "mail.sd");
- files.add(root + "compression_header.sd");
- files.add(root + "compression_both.sd");
- files.add(root + "compression_body.sd");
-
- File toDir = new File("temp/documentderiver/");
- toDir.mkdir();
-
- SearchBuilder builder = Deriver.deriveDocuments(files, toDir.getPath());
- try {
- assertEqualFiles(root + "documentmanager.cfg", toDir.getPath() + "/documentmanager.cfg");
- } catch (IOException e) {
- throw new RuntimeException("Exception while comparing files", e);
- }
-
- SDDocumentType doc = builder.getSearch("newsarticle").getDocument();
- assertNotNull(doc);
- }
- @Test
- public void testStructTypesNotUsed() {
- String root = "src/test/derived/documentderiver/";
-
- List<String> files = new ArrayList<>();
- files.add(root + "sombrero.sd");
-
- File toDir = new File("temp/structtypesnotused/");
- toDir.mkdir();
-
- Deriver.deriveDocuments(files, toDir.getPath());
-
- DocumentTypeManager dtm = new DocumentTypeManager();
- int numBuiltInTypes = dtm.getDataTypes().size();
- dtm.configure("file:" + toDir.getPath() + "/documentmanager.cfg");
-
- DocumentType webDocType = dtm.getDocumentType("webdoc");
- assertNotNull(webDocType);
-
- assertEquals(1, webDocType.fieldSet().size());
- Field html = webDocType.getField("html");
- assertNotNull(html);
- assertEquals(DataType.STRING, html.getDataType());
-
- assertEquals(numBuiltInTypes + 8, dtm.getDataTypes().size());
-
- {
- StructDataType keyvalue = (StructDataType) dtm.getDataType("keyvalue");
- assertNotNull(keyvalue);
- assertEquals(2, keyvalue.getFields().size());
- Field key = keyvalue.getField("key");
- assertNotNull(key);
- assertEquals(DataType.STRING, key.getDataType());
- Field value = keyvalue.getField("value");
- assertNotNull(value);
- assertEquals(DataType.STRING, value.getDataType());
- }
- {
- StructDataType tagvalue = (StructDataType) dtm.getDataType("tagvalue");
- assertNotNull(tagvalue);
- assertEquals(2, tagvalue.getFields().size());
- Field name = tagvalue.getField("name");
- assertNotNull(name);
- assertEquals(DataType.STRING, name.getDataType());
- Field attributes = tagvalue.getField("attributes");
- assertNotNull(attributes);
- assertTrue(attributes.getDataType() instanceof ArrayDataType);
- assertEquals(dtm.getDataType("keyvalue"), ((ArrayDataType) attributes.getDataType()).getNestedType());
- }
- {
- StructDataType wordform = (StructDataType) dtm.getDataType("wordform");
- assertNotNull(wordform);
- assertEquals(3, wordform.getFields().size());
- Field kind = wordform.getField("kind");
- assertNotNull(kind);
- assertEquals(DataType.INT, kind.getDataType());
- Field form = wordform.getField("form");
- assertNotNull(form);
- assertEquals(DataType.STRING, form.getDataType());
- Field weight = wordform.getField("weight");
- assertNotNull(weight);
- assertEquals(DataType.FLOAT, weight.getDataType());
- }
-
- }
-
-}
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/derived/InheritanceTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/derived/InheritanceTestCase.java
index 2833e0ef004..f37a39d6f93 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/derived/InheritanceTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/derived/InheritanceTestCase.java
@@ -158,18 +158,4 @@ public class InheritanceTestCase extends AbstractExportingTestCase {
assertEquals(new Index("prefixed", true), childSearch.getIndex("prefixed"));
}
- @Test
- public void testFailTypesMismatch() throws IOException, ParseException {
- String root = "src/test/derived/inheritancebadtypes/";
- List<String> files = new LinkedList<>();
- files.add(root + "parent.sd");
- files.add(root + "child.sd");
- File toDir = tmpDir.newFolder("to");
- try {
- Deriver.deriveDocuments(files, toDir.getPath());
- fail("Import of child SD with type mismatch worked.");
- } catch (RuntimeException e) {
- assertTrue(e.getMessage().matches(".*already contains field 'a'.*"));
- }
- }
}
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/derived/MailTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/derived/MailTestCase.java
index 8cf1846ed04..187e766c315 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/derived/MailTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/derived/MailTestCase.java
@@ -1,25 +1,13 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.searchdefinition.derived;
-import com.yahoo.config.ConfigInstance;
-import com.yahoo.document.config.DocumentmanagerConfig;
import com.yahoo.searchdefinition.SearchBuilder;
-import com.yahoo.searchdefinition.UnprocessingSearchBuilder;
import com.yahoo.searchdefinition.parser.ParseException;
-import com.yahoo.vespa.model.VespaModel;
-import com.yahoo.vespa.model.test.utils.VespaModelCreatorWithFilePkg;
import org.junit.Test;
-import org.xml.sax.SAXException;
-
-import java.io.File;
import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.junit.Assert.assertEquals;
/**
- * Tests VDS+streaming configuration deriving
+ * Tests streaming configuration deriving
*
* @author bratseth
*/
@@ -33,22 +21,4 @@ public class MailTestCase extends AbstractExportingTestCase {
assertCorrectDeriving(sb, dir);
}
- @Test
- public void testMailDocumentsonlyDeriving() {
- String root = "src/test/derived/mail/";
- File toDir = new File("temp/documentderiver/");
- if (!toDir.exists()) {
- toDir.mkdir();
- }
- List<String> files = new ArrayList<>();
- files.add(root + "mail.sd");
- Deriver.deriveDocuments(files, toDir.getPath());
- try {
- assertEqualFiles(root + "onlydoc/documentmanager.cfg",
- toDir.getPath() + "/documentmanager.cfg");
- } catch (IOException e) {
- throw new RuntimeException("Exception while comparing files", e);
- }
- }
-
}
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/derived/StreamingStructTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/derived/StreamingStructTestCase.java
index 1691d463fad..1be9ee3f465 100755
--- a/config-model/src/test/java/com/yahoo/searchdefinition/derived/StreamingStructTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/derived/StreamingStructTestCase.java
@@ -22,18 +22,7 @@ public class StreamingStructTestCase extends AbstractExportingTestCase {
@Test
public void testStreamingStructExplicitDefaultSummaryClass() throws IOException, ParseException {
- // Tests an issue for mail in Vespa 4.1; specific overrides of default summary class
assertCorrectDeriving("streamingstructdefault");
}
- @Test
- public void testStreamingStructDocumentsonlyDeriving() throws IOException {
- String root = "src/test/derived/streamingstruct/";
- String temp = "temp/documentderiver/";
- new File(temp).mkdir();
- Deriver.deriveDocuments(Arrays.asList(root + "streamingstruct.sd"), temp);
- assertEqualFiles(root + "/onlydoc/documentmanager.cfg",
- temp + "/documentmanager.cfg");
- }
-
}
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/processing/AttributeIndexTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/processing/AttributeIndexTestCase.java
deleted file mode 100644
index 29ab2d9e8e8..00000000000
--- a/config-model/src/test/java/com/yahoo/searchdefinition/processing/AttributeIndexTestCase.java
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.searchdefinition.processing;
-
-import com.yahoo.searchdefinition.Search;
-import com.yahoo.searchdefinition.SearchDefinitionTestCase;
-import com.yahoo.searchdefinition.UnprocessingSearchBuilder;
-import com.yahoo.searchdefinition.parser.ParseException;
-import org.junit.Test;
-
-import java.io.IOException;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-/**
- * Test AttributeIndex processor.
- *
- * @author hmusum
- */
-public class AttributeIndexTestCase extends SearchDefinitionTestCase {
- @Test
- public void testAttributeIndex() throws IOException, ParseException {
- Search search = UnprocessingSearchBuilder.buildUnprocessedFromFile("src/test/examples/attributeindex.sd");
-
- assertTrue(search.getConcreteField("nosettings").getAttributes().get("nosettings") != null);
-
- assertTrue(search.getConcreteField("specifyname").getAttributes().get("newname") != null);
-
- assertTrue(search.getConcreteField("specifyname2").getAttributes().get("newname2") != null);
-
- assertTrue(search.getConcreteField("withstaticrankname").getAttributes().get("withstaticrankname") != null);
-
- assertTrue(search.getConcreteField("withstaticrankname").getAttributes().get("someothername") != null);
- }
-}
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/processing/AttributePropertiesTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/processing/AttributePropertiesTestCase.java
deleted file mode 100644
index 3a0fedfd550..00000000000
--- a/config-model/src/test/java/com/yahoo/searchdefinition/processing/AttributePropertiesTestCase.java
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.searchdefinition.processing;
-
-import com.yahoo.config.model.application.provider.BaseDeployLogger;
-import com.yahoo.searchdefinition.RankProfileRegistry;
-import com.yahoo.searchdefinition.Search;
-import com.yahoo.searchdefinition.SearchDefinitionTestCase;
-import com.yahoo.searchdefinition.UnprocessingSearchBuilder;
-import com.yahoo.searchdefinition.parser.ParseException;
-import com.yahoo.vespa.model.container.search.QueryProfiles;
-import org.junit.Test;
-
-import java.io.IOException;
-
-import static org.junit.Assert.fail;
-/**
- * Test AttributeProperties processor.
- *
- * @author hmusum
- */
-public class AttributePropertiesTestCase extends SearchDefinitionTestCase {
-
- @Test
- public void testInvalidAttributeProperties() throws IOException, ParseException {
- try {
- Search search = UnprocessingSearchBuilder.buildUnprocessedFromFile("src/test/examples/attributeproperties1.sd");
- new AttributeProperties(search, new BaseDeployLogger(), new RankProfileRegistry(), new QueryProfiles()).process(true, false);
- fail("attribute property should not be set");
- } catch (RuntimeException e) {
- // empty
- }
- }
-
- @Test
- public void testValidAttributeProperties() throws IOException, ParseException {
- Search search = UnprocessingSearchBuilder.buildUnprocessedFromFile("src/test/examples/attributeproperties2.sd");
- new AttributeProperties(search, new BaseDeployLogger(), new RankProfileRegistry(), new QueryProfiles()).process(true, false);
- }
-
-}
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/processing/BoldingTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/processing/BoldingTestCase.java
index 1ab8b054cb7..ac3ba1d98d9 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/processing/BoldingTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/processing/BoldingTestCase.java
@@ -4,8 +4,8 @@ package com.yahoo.searchdefinition.processing;
import com.yahoo.config.model.application.provider.BaseDeployLogger;
import com.yahoo.searchdefinition.RankProfileRegistry;
import com.yahoo.searchdefinition.Search;
+import com.yahoo.searchdefinition.SearchBuilder;
import com.yahoo.searchdefinition.SearchDefinitionTestCase;
-import com.yahoo.searchdefinition.UnprocessingSearchBuilder;
import com.yahoo.searchdefinition.parser.ParseException;
import com.yahoo.vespa.model.container.search.QueryProfiles;
import org.junit.Test;
@@ -23,7 +23,7 @@ public class BoldingTestCase extends SearchDefinitionTestCase {
@Test
public void testBoldingNonString() throws IOException, ParseException {
try {
- Search search = UnprocessingSearchBuilder.buildUnprocessedFromFile("src/test/processing/boldnonstring.sd");
+ Search search = SearchBuilder.buildFromFile("src/test/processing/boldnonstring.sd");
new Bolding(search, new BaseDeployLogger(), new RankProfileRegistry(), new QueryProfiles()).process(true, false);
fail();
} catch (IllegalArgumentException e) {
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/processing/IntegerIndex2AttributeTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/processing/IntegerIndex2AttributeTestCase.java
index 31631b0dc74..a8ba762b32b 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/processing/IntegerIndex2AttributeTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/processing/IntegerIndex2AttributeTestCase.java
@@ -4,8 +4,8 @@ package com.yahoo.searchdefinition.processing;
import com.yahoo.config.model.application.provider.BaseDeployLogger;
import com.yahoo.searchdefinition.RankProfileRegistry;
import com.yahoo.searchdefinition.Search;
+import com.yahoo.searchdefinition.SearchBuilder;
import com.yahoo.searchdefinition.SearchDefinitionTestCase;
-import com.yahoo.searchdefinition.UnprocessingSearchBuilder;
import com.yahoo.searchdefinition.document.SDField;
import com.yahoo.searchdefinition.parser.ParseException;
import com.yahoo.vespa.model.container.search.QueryProfiles;
@@ -15,6 +15,7 @@ import java.io.IOException;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
+
/**
* @author baldersheim
*/
@@ -22,7 +23,7 @@ public class IntegerIndex2AttributeTestCase extends SearchDefinitionTestCase {
@Test
public void testIntegerIndex2Attribute() throws IOException, ParseException {
- Search search = UnprocessingSearchBuilder.buildUnprocessedFromFile("src/test/examples/integerindex2attribute.sd");
+ Search search = SearchBuilder.buildFromFile("src/test/examples/integerindex2attribute.sd");
new IntegerIndex2Attribute(search, new BaseDeployLogger(), new RankProfileRegistry(), new QueryProfiles()).process(true, false);
SDField f;
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/processing/SummaryFieldsMustHaveValidSourceTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/processing/SummaryFieldsMustHaveValidSourceTestCase.java
index 39ab7195892..dbcfc8c202d 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/processing/SummaryFieldsMustHaveValidSourceTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/processing/SummaryFieldsMustHaveValidSourceTestCase.java
@@ -2,8 +2,11 @@
package com.yahoo.searchdefinition.processing;
import com.yahoo.config.model.application.provider.BaseDeployLogger;
-import com.yahoo.searchdefinition.*;
+import com.yahoo.searchdefinition.RankProfileRegistry;
+import com.yahoo.searchdefinition.Search;
+import com.yahoo.searchdefinition.SearchBuilder;
+import com.yahoo.searchdefinition.SearchDefinitionTestCase;
import com.yahoo.searchdefinition.parser.ParseException;
import com.yahoo.vespa.model.container.search.QueryProfiles;
import org.junit.Test;
@@ -17,9 +20,8 @@ public class SummaryFieldsMustHaveValidSourceTestCase extends SearchDefinitionTe
@Test
public void requireThatInvalidSourceIsCaught() throws IOException, ParseException {
- Search search = UnprocessingSearchBuilder.buildUnprocessedFromFile("src/test/examples/invalidsummarysource.sd");
try {
- new SummaryFieldsMustHaveValidSource(search, new BaseDeployLogger(), new RankProfileRegistry(), new QueryProfiles()).process(true, false);
+ SearchBuilder.buildFromFile("src/test/examples/invalidsummarysource.sd");
fail("This should throw and never get here");
} catch (IllegalArgumentException e) {
assertEquals("For search 'invalidsummarysource', summary class 'baz', summary field 'cox': there is no valid source 'nonexistingfield'.", e.getMessage());
@@ -28,9 +30,8 @@ public class SummaryFieldsMustHaveValidSourceTestCase extends SearchDefinitionTe
@Test
public void requireThatInvalidImplicitSourceIsCaught() throws IOException, ParseException {
- Search search = UnprocessingSearchBuilder.buildUnprocessedFromFile("src/test/examples/invalidimplicitsummarysource.sd");
try {
- new SummaryFieldsMustHaveValidSource(search, new BaseDeployLogger(), new RankProfileRegistry(), new QueryProfiles()).process(true, false);
+ SearchBuilder.buildFromFile("src/test/examples/invalidimplicitsummarysource.sd");
fail("This should throw and never get here");
} catch (IllegalArgumentException e) {
assertEquals("For search 'invalidsummarysource', summary class 'baz', summary field 'cox': there is no valid source 'cox'.", e.getMessage());
@@ -39,9 +40,8 @@ public class SummaryFieldsMustHaveValidSourceTestCase extends SearchDefinitionTe
@Test
public void requireThatInvalidSelfReferingSingleSource() throws IOException, ParseException {
- Search search = UnprocessingSearchBuilder.buildUnprocessedFromFile("src/test/examples/invalidselfreferringsummary.sd");
try {
- new SummaryFieldsMustHaveValidSource(search, new BaseDeployLogger(), new RankProfileRegistry(), new QueryProfiles()).process(true, false);
+ SearchBuilder.buildFromFile("src/test/examples/invalidselfreferringsummary.sd");
fail("This should throw and never get here");
} catch (IllegalArgumentException e) {
assertEquals("For search 'invalidselfreferringsummary', summary class 'withid', summary field 'w': there is no valid source 'w'.", e.getMessage());
@@ -50,7 +50,7 @@ public class SummaryFieldsMustHaveValidSourceTestCase extends SearchDefinitionTe
@Test
public void requireThatDocumentIdIsAllowedToPass() throws IOException, ParseException {
- Search search = UnprocessingSearchBuilder.buildUnprocessedFromFile("src/test/examples/documentidinsummary.sd");
+ Search search = SearchBuilder.buildFromFile("src/test/examples/documentidinsummary.sd");
BaseDeployLogger deployLogger = new BaseDeployLogger();
RankProfileRegistry rankProfileRegistry = new RankProfileRegistry();
new SummaryFieldsMustHaveValidSource(search, deployLogger, rankProfileRegistry, new QueryProfiles()).process(true, false);
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/VespaModelFactoryTest.java b/config-model/src/test/java/com/yahoo/vespa/model/VespaModelFactoryTest.java
index 094494073df..e4198115b2a 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/VespaModelFactoryTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/VespaModelFactoryTest.java
@@ -216,6 +216,9 @@ public class VespaModelFactoryTest {
@Override
public boolean isFirstTimeDeployment() { return false; }
+
+ @Override
+ public boolean useDedicatedNodeForLogserver() { return false; }
};
}
};
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTester.java b/config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTester.java
index 1023733a652..060fe96100d 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTester.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTester.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.model.test;
import com.google.common.collect.ImmutableList;
+import com.yahoo.cloud.config.ConfigserverConfig;
import com.yahoo.config.application.api.ApplicationPackage;
import com.yahoo.config.model.ConfigModelRegistry;
import com.yahoo.config.model.NullConfigModelRegistry;
@@ -46,6 +47,7 @@ public class VespaModelTester {
private boolean hosted = true;
private Map<String, Collection<Host>> hostsByFlavor = new HashMap<>();
private ApplicationId applicationId = ApplicationId.defaultId();
+ private boolean useDedicatedNodeForLogserver = false;
public VespaModelTester() {
this(new NullConfigModelRegistry());
@@ -92,6 +94,10 @@ public class VespaModelTester {
applicationId = ApplicationId.from(tenant, applicationName, instanceName);
}
+ public void useDedicatedNodeForLogserver(boolean useDedicatedNodeForLogserver) {
+ this.useDedicatedNodeForLogserver = useDedicatedNodeForLogserver;
+ }
+
/** Creates a model which uses 0 as start index and fails on out of capacity */
public VespaModel createModel(String services, String ... retiredHostNames) {
return createModel(Zone.defaultZone(), services, true, retiredHostNames);
@@ -131,6 +137,7 @@ public class VespaModelTester {
DeployProperties properties = new DeployProperties.Builder()
.hostedVespa(hosted)
.applicationId(applicationId)
+ .useDedicatedNodeForLogserver(useDedicatedNodeForLogserver)
.build();
DeployState deployState = new DeployState.Builder()
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/test/utils/VespaModelCreatorWithFilePkg.java b/config-model/src/test/java/com/yahoo/vespa/model/test/utils/VespaModelCreatorWithFilePkg.java
index 3c9618ceccd..8147d2e00ca 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/test/utils/VespaModelCreatorWithFilePkg.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/test/utils/VespaModelCreatorWithFilePkg.java
@@ -10,6 +10,7 @@ import com.yahoo.config.model.api.ValidationParameters.FailOnIncompatibleChange;
import com.yahoo.config.model.api.ValidationParameters.IgnoreValidationErrors;
import com.yahoo.config.model.application.provider.*;
import com.yahoo.config.model.deploy.DeployState;
+import com.yahoo.vespa.config.VespaVersion;
import com.yahoo.vespa.model.VespaModel;
import com.yahoo.vespa.model.application.validation.Validation;
@@ -50,7 +51,7 @@ public class VespaModelCreatorWithFilePkg {
public void validate() throws IOException {
ApplicationPackageXmlFilesValidator validator =
- ApplicationPackageXmlFilesValidator.create(applicationPkg.getAppDir(), new Version(6));
+ ApplicationPackageXmlFilesValidator.create(applicationPkg.getAppDir(), new Version(VespaVersion.major));
validator.checkApplication();
validator.checkIncludedDirs(applicationPkg);
}
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/NodeType.java b/config-provisioning/src/main/java/com/yahoo/config/provision/NodeType.java
index 763118dbf03..32718524997 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/NodeType.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/NodeType.java
@@ -24,7 +24,10 @@ public enum NodeType {
config(false, "Config server"),
/** A host of a (docker) config server node */
- confighost(true, "Config docker host");
+ confighost(true, "Config docker host"),
+
+ /** A controller */
+ controller(true, "Controller");
private final boolean isDockerHost;
private final String description;
diff --git a/config/src/main/java/com/yahoo/vespa/config/ConfigVerification.java b/config/src/main/java/com/yahoo/vespa/config/ConfigVerification.java
index bd6f5fe99d2..a020c8c8c55 100644
--- a/config/src/main/java/com/yahoo/vespa/config/ConfigVerification.java
+++ b/config/src/main/java/com/yahoo/vespa/config/ConfigVerification.java
@@ -17,8 +17,7 @@ import java.util.*;
/**
* Tool to verify that configs across multiple config servers are the same.
*
- * @author lulf
- * @since 5.12
+ * @author Ulf Lilleengen
*/
public class ConfigVerification {
private final static int port = 19071;
diff --git a/config/src/test/java/com/yahoo/vespa/config/classes/app.1.def b/config/src/test/java/com/yahoo/vespa/config/classes/app.1.def
deleted file mode 100644
index df2a57bad04..00000000000
--- a/config/src/test/java/com/yahoo/vespa/config/classes/app.1.def
+++ /dev/null
@@ -1,7 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-message string default="Hello!"
-
-times int default=1
-
-a[].name string
diff --git a/config/src/test/java/com/yahoo/vespa/config/classes/qr-templates.3.def b/config/src/test/java/com/yahoo/vespa/config/classes/qr-templates.3.def
deleted file mode 100644
index d3d7ff87cbd..00000000000
--- a/config/src/test/java/com/yahoo/vespa/config/classes/qr-templates.3.def
+++ /dev/null
@@ -1,141 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-## Directory for temporary files
-directory string default="tmp/templates"
-
-hey[].ho[].lets[].go string default="ramones"
-hey[].ho[].lets[].fishing int default=-4 range=[-8,0]
-hey[].ho[].lets[].gone int default=200 range=[0,1000000]
-hey[].ho[].lets[].ref reference
-hey[].ho[].gone int default=2000 range=[-10,2000000]
-hey[].ho[].going bool default=false
-hey[].ho[].wash double default=345.3
-hey[].ho[].me double default=-45.5 range=[-234.43,0]
-hey[].ho[].now double default=-34 range=[-234,0]
-hi[].there[].e enum { BATCH, REALTIME, INCREMENTAL} default=BATCH
-hi[].ther[].f enum { BATCH, REALTIME } default=BATCH
-
-#hey[] int
-mode enum { BATCH, REALTIME, INCREMENTAL} default=BATCH
-bar.arrline[] string
-az[] double
-bar.arline[] int range=[0,999]
-#bar[].version int
-b1[].b2[].b3[].b4[] bool
-## Capacities for all storage nodes
-capacity[] double range=[0,100]
-
-longVal long
-longWithDefault long default=9876543210
-longWithRange long range=[-9000000000,0]
-longArr[] long
-longArrWithRange[] long range=[0,9000000000]
-
-fileVal file
-fileWithDefault file
-fileArr[] file
-
-washing double default=5 range=[-1.4,34.324432]
-washer double default=46 range=[-1.6,54]
-
-urlprefix string
-
-## Prefix to use in queries to choose a given template
-templateset[].urlprefix string
-
-## The MIME type of a given template
-templateset[].mimetype string default="text/html"
-
-## The character set of a given template
-templateset[].encoding string default="iso-8859-1"
-
-## Not used
-templateset[].rankprofile int default=0
-
-
-## Not used in 1.0
-templateset[].keepalive bool default=false
-
-## Header template. Always rendered.
-templateset[].headertemplate string
-
-## Footer template. Always rendered.
-templateset[].footertemplate string
-
-## Nohits template. Rendered if there are no hits in the result.
-templateset[].nohitstemplate string
-
-## Hit template. Rendered if there are hits in the result.
-templateset[].hittemplate string
-
-## Error template. Rendered if there is an error condition. This is
-## not mutually exclusive with the (no)hit templates as such.
-templateset[].errortemplate string
-
-groupsheadertemplate string default="[DEFAULT]"
-
-## Aggregated groups header template.
-## Default rendering is used if missing
-templateset[].groupsheadertemplate string default="[DEFAULT]"
-
-## Aggregated range group template.
-## Default rendering is used if missing
-templateset[].rangegrouptemplate string default="[DEFAULT]"
-
-## Aggregated exact group template
-## Default rendering is used if missing
-templateset[].exactgrouptemplate string default="[DEFAULT]"
-
-## Aggregated groups footer template.
-## Default rendering is used if missing
-templateset[].groupsfootertemplate string default="[DEFAULT]"
-
-## Tags used to highlight results, starting a bolded section.
-## An empty string means the template should no override what
-## was inserted by the search chain.
-templateset[].highlightstarttag string default=""
-## Tags used to highlight results, ending a bolded section
-## An empty string means the template should no override what
-## was inserted by the search chain.
-templateset[].highlightendtag string default=""
-## Tags used to highlight results, separating dynamic snippets
-## An empty string means the template should no override what
-## was inserted by the search chain.
-templateset[].highlightseptag string default=""
-
-## The summary class to use for this template if there is none
-## defined in the query.
-ilscript[].name string
-ilscript[].doctype string
-ilscript[].content[] string
-config[].id reference
-config[].autostart string default="no"
-musum string
-
-auran string
-
-route[].name string
-route[].selector string
-route[].feed string
-
-languages[] string
-languages2[] string
-foolang[].lang[] string
-
-# Maps
-myIntMap{} int
-myStringMap{} string
-myStructMap{}.myInt int
-myStructMap{}.myString string
-myStructMap{}.myIntDef int default=56
-myStructMap{}.myStringDef string default="g"
-
-myStructMap{}.myNestedLeafMap{} long
-myStructMap{}.myNestedArray[] long
-
-myStructMap{}.myNestedMap{}.myLong long
-myStructMap{}.myNestedMap{}.myLongDef long default=-100
-
-myStructMap{}.myStruct.a string
-myStructMap{}.myStruct.b string default="pizza"
-myStructMap{}.myStruct.c file
diff --git a/config/src/test/java/com/yahoo/vespa/config/classes/ranges.1.def b/config/src/test/java/com/yahoo/vespa/config/classes/ranges.1.def
deleted file mode 100644
index 5377b143d77..00000000000
--- a/config/src/test/java/com/yahoo/vespa/config/classes/ranges.1.def
+++ /dev/null
@@ -1,4 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-quux int default=5 range=[,]
-xyzzy double default=5 range=[,]
-longVal long default=5 range=[,]
diff --git a/config/src/test/java/com/yahoo/vespa/config/classes/testfoobar.12.def b/config/src/test/java/com/yahoo/vespa/config/classes/testfoobar.12.def
deleted file mode 100644
index 863ac2b0fed..00000000000
--- a/config/src/test/java/com/yahoo/vespa/config/classes/testfoobar.12.def
+++ /dev/null
@@ -1,918 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-longVal long
-longWithDefault long default=8589934592
-
-fileVal file
-fileWithDefault file
-
-vh[] double range=[-300,300]
-bg[] int default=0 range=[-10,10]
-gee[] string
-storage[].feeder[] string
-storage[].distributor[] string
-
-ju[].hu[].tu[] double default=-45 range=[0,1000.1]
-ju[].hu[].wu[] enum { HEY, HO} default=HO
-ju[].hu[].tang[] bool default=false
-ju[].hu[].clan[] int default=45 range=[-90,90]
-ju[].hu[].sann reference
-
-foo string
-headertemplate string
-## If true, the bitvector part of results with number of hits within
-## max bin size will be merged into array form before applying static
-## rank. NB: Setting this to true may reduce performance.
-applystaticranktosmallbitvectors bool default=true
-
-## Size of bitvector cache (in bytes).
-bitvectorcachesize int default=50000000
-
-## Size of boolean occurrence cache (in bytes).
-boolocccachesize int default=100000000
-
-## Size of dictionary cache (in bytes).
-dictcachesize int default=40000000
-
-onedimstruct[].val string
-
-## Size of document info cache (in bytes).
-documentinfocachesize int default=25000000
-
-## Size of filter occurrence cache (in bytes).
-## This cache is used to optimizse exclusion handling.
-## A too small size limit will cause serious performance degradation
-## if the staticfilterindexes keyword is used.
-filterocccachesize int default=30000000
-
-## Size of integer range bitvector cache (in bytes).
-intrangebitvectorcachesize int default=50000000
-
-## Size of integer occurrence cache (in bytes).
-intocccachesize int default=100000000
-
-## Size of phrase occurrence cache (in bytes).
-phrasecachesize int default=150000000
-
-## Size of phrase occurrence index cache (in bytes).
-phraseidxcachesize int default=20000000
-
-## Size of position occurrence cache (in bytes).
-posocccachesize int default=100000000
-
-## The filename of a file specifying which hosts that should have
-## access to the internal web-server of the fsearch process.
-accesslist string default=""
-
-## The filename of the log file for HTTP access.
-accesslog string default=""
-
-## The number of threads to perform async occurrence fetch, i.e.
-## read from posocc and boolocc files and possibly generation of
-## posocc/boolocc arrays for phrases. Async occurrence fetches
-## will use more system CPU but can reduce latency on lightly
-## loaded systems.
-asyncfetchocc int default=0
-
-## specifies the high limit of the ranked result bin.
-## If the percentage of the resultset specified in binsize is higher
-## than this limit, this will be the max size.
-binhigh int default=2147483647
-
-## specifies the lowest possible size of a ranked result. This is
-## the lower ramp of the percentage specified in the binsize variable.
-binlow int default=10000
-
-## specifies the size of the ranked results as a percentage of the
-## total result set size. The percentage can be ramped off with the
-## binlow and binhigh variables. NB: Setting this to 100.0 may lead to
-## seriously reduced performance.
-binsize double default=100.0
-
-## Check cache lines beyond offset + maxhits for blacklisting
-## in order to provide correct "totalhits" for queries with many hits.
-checktrailingcachelines bool default=false
-
-## specifies the number of result entries that are processed when
-## doing site collapsing. Site collapsing is performed by reordering
-## the first collapseentries hits in the result set, listing
-## the best hit from each site first.
-collapseentries int default=200 range=[0,1000000]
-
-## Specifies what is the default field used for collapse
-defaultcollapse string default=""
-
-## specifies the rank penalty for additional hits from a site found
-## during site collapsing. When additional hits from a site are
-## found, the rank values of those hits are reduced by shifting
-## them collapserankshift places to the right.
-collapserankshift int default=0
-
-## The maximum number of active and queued requests. Exceeding
-## requests will be discarded.
-cutofftransportconns int default=1024
-
-## specifies the directory where the dataset resides. Only the
-## directory needs to be specified. fsearch will then read the config
-## files (index.cf) in that directory to discover the structure.
-datasetdir string default=""
-
-## If set to "yes", then fallback to the default index if a query
-## term specifies a non-existing index. If set to "no", then always
-## return 0 hits for a query term that specifies a non-existing index.
-## Note that the result set for the entire query might still contain
-## hits from other query terms unless the invalid query term was an
-## and-term. Default value is "no".
-defaultindexfallback string default="no"
-
-# ???
-docattrhashsize int default=8171
-# ???
-docidhashsize int default=8171
-# ???
-docport int default=0
-
-## If present, the result cache is not flushed due to reload operation
-## unless the document summaries have changed.
-dontflushresultcacheonreload bool default=false
-
-## Colon-delimeted list of catalogs for which negative dictionary
-## entries should not be cached. Default is unset. Most useful for
-## dictionary files that are memorymaped or memorylocked in
-## indextune.cf. Note that the list must start and end with a colon (:).
-dropnegativedictionarycacheentriescatalogs string default=""
-
-## specifies a term (used in an ANDNOT) to be used for filtering ALL queries.
-excludefilter string default=""
-
-## If set to "yes", use of firstoccproximity is enabled. If set to
-## "no", use of firstoccproximity is disabled.
-firstoccproximity string default=""
-
-## If present, the filter occurrence cache is flushed due to reload
-## operation when all queries using the old configuration has completed.
-flushfilteroccscacheonreload bool default=false
-
-## Do not use position occurrence information, even though it might
-## be present in the index.
-forceemptyposoccs bool default=false
-
-## The port to run Fnet Remote Tools (RPC) service on.
-## If set to 0, no FRT service is provided.
-frtport int default=0
-
-## The directory where gid based blacklist files are found.
-## Used for "realtime" indexing setups.
-gidblacklistdir string default="../gidblacklist"
-
-## A semicolon-separated list of index names and index name prefixes
-## defining the set of indexes that are relevant when highlighting
-## query keywords. The terms and phrases from the query targeting any
-## of these indexes will be highlighted when dynamic teasers are
-## generated. In order to separate index names and index name prefixes
-## in the list, index name prefixes have a trailing '*'. Note that
-## index aliases are treated like actual index names. This means that
-## if you have an index relevant for highlighting and an index alias
-## pointing to that index, you need to configure both as relevant for
-## highlighting if you want to highlight keywords targeting both the
-## actual index and the alias. Example config value: "normal*;title".
-## Default config value: "*" (highlight all keywords).
-highlightindexes string default="*"
-
-# ???
-hostname string default=""
-
-## provide a HTTP server at the given port number.
-hport int default=8002
-
-## If true, the TCP_NODELAY option is set on the http connections.
-## This causes non-full packets to be sent even though previously sent
-## data hasn't yet been acknowledged (e.g. due to the delayed ack
-## feature present on various tcp stacks).
-httpdnodelay bool default=false
-
-# ???
-intoccpoolsize int default=32768
-# ???
-intoccpoolstep int default=32768
-# ???
-jobqueuethreads int default=5
-
-## Juniper configuration property map.
-## currently known keys with defaults:
-## [juniper.dynsum.highlight_on] = "<b>"
-## [juniper.dynsum.highlight_off] = "</b>"
-## [juniper.dynsum.continuation] = "..."
-## [juniper.dynsum.length] = "256"
-## [juniper.dynsum.min_length] = "128"
-## [juniper.stem.min_length] = "5"
-## [juniper.stem.max_extend] = "3"
-## [juniper.dynsum.surround_max] = "128"
-## [juniper.dynsum.max_matches] = "3"
-## [juniper.dynsum.escape_markup] = "auto"
-## [juniper.matcher.winsize] = "200"
-## [juniper.dynsum.separators] = "\0x1F\0x1D"
-## [juniper.dynsum.connectors] = "\0x1F\0x1D"
-## [juniper.proximity.factor] = "0.25"
-## [juniper.debug_mask] = "0"
-##
-#junipersetup properties
-
-## The maximum number of HTTP connections.
-maxhttpconns int default=1024
-
-## The maximum interval between a successful read from a socket
-## before timeout, in seconds.
-maxsocksilent double default=5.0
-
-## The maximum number of threads to use.
-maxthreads int default=100
-
-## The maximum number of active requests at any time. Exceeding
-## requests will be queued.
-maxtransportconns int default=15
-
-## If present the index to the document summary file (docsum.idx)
-## is accessed on disk on each access instead of being cached in
-## memory. For experimental use on systems with very many docu-
-## ments but very few actual docsum requests.
-nodocsumidxinmemory bool default=false
-
-## If set then locks on result cache are held only for very
-## short intervals and only a single cache element is locked at a
-## time. This simplifies the mutex locking order, but cause extra
-## load due to queries that would previously first block then use
-## cached value now being fully evaluted.
-nonblockingresultcache bool default=false
-
-## A boolean value controlling removal of several common accented
-## uses of characters, used when matching for highlighting.
-normalize.accentremoval bool default=true
-
-## A boolean value controlling normalizing of LATIN CAPITAL/SMALL
-## LIGATURE OE (U+0152 U+0153) to the string "oe", used when matching
-## for highlighting.
-normalize.ligaturesubstitution bool default=true
-
-## A boolean value controlling normalizing of various accented letters
-## to two chars, for linguistics compatibility.
-normalize.multicharexpansion bool default=true
-
-## A boolean value controlling normalizing of LATIN SMALL LETTER SHARP S
-## (U+00DF) to the string "ss", used when matching for highlighting.
-normalize.sharpssubstitution bool default=true
-
-## If true, queries are still handled during the reload operation
-## (even when the document summaries have changed). If false then
-## queries are stalled until reload has completed.
-overlappedreload bool default=true
-
-## The partition number to report to the connecting fdispatch process
-## if the dataset label didn't specify the partition number.
-partition int default=0
-
-## If set to "yes", use of proximity (cf. proximity and firstoccproximity)
-## will affect phrases in addition to single words. If set to "no",
-## use of proximity is never used for phrases.
-phraseproximity string default=""
-
-## The file name to write the PID of the fsearch process to.
-pidfile string default=""
-
-## Minimum value for maximum value of number of 'posocc' entries
-## for a word. If set to 0, computed as 2 * binlow.
-posbinhigh int default=0
-
-## Maximum value for maximum value of number of 'posocc' entries
-## for a word. If set to 0, computed as min(4 * binhigh, 0x7fffffff)
-posbinlow int default=0
-
-## The maximum value for number of 'posocc' entries for a word,
-## specified as a percentage of the number of documents in the index.
-## If more entries are needed for evaluation, posocc entries are not
-## used for that word and evaluation will be performed without full
-## proximity support. The percentage can be ramped off with the posbinlow
-## and posbinhigh variables. If set to 0, computed as 2.0 * binsize.
-posbinsize double default=0
-
-## If set to "yes", use of posocc files is enabled, except when
-## "forceemptyposoccs" is set or posocc files doesn't exist. If
-## set to "no", use of posocc files is disabled.
-proximity string default=""
-
-## Selects behavior when proximity can be used for two words but
-## not three words while firstoccproximity can be used for three
-## words. If set to "yes", then use proximity for two words. If
-## set to "no", then use firstoccproximity for three words.
-proximitypairbeforefirstoccproximitytriple string default=""
-
-## Selects behavior when proximity can be used for three words but
-## not four words while firstoccproximity can be used for four
-## words. If set to "yes", then use proximity for three words. If
-## set to "no", then use firstoccproximity for four words. The
-## default is "yes".
-proximitytriplebeforefirstoccproximityquad string default=""
-
-## specifies the port number for the persistent internal transport
-## protocol provided for a multi-level dispatch system.
-ptport int default=8003
-
-## a reference to a rank-profiles.def type configuration
-## describing how to rank results. If empty, fsearch loads
-## rank.cf from the dataset directory, see rank.cf(5).
-rankcf reference
-
-## specifies a lower limit for the rankvalue of the results
-## returned from the search node.
-rankcutoff int default=0
-
-## if set, an internal calculation is used for determining a
-## rank cutoff value as above.
-rankcutoffadvanced bool default=false
-
-## Specifies the constant value used in the internal advanced rank
-## cutoff calculations done when the rankcutoffadvanced parameter is set.
-## This roughly reflects the expected rank contribution of
-## one good term.
-rankcutoffadvval int default=0
-
-# ???
-rankinginfopoolsize int default=1048576
-# ???
-rankinginfopoolstep int default=262144
-## Grace period (in seconds) after index reload where old index is
-## still available.
-reloadgraceperiod int default=64
-
-## Maximum number of entries in the resultattributescache.
-resultattributescachequeries int default=0
-## Maximum number of entries in the resultattributescache.
-## 0 means no limitation.
-resultattributescachesize int default=5000000
-
-## The maximum lifetime of a resultcache element in seconds.
-resultcachemaximumlifetime int default=7200
-## The minimum lifetime of a resultcache element in seconds.
-resultcacheminimumlifetime int default=3600
-
-## Maximum number of entries in the resultcache.
-## 0 means no limitation.
-resultcachequeries int default=0
-## Maximum size (in bytes) in the resultcache.
-resultcachesize int default=50000000
-
-# ???
-rewriter.indexes string default=""
-# ???
-rewriter.langfield string default="bsumlanguage"
-# ???
-rewriter.rootdir string default=""
-# ???
-#rewritersetup properties
-
-## Set the number of samples a disk is marked as slow before
-## starting selftest when no progress occurred during the last
-## slowdisksamples samples.
-slowdisklatch int default=1
-
-## Set the number of milliseconds to sleep between each sample of
-## disk state.
-slowdisksamplemillisleep int default=100
-
-## Set the number of nanoseconds to sleep between each sample of
-## disk state. If 0, use slowdisksamplemillisleep instead.
-## Restriction: This option only has effect on FreeBSD
-## using the LinuxThreads port.
-slowdisksamplenanosleep int default=0
-
-## Set the number of contigous disk samples without progress and
-## outstanding requests before a disk is detected as slow.
-## If zero, automatic slowdisk detection is turned off.
-## Recommended value is '20' (when using default values for the
-## other slowdisk detection parameters).
-slowdisksamples int default=0
-
-# ???
-staticfilter string default=""
-
-## specifies a set of indexes for which to optimize exclusion
-## handling. Colon is used to separate index names. Default is unset.
-## The optimization use entries in the filter occurrence cache, thus
-## a too small size limit of the cache will cause serious performance
-## degradation.
-staticfilterindexes string default=""
-
-# ???
-strictbind bool default=false
-
-## Specifies a file containing url coded queries to run as part of self
-## test initiated when a slow disk has been detected.
-testloadfile string default=""
-
-## the type of transport to use. Currently only "fnet" is available.
-transport string default=""
-
-## Specifies the transport access log file used by the http server.
-## The real log file name is created by using the strftime function
-## with the given argument as template.
-transportaccesslog string default=""
-
-## Specifies the interval between transport access log rotation.
-## This is the number of minutes between log rotation, e.g a value
-## of 1440 indicates that the log should be rotated every 24 hour.
-transportaccesslogcycle int default=1440
-
-## Specifies the offset from the start of each cycle when the
-## transport access log should be cycled automatically. The unit
-## is seconds, e.g. a value of 1020 indicates that the log should
-## be cycled at 17 minutes past each cycle.
-transportaccesslogcycleoffset int default=0
-
-## Reference to VSM (Vespa Stream Matcher) configuration. If this is
-## set, fsearch will run in VSM mode.
-vsmconfig reference
-
-## If true, the TCP_NODELAY option is set on the persistent transport
-## connections. This causes non-full packets to be sent even though
-## previously sent data hasn't yet been acknowledged (e.g. due to the
-## delayed ack feature present on various tcp stacks).
-transportnodelay bool default=true
-
-# ???
-wordfolder bool default=false
-# ???
-wordhashsize int default=524269
-# ???
-wordoccpoolsize int default=2097152
-# ???
-wordoccpoolstep int default=524288
-# ???
-wordpoolsize int default=262144
-# ???
-wordpoolstep int default=65536
-
-## Connect spec for transactionlog server.
-tlsspec string default=""
-
-## Document manager config
-documentmanagerconfigid reference
-
-functionmodules[] string restart
-
-specialchars string
-
-tokenlist[].name string
-tokenlist[].tokens[].token string
-tokenlist[].tokens[].replace string default=""
-
-afloat double default=34 range=[0,1002]
-
-## qr-searchers:
-tag.bold.open string default="<hi>"
-tag.bold.close string default="</hi>"
-tag.separator string default="<sep />"
-
-## This array contains the built-in searchers that should
-## normally always run in a Vespa-S system. The actual list is
-## in the global/ directory on the configserver.
-builtin[].searcher string
-
-## If for some reason you need to disable one of the built-in
-## searchers you can set this flag to "false". Handle with great
-## care. You need to match the array index from the global/
-## directory, and this may change depending on versions.
-builtin[].enabled bool default=true
-
-# some searcher specific configuration parameters:
-
-com.yahoo.prelude.searcher.FieldCollapsingSearcher.collapsesize int default=1
-com.yahoo.prelude.searcher.FieldCollapsingSearcher.extrafactor double default=2.0
-com.yahoo.prelude.searcher.FieldCollapsingSearcher.collapsefield string default="mid"
-
-com.yahoo.prelude.searcher.BlendingSearcher.numthreads int default=200
-com.yahoo.prelude.searcher.BlendingSearcher.docid string default=""
-
-com.yahoo.prelude.searcher.BoldingSearcher.source string default=""
-
-com.yahoo.prelude.searcher.JuniperSearcher.source string default=""
-com.yahoo.prelude.searcher.JuniperSearcher.defaultdoctype string default=""
-
-com.yahoo.prelude.searcher.XMLStringSearcher.source string default=""
-
-## relevancy as measured from the backend will usually be
-## normalized into the [0,1000] range to make blending between
-## several backends with different relevancy models possible; you
-## can elect to skip this if you only have backends using
-## relevancy scores that are directly comparable.
-com.yahoo.prelude.fastsearch.FastSearcher.skipnormalizing bool default=true
-
-## how many aggregation groups to fetch from the backend
-com.yahoo.prelude.grouping.AggregatingSearcher.maxgroups int default=100
-
-com.yahoo.prelude.querytransform.PhrasingSearcher.automatonfile string default=""
-com.yahoo.prelude.querytransform.NonPhrasingSearcher.automatonfile string default=""
-com.yahoo.prelude.querytransform.TermReplacingSearcher.termlist[] string
-com.yahoo.prelude.querytransform.CompleteBoostSearcher.source string default=""
-
-com.yahoo.prelude.querytransform.ExactStringSearcher.source string default=""
-com.yahoo.prelude.querytransform.LiteralBoostSearcher.source string default=""
-com.yahoo.prelude.querytransform.TermBoostSearcher.source string default=""
-com.yahoo.prelude.querytransform.NormalizingSearcher.source string default=""
-com.yahoo.prelude.querytransform.StemmingSearcher.source string default=""
-
-com.yahoo.prelude.statistics.StatisticsSearcher.latencybucketsize int default=30
-
-
-# here users may add their custom searchers
-# (all strings should be class names)
-customizedsearchers.rawquery[] string
-customizedsearchers.transformedquery[] string
-customizedsearchers.blendedresult[] string
-customizedsearchers.unblendedresult[] string
-customizedsearchers.backend[] string
-customizedsearchers.argument[].key string
-customizedsearchers.argument[].value string
-
-## This is for adding searchers which should be below BlendingSearcher,
-## but not be linked to any Vespa cluster (directly).
-external[].name string
-external[].searcher[] string
-
-# Search cluster specific information.
-## Name of search cluster.
-searchcluster[].name string default=""
-
-## Names of search definitions served by search cluster.
-searchcluster[].searchdef[] string
-
-## configid that may be used to get rank-profiles config for the cluster.
-searchcluster[].rankprofiles.configid reference default=""
-
-## Indexing mode of search cluster.
-searchcluster[].indexingmode enum { REALTIME, STREAMING } default=REALTIME
-
-## Storage cluster to use for search cluster if indexingmode is streaming.
-searchcluster[].storagecluster string default=""
-
-# The available dispatchers on each search cluster
-searchcluster[].dispatcher[].host string
-searchcluster[].dispatcher[].port int
-
-## The number of least significant bits of the part id used to specify the
-## row number (the rest of the bits specifies the column). Don't touch
-## this unless you know why you are doing it.
-searchcluster[].rowbits int default=0
-
-
-# 4 search-cluster-specific overrides of global cache parameters:
-
-## Internal searcher cache. Size is measured in megabytes of raw packet
-## size. Hits larger than 1% of total cache size will not be cached.
-searchcluster[].cache.size int default=1
-
-## Timeout for internal searcher cache. Entries older than this number
-## of seconds will be removed from cache. 0 means no cache timeout.
-## If cachetimeoutseconds is used, the cache is not purged when reindexing.
-searchcluster[].cache.timeout int default=-1
-
-## If timeoutwithpurging is set, the index will be purged (possibly
-## gradually) when index switching occurs, even if cache timeout > 0.
-## Not used if cache timeout is 0. If searchcluster[].cache.timeout is
-## not explicitly set, the global value will be used instead of the one
-## set locally.
-searchcluster[].cache.timeoutwithpurging bool default=false
-
-## Gradual cache switching causes the index to only be gradually purged
-## when cache switching occurs. cacheswitchseconds is the length of the period
-## when a given cache entry may be from either the previous or current
-## index. Setting it to 0 makes QRS purges the cache entirely when a new
-## index becomes available. At the end of the cacheswitchseconds period,
-## the cache will be cleaned of any remaining entries from the previous
-## index.
-searchcluster[].cache.switchseconds int default=-1
-
-# Per dispatcher config-id might be nice to have, remove it until needed.
-# searchcluster[].dispatcher[].configid reference
-
-# rank-profiles
-## name of this rank profile. maps to table index for internal use.
-rankprofile[].name string
-
-## the name of a generic property available to the feature execution framework and feature plugins
-rankprofile[].fef.property[].name string
-
-## the value of a generic property available to feature plugins
-rankprofile[].fef.property[].value string
-
-## the catalog name overrides apply to
-rankprofile[].catalog[].name string
-
-## Boost value for AND queries in this catalog.
-rankprofile[].catalog[].andboost int default=0
-
-## Boost value for OR queries in this catalog.
-rankprofile[].catalog[].orboost int default=0
-
-## Boost value for ANY queries in this catalog.
-rankprofile[].catalog[].anyboost int default=0
-
-## Boost value for NEAR queries in catalog.
-rankprofile[].catalog[].nearboost int default=0
-
-## Boost value for ORDEREDNEAR queries in this catalog.
-rankprofile[].catalog[].orderednearboost int default=0
-
-## Boost value for phrase queries in this catalog.
-rankprofile[].catalog[].phraseboost int default=0
-
-## Boost value for all queries in catalog.
-rankprofile[].catalog[].rankboost int default=0
-
-## If true, the context boost is the max value of
-## the individual contextboosts.
-## When false, the context boost when a term is in
-## several contexts is the sum of the individual contextboosts.
-rankprofile[].catalog[].bestcontextboostonly bool default=false
-
-
-## If true, then use extnumoccboost only when calculating rank values.
-## Also, do not normalize the extnumoccboost value with
-## global term frequency. Default value is false.
-rankprofile[].catalog[].extnumoccboostonly bool default=false
-
-## If yes, then use extnumoccboost only when calculating rank values.
-## Also, do not normalize the extnumoccboost value with
-## global term frequency. Default value is no.
-rankprofile[].catalog[].numoccandextnumoccboostonly bool default=false
-
-## If yes, then use bitvectors when possible.
-## Default value is false.
-rankprofile[].catalog[].preferbitvector bool default=false
-
-## Load extnumoccboost for this catalog from the named file.
-## extnumoccboost specifies boost values due to the number of
-## occurences of a term that are external to the document. If
-## "NULL" is given as file name, then all extnumoccboost values
-## will be set to 0.
-rankprofile[].catalog[].extnumoccboost.table string default="/home/vespa/conf/vespa/search/ranktables/constant-0000"
-
-## Load numoccboost for this catalog from the named file.
-## numoccboost specifies boost values due to the number of occurences in
-## a document. If "NULL" is given as file name, then all numoccboost
-## values will be set to 0.
-rankprofile[].catalog[].numoccboost.table string default="/home/vespa/conf/vespa/search/ranktables/constant-0000"
-
-## Load firstoccboost for catalog from the file named.
-## firstoccboost specifies boost values due to the position of the
-## first occurence in a document. If "NULL" is given as file name,
-## then all firstoccboost values will be set to 0.
-rankprofile[].catalog[].firstoccboost.table string default="/home/vespa/conf/vespa/search/ranktables/constant-0000"
-
-
-## Load firstoccproximityboost for this catalog from the file named.
-## firstoccproximity boost specifies boost values due to the correlation between
-## positions of the first occurence in a document for two and two words.
-##
-## If "NULL" is given as file name, then all
-## firstoccproximityboost values will be set to 0. If otherwise set,
-## should be the name of a file to load into the table. The file
-## should have 256 lines each containing a single integer.
-##
-## There are 256 elements in the table, handling forward distances from 1.
-## The corresponding firstoccrevproximityboost table is used
-## to handle closeness in reverse order.
-##
-## The last array index specifies the proximity table set. During
-## evaluation, the bigram proximity weight supplied by the query segmenter
-## specifies which proximity table set to use, with a fallback to set 0
-## when no information is available.
-rankprofile[].catalog[].firstoccproximityboost[].table string default="/home/vespa/conf/vespa/search/ranktables/constant-0000"
-
-## Load firstoccrevproximityboost table for this catalog from the named file.
-## Specifies boost values due to the correlation between positions
-## of the first occurence in a document for two and two words when
-## the second word in the query comes first in the document.
-## See also firstoccproximityboost above.
-rankprofile[].catalog[].firstoccrevproximityboost[].table string default="/home/vespa/conf/vespa/search/ranktables/constant-0000"
-
-## Load proximityboost for this catalog from the named file.
-## proximity boost specifies boost values due to the correlation between
-## positions of the occurences in a document for two and two words.
-## See also firstoccproximityboost above.
-rankprofile[].catalog[].proximityboost[].table string default="/home/vespa/conf/vespa/search/ranktables/constant-0000"
-
-## Load revproximityboost for this catalog from the named file.
-## revproximity boost specifies boost values due to the correlation between
-## positions of the occurences in a document for two and two words.
-## See also firstoccproximityboost above.
-rankprofile[].catalog[].revproximityboost[].table string default="/home/vespa/conf/vespa/search/ranktables/constant-0000"
-
-## Load divtable for this catalog from the named file.
-## Rank values for a query term are divided by the entry
-## in divtable indexed by log2 of term frequence.
-## The file should contain ?? lines each with a single integer.
-rankprofile[].catalog[].divtable string default=""
-
-## The name of a context in this catalog to specify boosts for.
-rankprofile[].catalog[].context[].name string
-
-## Boost occurrences in this context with the given value.
-## XXX -1 uses default (???) from somewhere(TM).
-rankprofile[].catalog[].context[].contextboost int default=0
-
-## Boost pair of occurrences in this context with
-## the given value when evaluating 2 words from same catalog in
-## parallell.
-## XXX -1 uses default (???) from somewhere(TM).
-rankprofile[].catalog[].context[].commoncontextboost.pair int default=0
-
-## Boost triple of occurrences in this context with
-## the given value when evaluating 3 words from same catalog in
-## parallell.
-## XXX -1 uses default (???) from somewhere(TM).
-rankprofile[].catalog[].context[].commoncontextboost.triple int default=0
-
-## Boost quad of occurrences in this context with
-## the given value when evaluating 4 words from same catalog in
-## parallell.
-## XXX -1 uses default (???) from somewhere(TM).
-rankprofile[].catalog[].context[].commoncontextboost.quad int default=0
-
-
-## The name of the attribute
-rankprofile[].attribute[].name string
-
-## Boost value for queries that hit in this attribute
-rankprofile[].attribute[].attributecontextboost int default=0
-
-## Load weightboost for this attribute from the named file.
-## weightboost specifies boost values due to the weight (weighted set)
-## or number of occurences (single, array) in an attribute.
-## If "NULL" is given as file name, then all weightboost values will be set to 0.
-rankprofile[].attribute[].weightboost.table string default="/home/vespa/conf/vespa/search/ranktables/constant-0000"
-
-
-## Load static rank values from the given staticrank docattr vector.
-## Must be specified in index.cf as a staticrankfile.
-rankprofile[].staticrankfile string default=""
-
-## Multiply static rank values with given value when calculating total
-## rank value.
-rankprofile[].staticcoefficient int default=1
-
-## If false then use only static ranking when sorting result hits.
-## Default is true.
-rankprofile[].dynamicranking bool default=true
-
-## If dynamic ranking is turned off, then ascending will sort the
-## result hits with lowest static rank values first, while
-## descending will sort with highest static rank values
-## first. Default is descending. This keyword has no effect if
-## dynamic ranking is on.
-rankprofile[].staticranksortorder string default="descending"
-
-## Load static rank mapping from the file named table. The static
-## rank mapping maps each 8-bit static rank value into a 32-bit static
-## rank value. This option may only be used with 8-bit static rank files.
-rankprofile[].staticrankmap string default=""
-
-## If set to "true", total rank will be reduced when dynamic rank is less than
-## 25% of static rank, to suppress irrelevant hits from popular sites.
-## If set to "false", total rank is not reduced.
-rankprofile[].clampstaticrank bool default=false
-
-## Load document datetime values used for freshness boost calculation from
-## this file. The values must be coded as minutes since
-## 1900-01-01T00:00Z. The value 0 has the special meaning
-## "no datetime value exists".
-rankprofile[].freshnessboost.file string default=""
-
-## Load freshnessboost lookup-table values from the file named
-## table instead of using built-in default values. The file must
-## contain 32 white-space separated non-negative integers.
-rankprofile[].freshnessboost.table string default="/home/vespa/conf/vespa/search/ranktables/constant-0000"
-
-## When calculating the freshness boost value multiply difference between
-## current datetime and document datetime with timeoffset before taking
-## the base-2 logarithm. Default value is 1. Max value is 31.
-rankprofile[].freshnessboost.timeoffset int default=1
-
-## If a document has datetime value 0, then use defaultboostvalue
-## as freshness boost value instead of doing table lookup. The default
-## default value is 0 (no boost).
-rankprofile[].freshnessboost.defaultboostvalue int default=0
-
-## Multiply freshness boost value with coefficient when calculating
-## total freshness boost value. If coefficient 0 is used, no freshness
-## boost value will be computed or added. Default value is 0.
-rankprofile[].freshnessboost.coefficient int default=0
-
-## boost table files for distance ranking, 1 dimension.
-## The tables have 465 elements each, where slots 0..15 represents
-## distances 0..15 while the remaining slots represents distance
-## (16 + (slot & 15)) << ((slot >> 4) - 1). Linear interpolation is
-## used for distances "between" table slots.
-##
-## If "NULL" is given as the file name then all 1D distance boost values
-## for that table will be set to 0.
-rankprofile[].distance1dboosttable[].table string
-
-## boost table files for distance ranking, 2 dimensions.
-## The tables have 977 elements each, where slots 0..15 represents
-## square of distance being 0..15 while the remaining slots represents
-## square of distance distance being
-## (16 + (slot & 15)) << ((slot >> 4) - 1). Linear interpolation is
-## used for distances "between" table slots.
-##
-## If "NULL" is given as the file name then all 2D distance boost values
-## for that table will be set to 0.
-rankprofile[].distance2dboosttable[].table string
-
-## The lowest possible size of a ranked result. This is the lower ramp
-## of the percentage specified in the binsize variable. The default is
-## specified in fsearchrc.
-rankprofile[].binlow int default=-1
-
-## The high limit of the ranked result bin. If the percentage of the
-## resultset specified in binsize is higher than this limit, this will be
-## the max size. The default is specified in fsearchrc.
-rankprofile[].binhigh int default=-1
-
-## The size of the ranked results as a percentage of the total result
-## set size. The percentage can be ramped off with the binlow and binhigh
-## variables. The default is specified in fsearchrc.
-rankprofile[].binsize double default=-1
-
-## Minimum value for maximum value of number of 'posocc' entries for a word.
-## The default is specified in fsearchrc.
-rankprofile[].posbinlow int default=-1
-
-## Maximum value for maximum value of number of 'posocc' entries for a word.
-## The default is specified in fsearchrc.
-rankprofile[].posbinhigh int default=-1
-
-## The maximum value for number of 'posocc' entries for a word, specified
-## as a percentage of the number of documents in the index. If more
-## entries are needed for evaluation, posocc entries are not used for that
-## word and evaluation will be performed without full proximity support.
-## The percentage can be ramped off with the posbinlow and posbinhigh
-## variables. The default is specified in fsearchrc.
-rankprofile[].posbinsize int default=-1
-
-## After all other rank calculations, the rank value is tuned according
-## to the tunefactor and tunebias values. The rank value is modified
-## as follows: new_rank = old_rank * tunefactor + tunebias.
-rankprofile[].tunefactor double default=1.0
-
-## After all other rank calculations, the rank value is tuned according
-## to the tunefactor and tunebias values. The rank value is modified
-## as follows: new_rank = old_rank * tunefactor + tunebias.
-rankprofile[].tunebias int default=0
-
-## A lower limit for the rankvalue of the results returned from the
-## search node. If rankcutoff.advanced is set to "true", determines
-## the constant value used in the internal advanced rank cutoff
-## calculations. This roughly reflects the expected rank contribution
-## of one good term.
-## The rankcutoff.val value and the rankcutoff.advanced parameter
-## may be used if you only want hits with a minimum relevancy to show
-## up in the resultset.
-## A value below zero means no rankcutoff is done.
-rankprofile[].rankcutoff.val int default=-1
-
-## When rankcutoff.val is in use, this flag controls whether to use
-## an internal calculation is used for determining the rank cutoff
-## value. If "false", use rankcutoff.val as a direct lower limit.
-rankprofile[].rankcutoff.advanced bool default=false
-
-## If set to "ON", use of posocc files is enabled, except when
-## "forceemptyposoccs" is set in fsearchrc or posocc files doesn't exist.
-## If set to "OFF", use of posocc files is disabled.
-## If "NOTSET" the fsearchrc "proximity" parameter is used instead.
-rankprofile[].proximity.full.enable enum { OFF, ON, NOTSET } default=NOTSET
-
-## If set to "ON", use of firstoccproximity is enabled.
-## If set to "OFF", use of firstoccproximity is disabled.
-## When NOTSET use the firstoccproximity value in fsearchrc configuration.
-rankprofile[].proximity.firstocc.enable enum { OFF, ON, NOTSET } default=NOTSET
-
-## If set to "ON", use of proximity (cf. proximity and firstoccproximity)
-## will affect phrases in addition to single words.
-## If set to "OFF", proximity is never used for phrases.
-## When NOTSET use the phraseproximity value in fsearchrc configuration.
-rankprofile[].proximity.phrase.enable enum { OFF, ON, NOTSET } default=NOTSET
-
-## Selects behavior when proximity can be used for two words but not three
-## words while firstoccproximity can be used for three words.
-## If set to "ON", then use proximity for two words.
-## If set to "OFF", then use firstoccproximity for three words.
-## When NOTSET use the proximitypairbeforefirstoccproximitytriple value
-## in fsearchrc configuration.
-rankprofile[].proximity.pairbeforefirstocctriple.enable enum { OFF, ON, NOTSET } default=NOTSET
-
-## Selects behavior when proximity can be used for three words but not four
-## words while firstoccproximity can be used for four words.
-## If set to "ON", then use proximity for three words.
-## If set to "OFF", then use firstoccproximity for four words.
-## When NOTSET use the proximitytriplebeforefirstoccproximityquad value
-rankprofile[].proximity.triplebeforefirstoccquad.enable enum { OFF, ON, NOTSET } default=NOTSET
diff --git a/config/src/test/java/com/yahoo/vespa/config/configsglobal/qr-templates.3.cfg b/config/src/test/java/com/yahoo/vespa/config/configsglobal/qr-templates.3.cfg
deleted file mode 100644
index 345c20157f9..00000000000
--- a/config/src/test/java/com/yahoo/vespa/config/configsglobal/qr-templates.3.cfg
+++ /dev/null
@@ -1,111 +0,0 @@
-washing -0.45
-washer 0
-hey[2]
-hey[0].ho[1]
-hey[0].ho[0].lets[3]
-hey[0].ho[0].lets[0].go "slayer"
-hey[0].ho[0].lets[0].fishing -1
-hey[0].ho[0].lets[0].ref JA
-hey[0].ho[0].lets[1].go "gate"
-hey[0].ho[0].lets[1].ref :parent:
-hey[0].ho[0].lets[str].go "strung"
-hey[0].ho[0].me 0.0
-hey[fooo].ho[0]
-
-hey[0] 78
-
-longVal 500
-longWithRange -9000000000
-longArr[2]
-longArr[0] 0
-longArr[1] 1
-longArrWithRange[1]
-longArrWithRange[0] 9000000000
-
-bar.arrline[0] "foo"
-bar.arrline[1] "bar"
-
-fileVal "/nodefault/file"
-fileArr[2]
-fileArr[0] "keyb.com"
-fileArr[1] "2b4a64d0cb36d44ba8a52506d9fe480bf3511be6"
-
-urlprefix "foo"
-templateset[2]
-templateset[0].urlprefix "/basic"
-#templateset[0].mimetype "text/xml"
-templateset[0].encoding "ut\"f-8\n' <hit relevancy=\"$relevancy\">\n#foreach"
-templateset[0].headertemplate "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n<resultset totalhits=\"$result.hitCount\">\n"
-templateset[0].footertemplate "</resultset>\n"
-templateset[0].nohitstemplate "<empty/>\n"
-templateset[0].hittemplate "<hit relevancy=\"$relevancy\">\n#foreach( $key in $hit.getPropertyKeySet() )\n <field name='$key'>$hit.getPropertyXML($key)</field>\n#end\n</hit>\n"
-templateset[0].errortemplate "<ERROR CODE=\"$result.error.code\">$result.error.message</ERROR>\n"
-templateset[1].urlprefix "/xsearch"
-templateset[1].mimetype "text/xml"
-templateset[1].encoding "utf-8"
-templateset[1].rankprofile 0
-templateset[1].headertemplate "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n<RESULTSET TOTALHITS=\"$result.hitCount\">\n"
-templateset[1].footertemplate "</RESULTSET>\n"
-templateset[1].nohitstemplate "0.56"
-templateset[1].hittemplate "<HIT RELEVANCY=\"$relevancy\" TYPE=\"$hit.typeString\">\n<FIELD NAME=\"uri\">$uri</FIELD>\n<FIELD NAME=\"category\">$category</FIELD>\n<FIELD NAME=\"bsumtitle\">$bsumtitle</FIELD>\n</HIT>\n"
-templateset[1].errortemplate "45"
-config[0].id :parent:
-config[0].autostart
-hi[0].there[0].e BATCH
-ilscript[music].name music
-ilscript[music].doctype music
-ilscript[music].content[1]
-ilscript[music].content[0] "\"music\" | summary sddocname | lowercase | index sddocname;"
-
-musum *
-
-auran "value=\"Confirm\"/></form>\"\"Tuna - step three."
-
-route[1]
-route[0].name "search/cluster.books2"
-route[0].selector "books.isbn=\"none\""
-route[0].feed "books"
-
-languages[3]
-languages[2] "swahili"
-
-languages2[2]
-languages2[0] "swedish"
-
-foolang[5]
-foolang[0].lang[3]
-foolang[0].lang[1] "Swahili"
-foolang[1].lang[3]
-foolang[2].lang[3]
-foolang[2].lang[0] "Setswana"
-foolang[3].lang[4]
-foolang[3].lang[3] "Norwegian"
-foolang[4].lang[2]
-
-myIntMap{"foo"} 67
-myIntMap{"bar"} 68
-myStringMap{"fo"} "gh"
-myStringMap{"ba"} "ij"
-myStructMap{"FOO"}.myInt 78
-myStructMap{"FOO"}.myString "myFoo"
-myStructMap{"FOOO"}.myInt 89
-myStructMap{"FOOO"}.myString "myFooS"
-myStructMap{"FOOO"}.myIntDef -99
-myStructMap{"FOOO"}.myStringDef "myFooSBall"
-
-myStructMap{"FOO"}.myStruct.a "guitar"
-myStructMap{"FOO"}.myStruct.c /tmp
-
-myStructMap{"FOOO"}.myStruct.a "bass"
-myStructMap{"FOOO"}.myStruct.b "drums"
-myStructMap{"FOOO"}.myStruct.c /var/log
-
-myStructMap{"FOO"}.myStructNestedArray[0] -9
-myStructMap{"FOO"}.myStructNestedArray[1] -10
-
-myStructMap{"FOO"}.myNestedLeafMap{"Nested1"} 90
-myStructMap{"FOO"}.myNestedLeafMap{"Nested2"} 9000
-
-myStructMap{"FOO"}.myNestedMap{"Nested3"}.myLong 809
-myStructMap{"FOO"}.myNestedMap{"Nested3"}.myLongDef 810
-myStructMap{"FOO"}.myNestedMap{"Nested4"}.myLong 811
diff --git a/config/src/test/java/com/yahoo/vespa/config/configsglobal/testfoobar.12.cfg b/config/src/test/java/com/yahoo/vespa/config/configsglobal/testfoobar.12.cfg
deleted file mode 100644
index 3d23f7fd29a..00000000000
--- a/config/src/test/java/com/yahoo/vespa/config/configsglobal/testfoobar.12.cfg
+++ /dev/null
@@ -1,105 +0,0 @@
-vh[5]
-vh[0] 0.3345
-vh[1] -0.3
-vh[2] 134.3
-vh[3] 234.34
-vh[4] -24.3
-bg[5]
-bg[0] 5
-bg[1]
-bg[2] 10
-bg[3] 4
-bg[4] -2
-gee[2]
-gee[0] "jabbaTuna"
-gee[1] jabba:500/dusteri
-storage[2]
-storage[0].distributor[2]
-storage[0].distributor[0] "tra"
-storage[0].distributor[1] "de"
-storage[0].feeder[1]
-storage[0].feeder[0] "li"
-#storage[0].feeder[1] "dum"
-storage[1].distributor[3]
-storage[1].distributor[0] "Etra"
-storage[1].distributor[1] "Ede"
-#storage[1].feeder[3]
-storage[1].feeder[0] "Eli"
-storage[1].feeder[1] "Edum"
-storage[1].feeder[2] "TEdum"
-
-#ju[].hu[].tu[] double default=45 range=[0,100.1]
-#ju[].hu[].wu[] enum { HEY, HO} default=HO
-#ju[].hu[].tang[] bool default=false
-#ju[].hu[].clan[] int default=45 range=[-90,90]
-
-ju[2]
-ju[0].hu[2]
-ju[0].hu[0].tu[2]
-ju[0].hu[0].tu[0] 45.0
-ju[0].hu[0].tu[1] 0.0
-ju[0].hu[0].tang[2]
-ju[0].hu[0].tang[0] true
-#ju[0].hu[0].tang[1] 0.0
-
-#ju[0].hu[1].tu[2]
-ju[0].hu[1].tu[0] 667.865
-ju[0].hu[0].wu[2]
-ju[0].hu[0].wu[0] HEY
-ju[0].hu[0].wu[1] HEY
-ju[0].hu[1].wu[1]
-ju[0].hu[1].wu[0] HO
-
-ju[1].hu[2]
-ju[1].hu[0].tu[2]
-ju[1].hu[0].tu[0] 78
-ju[1].hu[0].tu[1] 78.9
-ju[1].hu[1].tu[1]
-ju[1].hu[1].tu[0] 88.9
-
-ju[1].hu[0].wu[3]
-ju[1].hu[0].wu[0] HEY
-ju[1].hu[0].wu[1] HEY
-ju[1].hu[0].wu[2] HO
-ju[1].hu[1].wu[1]
-ju[1].hu[1].wu[0] HO
-ju[1].hu[1].clan[4]
-ju[1].hu[1].clan[0] 5
-ju[1].hu[1].clan[1] 6
-ju[1].hu[1].clan[2] 7
-ju[1].hu[1].clan[3] 8
-foo "123aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa gh:sann\" da"
-headertemplate "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0 Transitional//EN\">\n<html>\n<head>\n<title>ranqualizer:$hostname:$profile:</title>\n<link type=\"text/css\" rel=\"stylesheet\" href=\"/layout.css\"/>\n<meta name=\"ROBOTS\" content=\"NOINDEX,NOFOLLOW\"/><meta http-equiv=\"Cache-control\" content=\"no-cache\"/><meta http-equiv=\"Cache-control\" content=\"must-revalidate\"/><meta http-equiv=\"Cache-control\" content=\"max-age=0\"/><meta http-equiv=\"Content-type\" content=\"text/html; charset=utf-8\"/><link rel=\"icon\" href=\"/favicon.ico\"/><script type=\"text/javascript\">function show(foo,f){document.getElementById(foo).style.display=\"block\";} function hide(foo,f){document.getElementById(foo).style.display = \"none\";}</script></head>\n<body>\n"
-ptport 10108
-rankcf :parent:
-hport 10109
-frtport 10107
-transportnodelay true
-partition 0
-datasetdir none
-proximity "yes"
-vsmconfig ""
-documentmanagerconfigid :parent:
-include: search/cluster.music
-include: search/cluster.music2
-
-rankprofile[extra].fef.property[4]
-rankprofile[ignore].fef.property[4].name "vespa.dump.ignoredefaultfeatures"
-rankprofile[ignore].fef.property[4].value true
-
-specialchars "索 索尼 DVD±R"
-
-tokenlist[1]
-tokenlist[0].name "default"
-tokenlist[0].tokens[8]
-tokenlist[0].tokens[0].token "c++"
-tokenlist[0].tokens[1].token "wal-mart"
-tokenlist[0].tokens[1].replace "walmart"
-tokenlist[0].tokens[2].token ".net"
-tokenlist[0].tokens[3].token "-ç´¢"
-tokenlist[0].tokens[4].token "sony"
-tokenlist[0].tokens[4].replace "索尼"
-tokenlist[0].tokens[5].token "dvd+-r"
-tokenlist[0].tokens[6].token "DVD±R"
-tokenlist[0].tokens[7].token "dvdplusminusr"
-tokenlist[0].tokens[7].replace "dvd+-r"
diff --git a/configdefinitions/src/vespa/configserver.def b/configdefinitions/src/vespa/configserver.def
index c90709bf4dd..0e32540ec83 100644
--- a/configdefinitions/src/vespa/configserver.def
+++ b/configdefinitions/src/vespa/configserver.def
@@ -68,3 +68,4 @@ sleepTimeWhenRedeployingFails long default=30
# Feature Flags (poor man's feature flags, to be overridden in configserver-config.xml if needed)
deleteApplicationLegacy bool default=false
buildMinimalSetOfConfigModels bool default=true
+useDedicatedNodeForLogserver bool default=false
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
index 914d6963ff0..acf49fe51be 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
@@ -125,6 +125,7 @@ public class ModelContextImpl implements ModelContext {
private final Set<Rotation> rotations;
private final boolean isBootstrap;
private final boolean isFirstTimeDeployment;
+ private final boolean useDedicatedNodeForLogserver;
public Properties(ApplicationId applicationId,
boolean multitenant,
@@ -136,7 +137,8 @@ public class ModelContextImpl implements ModelContext {
Zone zone,
Set<Rotation> rotations,
boolean isBootstrap,
- boolean isFirstTimeDeployment) {
+ boolean isFirstTimeDeployment,
+ boolean useDedicatedNodeForLogserver) {
this.applicationId = applicationId;
this.multitenant = multitenant;
this.configServerSpecs = configServerSpecs;
@@ -148,6 +150,7 @@ public class ModelContextImpl implements ModelContext {
this.rotations = rotations;
this.isBootstrap = isBootstrap;
this.isFirstTimeDeployment = isFirstTimeDeployment;
+ this.useDedicatedNodeForLogserver = useDedicatedNodeForLogserver;
}
@Override
@@ -186,6 +189,9 @@ public class ModelContextImpl implements ModelContext {
@Override
public boolean isFirstTimeDeployment() { return isFirstTimeDeployment; }
+
+ @Override
+ public boolean useDedicatedNodeForLogserver() { return useDedicatedNodeForLogserver; }
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ConfigServerMaintenance.java b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ConfigServerMaintenance.java
index c1fc484e23c..82b692e682f 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ConfigServerMaintenance.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ConfigServerMaintenance.java
@@ -19,7 +19,7 @@ import java.time.Duration;
*/
public class ConfigServerMaintenance extends AbstractComponent {
- private final TenantsMaintainer tenantsMaintainer;
+ //private final TenantsMaintainer tenantsMaintainer;
private final ZooKeeperDataMaintainer zooKeeperDataMaintainer;
private final FileDistributionMaintainer fileDistributionMaintainer;
private final SessionsMaintainer sessionsMaintainer;
@@ -30,7 +30,8 @@ public class ConfigServerMaintenance extends AbstractComponent {
Curator curator,
FileDistributionFactory fileDistributionFactory) {
DefaultTimes defaults = new DefaultTimes(configserverConfig);
- tenantsMaintainer = new TenantsMaintainer(applicationRepository, curator, defaults.tenantsMaintainerInterval);
+ // TODO: Disabled until we have application metadata about applications
+ //tenantsMaintainer = new TenantsMaintainer(applicationRepository, curator, defaults.tenantsMaintainerInterval);
zooKeeperDataMaintainer = new ZooKeeperDataMaintainer(applicationRepository, curator, defaults.defaultInterval);
fileDistributionMaintainer = new FileDistributionMaintainer(applicationRepository, curator, defaults.defaultInterval, configserverConfig);
sessionsMaintainer = new SessionsMaintainer(applicationRepository, curator, defaults.defaultInterval);
@@ -38,7 +39,7 @@ public class ConfigServerMaintenance extends AbstractComponent {
@Override
public void deconstruct() {
- tenantsMaintainer.deconstruct();
+ //tenantsMaintainer.deconstruct();
zooKeeperDataMaintainer.deconstruct();
fileDistributionMaintainer.deconstruct();
sessionsMaintainer.deconstruct();
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ActivatedModelsBuilder.java b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ActivatedModelsBuilder.java
index c18d3c7fe48..b6346677d6b 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ActivatedModelsBuilder.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ActivatedModelsBuilder.java
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.server.modelfactory;
-import com.yahoo.cloud.config.ConfigserverConfig;
import com.yahoo.config.application.api.ApplicationPackage;
import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.config.model.api.ConfigDefinitionRepo;
@@ -16,6 +15,7 @@ import com.yahoo.config.provision.Version;
import com.yahoo.log.LogLevel;
import com.yahoo.vespa.config.server.ConfigServerSpec;
import com.yahoo.vespa.config.server.GlobalComponentRegistry;
+import com.yahoo.vespa.config.server.provision.HostProvisionerProvider;
import com.yahoo.vespa.config.server.tenant.Rotations;
import com.yahoo.vespa.config.server.tenant.TenantRepository;
import com.yahoo.vespa.config.server.application.Application;
@@ -54,7 +54,8 @@ public class ActivatedModelsBuilder extends ModelsBuilder<Application> {
public ActivatedModelsBuilder(TenantName tenant, long appGeneration, SessionZooKeeperClient zkClient, GlobalComponentRegistry globalComponentRegistry) {
super(globalComponentRegistry.getModelFactoryRegistry(),
globalComponentRegistry.getConfigserverConfig(),
- globalComponentRegistry.getZone());
+ globalComponentRegistry.getZone(),
+ HostProvisionerProvider.from(globalComponentRegistry.getHostProvisioner()));
this.tenant = tenant;
this.appGeneration = appGeneration;
this.zkClient = zkClient;
@@ -74,6 +75,7 @@ public class ActivatedModelsBuilder extends ModelsBuilder<Application> {
Instant now) {
log.log(LogLevel.DEBUG, String.format("Loading model version %s for session %s application %s",
modelFactory.getVersion(), appGeneration, applicationId));
+ ModelContext.Properties modelContextProperties = createModelContextProperties(applicationId);
ModelContext modelContext = new ModelContextImpl(
applicationPackage,
Optional.empty(),
@@ -81,8 +83,8 @@ public class ActivatedModelsBuilder extends ModelsBuilder<Application> {
logger,
configDefinitionRepo,
getForVersionOrLatest(applicationPackage.getFileRegistryMap(), modelFactory.getVersion()).orElse(new MockFileRegistry()),
- createStaticProvisioner(applicationPackage.getAllocatedHosts()),
- createModelContextProperties(applicationId),
+ createStaticProvisioner(applicationPackage.getAllocatedHosts(), modelContextProperties),
+ modelContextProperties,
Optional.empty(),
new com.yahoo.component.Version(modelFactory.getVersion().toString()),
wantedNodeVespaVersion);
@@ -115,7 +117,8 @@ public class ActivatedModelsBuilder extends ModelsBuilder<Application> {
zone(),
new Rotations(curator, TenantRepository.getTenantPath(tenant)).readRotationsFromZooKeeper(applicationId),
false, // We may be bootstrapping, but we only know and care during prepare
- false); // Always false, assume no one uses it when activating
+ false, // Always false, assume no one uses it when activating
+ configserverConfig.useDedicatedNodeForLogserver());
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java
index 15834a9eaa0..27343e71a87 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java
@@ -5,6 +5,7 @@ import com.google.common.util.concurrent.UncheckedTimeoutException;
import com.yahoo.cloud.config.ConfigserverConfig;
import com.yahoo.config.application.api.ApplicationPackage;
import com.yahoo.config.model.api.HostProvisioner;
+import com.yahoo.config.model.api.ModelContext;
import com.yahoo.config.model.api.ModelFactory;
import com.yahoo.config.provision.AllocatedHosts;
import com.yahoo.config.provision.ApplicationId;
@@ -16,6 +17,8 @@ import com.yahoo.lang.SettableOptional;
import com.yahoo.log.LogLevel;
import com.yahoo.vespa.config.server.http.InternalServerException;
import com.yahoo.vespa.config.server.http.UnknownVespaVersionException;
+import com.yahoo.vespa.config.server.provision.HostProvisionerProvider;
+import com.yahoo.vespa.config.server.provision.ProvisionerAdapter;
import com.yahoo.vespa.config.server.provision.StaticProvisioner;
import java.time.Instant;
@@ -48,11 +51,15 @@ public abstract class ModelsBuilder<MODELRESULT extends ModelResult> {
private final Zone zone;
- protected ModelsBuilder(ModelFactoryRegistry modelFactoryRegistry, ConfigserverConfig configserverConfig, Zone zone) {
+ private final HostProvisionerProvider hostProvisionerProvider;
+
+ ModelsBuilder(ModelFactoryRegistry modelFactoryRegistry, ConfigserverConfig configserverConfig,
+ Zone zone, HostProvisionerProvider hostProvisionerProvider) {
this.modelFactoryRegistry = modelFactoryRegistry;
this.configserverConfig = configserverConfig;
this.hosted = configserverConfig.hostedVespa();
this.zone = zone;
+ this.hostProvisionerProvider = hostProvisionerProvider;
}
/** Returns the zone this is running in */
@@ -211,10 +218,15 @@ public abstract class ModelsBuilder<MODELRESULT extends ModelResult> {
* returns empty otherwise, which may either mean that no hosts are allocated or that we are running
* non-hosted and should default to use hosts defined in the application package, depending on context
*/
- protected Optional<HostProvisioner> createStaticProvisioner(Optional<AllocatedHosts> allocatedHosts) {
+ Optional<HostProvisioner> createStaticProvisioner(Optional<AllocatedHosts> allocatedHosts, ModelContext.Properties properties) {
if (hosted && allocatedHosts.isPresent())
- return Optional.of(new StaticProvisioner(allocatedHosts.get()));
+ return Optional.of(new StaticProvisioner(allocatedHosts.get(), createNodeRepositoryProvisioner(properties).get()));
return Optional.empty();
}
+ Optional<HostProvisioner> createNodeRepositoryProvisioner(ModelContext.Properties properties) {
+ return hostProvisionerProvider.getHostProvisioner().map(
+ provisioner -> new ProvisionerAdapter(provisioner, properties.applicationId()));
+ }
+
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/PreparedModelsBuilder.java b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/PreparedModelsBuilder.java
index 56bdd432d90..4c43abf5faa 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/PreparedModelsBuilder.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/PreparedModelsBuilder.java
@@ -25,7 +25,6 @@ import com.yahoo.vespa.config.server.application.PermanentApplicationPackage;
import com.yahoo.vespa.config.server.deploy.ModelContextImpl;
import com.yahoo.vespa.config.server.filedistribution.FileDistributionProvider;
import com.yahoo.vespa.config.server.provision.HostProvisionerProvider;
-import com.yahoo.vespa.config.server.provision.ProvisionerAdapter;
import com.yahoo.vespa.config.server.provision.StaticProvisioner;
import com.yahoo.vespa.config.server.session.FileDistributionFactory;
import com.yahoo.vespa.config.server.session.PrepareParams;
@@ -52,7 +51,6 @@ public class PreparedModelsBuilder extends ModelsBuilder<PreparedModelsBuilder.P
private final DeployLogger logger;
private final PrepareParams params;
private final FileDistributionFactory fileDistributionFactory;
- private final HostProvisionerProvider hostProvisionerProvider;
private final Optional<ApplicationSet> currentActiveApplicationSet;
private final ModelContext.Properties properties;
@@ -67,12 +65,11 @@ public class PreparedModelsBuilder extends ModelsBuilder<PreparedModelsBuilder.P
Optional<ApplicationSet> currentActiveApplicationSet,
ModelContext.Properties properties,
ConfigserverConfig configserverConfig) {
- super(modelFactoryRegistry, configserverConfig, properties.zone());
+ super(modelFactoryRegistry, configserverConfig, properties.zone(), hostProvisionerProvider);
this.permanentApplicationPackage = permanentApplicationPackage;
this.configDefinitionRepo = configDefinitionRepo;
this.fileDistributionFactory = fileDistributionFactory;
- this.hostProvisionerProvider = hostProvisionerProvider;
this.context = context;
this.logger = logger;
@@ -127,12 +124,8 @@ public class PreparedModelsBuilder extends ModelsBuilder<PreparedModelsBuilder.P
Optional<HostProvisioner> nodeRepositoryProvisioner = createNodeRepositoryProvisioner(properties);
if ( ! allocatedHosts.isPresent()) return nodeRepositoryProvisioner;
- Optional<HostProvisioner> staticProvisioner = createStaticProvisioner(allocatedHosts);
+ Optional<HostProvisioner> staticProvisioner = createStaticProvisioner(allocatedHosts, properties);
if ( ! staticProvisioner.isPresent()) return Optional.empty(); // Since we have hosts allocated this means we are on non-hosted
-
- // The following option should not be possible, but since there is a right action for it we can take it
- if ( ! nodeRepositoryProvisioner.isPresent())
- return Optional.of(new StaticProvisioner(allocatedHosts.get()));
// Nodes are already allocated by a model and we should use them unless this model requests hosts from a
// previously unallocated cluster. This allows future models to stop allocate certain clusters.
@@ -154,12 +147,6 @@ public class PreparedModelsBuilder extends ModelsBuilder<PreparedModelsBuilder.P
.collect(Collectors.toList()));
}
- private Optional<HostProvisioner> createNodeRepositoryProvisioner(ModelContext.Properties properties) {
- return hostProvisionerProvider.getHostProvisioner().map(
- provisioner -> new ProvisionerAdapter(provisioner, properties.applicationId()));
- }
-
-
/** The result of preparing a single model version */
public static class PreparedModelResult implements ModelResult {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/provision/StaticProvisioner.java b/configserver/src/main/java/com/yahoo/vespa/config/server/provision/StaticProvisioner.java
index 7e97690331f..b31f4501767 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/provision/StaticProvisioner.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/provision/StaticProvisioner.java
@@ -5,7 +5,6 @@ import com.yahoo.config.model.api.HostProvisioner;
import com.yahoo.config.provision.*;
import java.util.List;
-import java.util.Optional;
import java.util.stream.Collectors;
/**
@@ -21,13 +20,6 @@ public class StaticProvisioner implements HostProvisioner {
private final HostProvisioner fallback;
/**
- * Creates a static host provisioner with no fallback
- */
- public StaticProvisioner(AllocatedHosts allocatedHosts) {
- this(allocatedHosts, null);
- }
-
- /**
* Creates a static host provisioner which will fall back to using the given provisioner
* if a request is made for nodes in a cluster which is not present in this allocation.
*/
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java
index 49287669a06..4edde0904a4 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java
@@ -159,7 +159,8 @@ public class SessionPreparer {
zone,
rotationsSet,
params.isBootstrap(),
- ! currentActiveApplicationSet.isPresent());
+ ! currentActiveApplicationSet.isPresent(),
+ configserverConfig.useDedicatedNodeForLogserver());
this.preparedModelsBuilder = new PreparedModelsBuilder(modelFactoryRegistry,
permanentApplicationPackage,
configDefinitionRepo,
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantListener.java b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantListener.java
index 8778d6a585c..b643a0664e8 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantListener.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantListener.java
@@ -17,14 +17,14 @@ public interface TenantListener {
* @param tenant name of newly created tenant.
* @param provider provider of request and reload handlers for new tenant.
*/
- public void onTenantCreate(TenantName tenant, TenantHandlerProvider provider);
+ void onTenantCreate(TenantName tenant, TenantHandlerProvider provider);
/**
* Called whenever a tenant is deleted.
*
* @param tenant name of deleted tenant.
*/
- public void onTenantDelete(TenantName tenant);
+ void onTenantDelete(TenantName tenant);
/**
* Called when all tenants have been loaded at startup.
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/ModelContextImplTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/ModelContextImplTest.java
index 28fc179770a..43425aec13b 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/ModelContextImplTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/ModelContextImplTest.java
@@ -24,7 +24,7 @@ import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
/**
- * @author lulf
+ * @author Ulf Lilleengen
*/
public class ModelContextImplTest {
@Test
@@ -52,6 +52,7 @@ public class ModelContextImplTest {
Zone.defaultZone(),
rotations,
false,
+ false,
false),
Optional.empty(),
new Version(6),
@@ -69,5 +70,6 @@ public class ModelContextImplTest {
assertFalse(context.properties().hostedVespa());
assertThat(context.properties().rotations(), equalTo(rotations));
assertThat(context.properties().isFirstTimeDeployment(), equalTo(false));
+ assertThat(context.properties().useDedicatedNodeForLogserver(), equalTo(false));
}
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/TestComponentRegistry.java b/configserver/src/test/java/com/yahoo/vespa/config/server/TestComponentRegistry.java
index 5f00499598a..58bd1485dad 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/TestComponentRegistry.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/TestComponentRegistry.java
@@ -27,7 +27,6 @@ import java.time.Clock;
import java.util.Collections;
import java.util.Optional;
-import static com.yahoo.vespa.config.server.SuperModelRequestHandlerTest.emptyNodeFlavors;
/**
* @author Ulf Lilleengen
@@ -56,7 +55,6 @@ public class TestComponentRegistry implements GlobalComponentRegistry {
ModelFactoryRegistry modelFactoryRegistry,
PermanentApplicationPackage permanentApplicationPackage,
FileDistributionFactory fileDistributionFactory,
- SuperModelGenerationCounter superModelGenerationCounter,
HostRegistries hostRegistries,
ConfigserverConfig configserverConfig,
SessionPreparer sessionPreparer,
@@ -72,7 +70,7 @@ public class TestComponentRegistry implements GlobalComponentRegistry {
this.configserverConfig = configserverConfig;
this.reloadListener = reloadListener;
this.tenantListener = tenantListener;
- this.superModelGenerationCounter = superModelGenerationCounter;
+ this.superModelGenerationCounter = new SuperModelGenerationCounter(curator);
this.defRepo = defRepo;
this.permanentApplicationPackage = permanentApplicationPackage;
this.hostRegistries = hostRegistries;
@@ -88,7 +86,6 @@ public class TestComponentRegistry implements GlobalComponentRegistry {
public static class Builder {
private Curator curator = new MockCurator();
- private Optional<ConfigCurator> configCurator = Optional.empty();
private Metrics metrics = Metrics.createTestMetrics();
private ConfigserverConfig configserverConfig = new ConfigserverConfig(
new ConfigserverConfig.Builder()
@@ -115,11 +112,6 @@ public class TestComponentRegistry implements GlobalComponentRegistry {
return this;
}
- public Builder configCurator(ConfigCurator configCurator) {
- this.configCurator = Optional.ofNullable(configCurator);
- return this;
- }
-
public Builder metrics(Metrics metrics) {
this.metrics = metrics;
return this;
@@ -155,21 +147,16 @@ public class TestComponentRegistry implements GlobalComponentRegistry {
.orElse(new PermanentApplicationPackage(configserverConfig));
FileDistributionFactory fileDistributionFactory = this.fileDistributionFactory
.orElse(new MockFileDistributionFactory(configserverConfig));
- HostProvisionerProvider hostProvisionerProvider = hostProvisioner.isPresent() ?
- HostProvisionerProvider.withProvisioner(hostProvisioner.get()) :
- HostProvisionerProvider.empty();
+ HostProvisionerProvider hostProvisionerProvider = hostProvisioner.
+ map(HostProvisionerProvider::withProvisioner).orElseGet(HostProvisionerProvider::empty);
SessionPreparer sessionPreparer = new SessionPreparer(modelFactoryRegistry, fileDistributionFactory,
hostProvisionerProvider, permApp,
configserverConfig, defRepo, curator,
zone);
- return new TestComponentRegistry(curator, configCurator.orElse(ConfigCurator.create(curator)),
- metrics, modelFactoryRegistry,
- permApp,
- fileDistributionFactory,
- new SuperModelGenerationCounter(curator),
- hostRegistries, configserverConfig, sessionPreparer,
- hostProvisioner, defRepo, reloadListener,
- tenantListener, zone, clock);
+ return new TestComponentRegistry(curator, ConfigCurator.create(curator), metrics, modelFactoryRegistry,
+ permApp, fileDistributionFactory, hostRegistries, configserverConfig,
+ sessionPreparer, hostProvisioner, defRepo, reloadListener, tenantListener,
+ zone, clock);
}
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/SessionActiveHandlerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/SessionActiveHandlerTest.java
index 75a2d2f778d..b4e3f2374be 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/SessionActiveHandlerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/SessionActiveHandlerTest.java
@@ -43,7 +43,6 @@ import com.yahoo.vespa.config.server.session.SessionTest;
import com.yahoo.vespa.config.server.session.SessionZooKeeperClient;
import com.yahoo.vespa.config.server.tenant.TenantBuilder;
import com.yahoo.vespa.config.server.tenant.TenantRepository;
-import com.yahoo.vespa.config.server.zookeeper.ConfigCurator;
import com.yahoo.vespa.curator.Curator;
import com.yahoo.vespa.curator.mock.MockCurator;
import com.yahoo.vespa.model.VespaModelFactory;
@@ -80,7 +79,6 @@ public class SessionActiveHandlerTest extends SessionHandlerTest {
private static final String activatedMessage = " for tenant '" + tenantName + "' activated.";
private final Clock clock = Clock.systemUTC();
- private ConfigCurator configCurator;
private Curator curator;
private RemoteSessionRepo remoteSessionRepo;
private LocalSessionRepo localRepo;
@@ -99,14 +97,12 @@ public class SessionActiveHandlerTest extends SessionHandlerTest {
remoteSessionRepo = new RemoteSessionRepo(tenantName);
applicationRepo = new MemoryTenantApplications();
curator = new MockCurator();
- configCurator = ConfigCurator.create(curator);
localRepo = new LocalSessionRepo(clock, curator);
pathPrefix = "/application/v2/tenant/" + tenantName + "/session/";
hostProvisioner = new MockProvisioner();
modelFactory = new VespaModelFactory(new NullConfigModelRegistry());
componentRegistry = new TestComponentRegistry.Builder()
.curator(curator)
- .configCurator(configCurator)
.modelFactoryRegistry(new ModelFactoryRegistry(Collections.singletonList(modelFactory)))
.build();
TenantBuilder tenantBuilder = TenantBuilder.create(componentRegistry, tenantName)
@@ -218,7 +214,8 @@ public class SessionActiveHandlerTest extends SessionHandlerTest {
private RemoteSession createRemoteSession(long sessionId, Session.Status status, SessionZooKeeperClient zkClient, Clock clock) throws IOException {
zkClient.writeStatus(status);
- ZooKeeperClient zkC = new ZooKeeperClient(configCurator, new BaseDeployLogger(), false, TenantRepository.getSessionsPath(tenantName).append(String.valueOf(sessionId)));
+ ZooKeeperClient zkC = new ZooKeeperClient(componentRegistry.getConfigCurator(), new BaseDeployLogger(), false,
+ TenantRepository.getSessionsPath(tenantName).append(String.valueOf(sessionId)));
zkC.write(Collections.singletonMap(modelFactory.getVersion(), new MockFileRegistry()));
zkC.write(AllocatedHosts.withHosts(Collections.emptySet()));
RemoteSession session = new RemoteSession(tenantName, sessionId, componentRegistry, zkClient, clock);
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/provision/StaticProvisionerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/provision/StaticProvisionerTest.java
index badcdf53b77..17ad741d182 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/provision/StaticProvisionerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/provision/StaticProvisionerTest.java
@@ -30,7 +30,7 @@ public class StaticProvisionerTest {
InMemoryProvisioner inMemoryHostProvisioner = new InMemoryProvisioner(false, "host1.yahoo.com", "host2.yahoo.com", "host3.yahoo.com", "host4.yahoo.com");
VespaModel firstModel = createModel(app, inMemoryHostProvisioner);
- StaticProvisioner staticProvisioner = new StaticProvisioner(firstModel.allocatedHosts());
+ StaticProvisioner staticProvisioner = new StaticProvisioner(firstModel.allocatedHosts(), null);
VespaModel secondModel = createModel(app, staticProvisioner);
assertModelConfig(firstModel, secondModel);
diff --git a/container-core/src/main/java/com/yahoo/container/handler/LogHandler.java b/container-core/src/main/java/com/yahoo/container/handler/LogHandler.java
index 4c12bacf145..d1f84aefeaa 100644
--- a/container-core/src/main/java/com/yahoo/container/handler/LogHandler.java
+++ b/container-core/src/main/java/com/yahoo/container/handler/LogHandler.java
@@ -17,10 +17,16 @@ import java.util.concurrent.Executor;
public class LogHandler extends ThreadedHttpRequestHandler {
private static final String LOG_DIRECTORY = "/home/y/logs/vespa/logarchive/";
+ private final LogReader logReader;
@Inject
public LogHandler(Executor executor) {
+ this(executor, new LogReader());
+ }
+
+ protected LogHandler(Executor executor, LogReader logReader) {
super(executor);
+ this.logReader = logReader;
}
@Override
@@ -30,10 +36,9 @@ public class LogHandler extends ThreadedHttpRequestHandler {
HashMap<String, String> apiParams = getParameters(request);
long earliestLogThreshold = getEarliestThreshold(apiParams);
long latestLogThreshold = getLatestThreshold(apiParams);
- LogReader logReader= new LogReader(earliestLogThreshold, latestLogThreshold);
try {
- JSONObject logJson = logReader.readLogs(LOG_DIRECTORY);
- responseJSON.put("logs", logJson.toString());
+ JSONObject logJson = logReader.readLogs(LOG_DIRECTORY, earliestLogThreshold, latestLogThreshold);
+ responseJSON.put("logs", logJson);
} catch (IOException | JSONException e) {
return new HttpResponse(404) {
@Override
diff --git a/container-core/src/main/java/com/yahoo/container/handler/LogReader.java b/container-core/src/main/java/com/yahoo/container/handler/LogReader.java
index 2483f2497d0..ae43d850258 100644
--- a/container-core/src/main/java/com/yahoo/container/handler/LogReader.java
+++ b/container-core/src/main/java/com/yahoo/container/handler/LogReader.java
@@ -7,18 +7,16 @@ import javax.xml.bind.DatatypeConverter;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
+import java.nio.file.attribute.BasicFileAttributes;
public class LogReader {
long earliestLogThreshold;
long latestLogThreshold;
- public LogReader(long earliestLogThreshold, long latestLogThreshold) {
+ protected JSONObject readLogs(String logDirectory, long earliestLogThreshold, long latestLogThreshold) throws IOException, JSONException {
this.earliestLogThreshold = earliestLogThreshold;
this.latestLogThreshold = latestLogThreshold;
- }
-
- protected JSONObject readLogs(String logDirectory) throws IOException, JSONException {
JSONObject json = new JSONObject();
File root = new File(logDirectory);
traverse_folder(root, json, "");
@@ -28,9 +26,7 @@ public class LogReader {
private void traverse_folder(File root, JSONObject json, String filename) throws IOException, JSONException {
File[] files = root.listFiles();
for(File child : files) {
- File temp = child;
- JSONObject childJson = new JSONObject();
- long logTime = child.lastModified();
+ long logTime = Files.readAttributes(child.toPath(), BasicFileAttributes.class).creationTime().toMillis();
if(child.isFile() && earliestLogThreshold < logTime && logTime < latestLogThreshold) {
json.put(filename + child.getName(), DatatypeConverter.printBase64Binary(Files.readAllBytes(child.toPath())));
}
diff --git a/container-core/src/test/java/com/yahoo/container/handler/LogHandlerTest.java b/container-core/src/test/java/com/yahoo/container/handler/LogHandlerTest.java
new file mode 100644
index 00000000000..5a3b62be287
--- /dev/null
+++ b/container-core/src/test/java/com/yahoo/container/handler/LogHandlerTest.java
@@ -0,0 +1,54 @@
+package com.yahoo.container.handler;
+
+import com.yahoo.container.jdisc.HttpRequest;
+import com.yahoo.container.jdisc.HttpResponse;
+import org.json.JSONException;
+import org.json.JSONObject;
+import org.junit.Test;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.util.concurrent.Executor;
+
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.mock;
+
+public class LogHandlerTest {
+
+
+ @Test
+ public void handleCorrectlyParsesQueryParameters() throws IOException {
+ MockLogReader mockLogReader = new MockLogReader();
+ LogHandler logHandler = new LogHandler(mock(Executor.class), mockLogReader);
+
+ {
+ String uri = "http://myhost.com:1111/logs?from=1000&to=2000";
+ HttpResponse response = logHandler.handle(HttpRequest.createTestRequest(uri, com.yahoo.jdisc.http.HttpRequest.Method.GET));
+ ByteArrayOutputStream bos = new ByteArrayOutputStream();
+ response.render(bos);
+ String expectedResponse = "{\"logs\":{\"one\":\"newer_log\"}}";
+ assertEquals(expectedResponse, bos.toString());
+ }
+
+ {
+ String uri = "http://myhost.com:1111/logs?from=0&to=1000";
+ HttpResponse response = logHandler.handle(HttpRequest.createTestRequest(uri, com.yahoo.jdisc.http.HttpRequest.Method.GET));
+ ByteArrayOutputStream bos = new ByteArrayOutputStream();
+ response.render(bos);
+ String expectedResponse = "{\"logs\":{\"two\":\"older_log\"}}";
+ assertEquals(expectedResponse, bos.toString());
+ }
+
+ }
+
+ class MockLogReader extends LogReader {
+ @Override
+ protected JSONObject readLogs(String logDirectory, long earliestLogThreshold, long latestLogThreshold) throws JSONException {
+ if(latestLogThreshold > 1000) {
+ return new JSONObject("{\"one\":\"newer_log\"}");
+ } else {
+ return new JSONObject("{\"two\":\"older_log\"}");
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/container-core/src/test/java/com/yahoo/container/handler/LogReaderTest.java b/container-core/src/test/java/com/yahoo/container/handler/LogReaderTest.java
index 534026f89ac..ad779f6b7b4 100644
--- a/container-core/src/test/java/com/yahoo/container/handler/LogReaderTest.java
+++ b/container-core/src/test/java/com/yahoo/container/handler/LogReaderTest.java
@@ -20,8 +20,8 @@ public class LogReaderTest {
@Test
public void testThatFilesAreWrittenCorrectlyToOutputStream() throws Exception{
String logDirectory = "src/test/resources/logfolder/";
- LogReader logReader = new LogReader(21, Long.MAX_VALUE);
- JSONObject json = logReader.readLogs(logDirectory);
+ LogReader logReader = new LogReader();
+ JSONObject json = logReader.readLogs(logDirectory, 21, Long.MAX_VALUE);
String expected = "{\"subfolder-log2.log\":\"VGhpcyBpcyBhbm90aGVyIGxvZyBmaWxl\",\"log1.log\":\"VGhpcyBpcyBvbmUgbG9nIGZpbGU=\"}";
String actual = json.toString();
assertEquals(expected, actual);
@@ -30,8 +30,8 @@ public class LogReaderTest {
@Test
public void testThatLogsOutsideRangeAreExcluded() throws Exception {
String logDirectory = "src/test/resources/logfolder/";
- LogReader logReader = new LogReader(Long.MAX_VALUE, Long.MIN_VALUE);
- JSONObject json = logReader.readLogs(logDirectory);
+ LogReader logReader = new LogReader();
+ JSONObject json = logReader.readLogs(logDirectory, Long.MAX_VALUE, Long.MIN_VALUE);
String expected = "{}";
String actual = json.toString();
assertEquals(expected, actual);
diff --git a/container-di/src/main/java/com/yahoo/container/di/componentgraph/core/ComponentNode.java b/container-di/src/main/java/com/yahoo/container/di/componentgraph/core/ComponentNode.java
index 27298ce6c82..fbc03a38682 100644
--- a/container-di/src/main/java/com/yahoo/container/di/componentgraph/core/ComponentNode.java
+++ b/container-di/src/main/java/com/yahoo/container/di/componentgraph/core/ComponentNode.java
@@ -36,6 +36,7 @@ import static com.yahoo.container.di.componentgraph.core.Keys.createKey;
* @author ollivir
*/
public class ComponentNode extends Node {
+
private static final Logger log = Logger.getLogger(ComponentNode.class.getName());
private final Class<?> clazz;
diff --git a/container-search/src/main/java/com/yahoo/prelude/fastsearch/FS4CloseableChannel.java b/container-search/src/main/java/com/yahoo/prelude/fastsearch/FS4CloseableChannel.java
index dc95f83365e..10a640c54c8 100644
--- a/container-search/src/main/java/com/yahoo/prelude/fastsearch/FS4CloseableChannel.java
+++ b/container-search/src/main/java/com/yahoo/prelude/fastsearch/FS4CloseableChannel.java
@@ -21,15 +21,17 @@ import com.yahoo.search.result.HitGroup;
import java.io.IOException;
import java.util.Iterator;
+import java.util.List;
import java.util.Optional;
import java.util.logging.Level;
import java.util.logging.Logger;
import static com.yahoo.prelude.fastsearch.VespaBackEndSearcher.hitIterator;
+import static java.util.Arrays.asList;
/**
* {@link CloseableChannel} implementation for FS4 nodes and fdispatch
- *
+ *
* @author ollivir
*/
public class FS4CloseableChannel extends CloseableChannel {
@@ -37,6 +39,14 @@ public class FS4CloseableChannel extends CloseableChannel {
private FS4Channel channel;
private final Optional<Integer> distributionKey;
+ private ErrorMessage pendingSearchError = null;
+ private Query query = null;
+ private QueryPacket queryPacket = null;
+
+ private int expectedFillResults = 0;
+ private CacheKey summaryCacheKey = null;
+ private DocsumPacketKey[] summaryPacketKeys = null;
+
public FS4CloseableChannel(VespaBackEndSearcher searcher, Query query, FS4ResourcePool fs4ResourcePool, String hostname, int port,
int distributionKey) {
this.searcher = searcher;
@@ -56,32 +66,47 @@ public class FS4CloseableChannel extends CloseableChannel {
}
@Override
- public Result search(Query query, QueryPacket queryPacket, CacheKey cacheKey) throws IOException {
+ protected void sendSearchRequest(Query query, QueryPacket queryPacket) throws IOException {
if (isLoggingFine())
getLogger().finest("sending query packet");
+ if(queryPacket == null) {
+ // query changed for subchannel
+ queryPacket = searcher.createQueryPacket(query);
+ }
+
+ this.query = query;
+ this.queryPacket = queryPacket;
+
try {
boolean couldSend = channel.sendPacket(queryPacket);
- if (!couldSend)
- return new Result(query, ErrorMessage.createBackendCommunicationError("Could not reach '" + getName() + "'"));
+ if (!couldSend) {
+ pendingSearchError = ErrorMessage.createBackendCommunicationError("Could not reach '" + getName() + "'");
+ }
} catch (InvalidChannelException e) {
- return new Result(query, ErrorMessage.createBackendCommunicationError("Invalid channel " + getName()));
+ pendingSearchError = ErrorMessage.createBackendCommunicationError("Invalid channel " + getName());
} catch (IllegalStateException e) {
- return new Result(query, ErrorMessage.createBackendCommunicationError("Illegal state in FS4: " + e.getMessage()));
+ pendingSearchError = ErrorMessage.createBackendCommunicationError("Illegal state in FS4: " + e.getMessage());
}
+ }
+ @Override
+ protected List<Result> getSearchResults(CacheKey cacheKey) throws IOException {
+ if(pendingSearchError != null) {
+ return asList(new Result(query, pendingSearchError));
+ }
BasicPacket[] basicPackets;
try {
basicPackets = channel.receivePackets(query.getTimeLeft(), 1);
} catch (ChannelTimeoutException e) {
- return new Result(query, ErrorMessage.createTimeout("Timeout while waiting for " + getName()));
+ return asList(new Result(query, ErrorMessage.createTimeout("Timeout while waiting for " + getName())));
} catch (InvalidChannelException e) {
- return new Result(query, ErrorMessage.createBackendCommunicationError("Invalid channel for " + getName()));
+ return asList(new Result(query, ErrorMessage.createBackendCommunicationError("Invalid channel for " + getName())));
}
if (basicPackets.length == 0) {
- return new Result(query, ErrorMessage.createBackendCommunicationError(getName() + " got no packets back"));
+ return asList(new Result(query, ErrorMessage.createBackendCommunicationError(getName() + " got no packets back")));
}
if (isLoggingFine())
@@ -118,53 +143,67 @@ public class FS4CloseableChannel extends CloseableChannel {
cacheControl.cache(cacheKey, query, new DocsumPacketKey[0], packets, distributionKey);
}
}
- return result;
+ return asList(result);
}
@Override
- public void partialFill(Result result, String summaryClass) {
- Packet[] receivedPackets;
- DocsumPacketKey[] packetKeys;
-
- CacheKey cacheKey = null;
- PacketWrapper packetWrapper = null;
+ protected void sendPartialFillRequest(Result result, String summaryClass) {
+ summaryCacheKey = null;
if (searcher.getCacheControl().useCache(channel.getQuery())) {
- cacheKey = fetchCacheKeyFromHits(result.hits(), summaryClass);
- if (cacheKey == null) {
+ summaryCacheKey = fetchCacheKeyFromHits(result.hits(), summaryClass);
+ if (summaryCacheKey == null) {
QueryPacket queryPacket = QueryPacket.create(channel.getQuery());
- cacheKey = new CacheKey(queryPacket);
+ summaryCacheKey = new CacheKey(queryPacket);
+ }
+ boolean cacheFound = cacheLookupTwoPhase(summaryCacheKey, result, summaryClass);
+ if (!cacheFound) {
+ summaryCacheKey = null;
}
- packetWrapper = cacheLookupTwoPhase(cacheKey, result, summaryClass);
}
if (countFastHits(result) > 0) {
- packetKeys = getPacketKeys(result, summaryClass, false);
- if (packetKeys.length == 0) {
- receivedPackets = new Packet[0];
+ summaryPacketKeys = getPacketKeys(result, summaryClass);
+ if (summaryPacketKeys.length == 0) {
+ expectedFillResults = 0;
} else {
try {
- receivedPackets = fetchSummaries(result, summaryClass);
+ expectedFillResults = requestSummaries(result, summaryClass);
} catch (InvalidChannelException e) {
result.hits()
.addError(ErrorMessage.createBackendCommunicationError("Invalid channel " + getName() + " (summary fetch)"));
return;
- } catch (ChannelTimeoutException e) {
- result.hits().addError(ErrorMessage.createTimeout("timeout waiting for summaries from " + getName()));
- return;
} catch (IOException e) {
result.hits().addError(ErrorMessage.createBackendCommunicationError(
"IO error while talking on channel " + getName() + " (summary fetch): " + e.getMessage()));
return;
}
- if (receivedPackets.length == 0) {
- result.hits()
- .addError(ErrorMessage.createBackendCommunicationError(getName() + " got no packets back (summary fetch)"));
- return;
- }
}
} else {
- packetKeys = new DocsumPacketKey[0];
- receivedPackets = new Packet[0];
+ expectedFillResults = 0;
+ }
+ }
+
+
+ @Override
+ protected void getPartialFillResults(Result result, String summaryClass) {
+ if (expectedFillResults == 0) {
+ return;
+ }
+
+ Packet[] receivedPackets;
+ try {
+ receivedPackets = getSummaryResponses(result);
+ } catch (InvalidChannelException e1) {
+ result.hits().addError(ErrorMessage.createBackendCommunicationError("Invalid channel " + getName() + " (summary fetch)"));
+ return;
+ } catch (ChannelTimeoutException e1) {
+ result.hits().addError(ErrorMessage.createTimeout("timeout waiting for summaries from " + getName()));
+ return;
+ }
+
+ if (receivedPackets.length == 0) {
+ result.hits().addError(ErrorMessage.createBackendCommunicationError(getName() + " got no packets back (summary fetch)"));
+ return;
}
int skippedHits;
@@ -183,8 +222,8 @@ public class FS4CloseableChannel extends CloseableChannel {
"Error filling hits with summary fields, source: " + getName() + " Exception thrown: " + e.getMessage()));
return;
}
- if (skippedHits == 0 && packetWrapper != null) {
- searcher.getCacheControl().updateCacheEntry(cacheKey, channel.getQuery(), packetKeys, receivedPackets);
+ if (skippedHits == 0 && summaryCacheKey != null) {
+ searcher.getCacheControl().updateCacheEntry(summaryCacheKey, channel.getQuery(), summaryPacketKeys, receivedPackets);
}
if (skippedHits > 0)
@@ -216,12 +255,12 @@ public class FS4CloseableChannel extends CloseableChannel {
}
}
- private PacketWrapper cacheLookupTwoPhase(CacheKey cacheKey, Result result, String summaryClass) {
+ private boolean cacheLookupTwoPhase(CacheKey cacheKey, Result result, String summaryClass) {
Query query = result.getQuery();
PacketWrapper packetWrapper = searcher.getCacheControl().lookup(cacheKey, query);
if (packetWrapper == null) {
- return null;
+ return false;
}
if (packetWrapper.getNumPackets() != 0) {
for (Iterator<Hit> i = hitIterator(result); i.hasNext();) {
@@ -241,7 +280,7 @@ public class FS4CloseableChannel extends CloseableChannel {
result.analyzeHits();
}
- return packetWrapper;
+ return true;
}
private CacheKey fetchCacheKeyFromHits(HitGroup hits, String summaryClass) {
@@ -269,10 +308,8 @@ public class FS4CloseableChannel extends CloseableChannel {
return count;
}
- private Packet[] fetchSummaries(Result result, String summaryClass)
- throws InvalidChannelException, ChannelTimeoutException, ClassCastException, IOException {
+ private int requestSummaries(Result result, String summaryClass) throws InvalidChannelException, ClassCastException, IOException {
- BasicPacket[] receivedPackets;
boolean summaryNeedsQuery = searcher.summaryNeedsQuery(result.getQuery());
if (result.getQuery().getTraceLevel() >= 3)
result.getQuery().trace((summaryNeedsQuery ? "Resending " : "Not resending ") + "query during document summary fetching", 3);
@@ -287,7 +324,15 @@ public class FS4CloseableChannel extends CloseableChannel {
boolean couldSend = channel.sendPacket(docsumsPacket);
if (!couldSend)
throw new IOException("Could not successfully send GetDocSumsPacket.");
- receivedPackets = channel.receivePackets(result.getQuery().getTimeLeft(), docsumsPacket.getNumDocsums() + 1);
+
+ return docsumsPacket.getNumDocsums() + 1;
+ }
+
+ private Packet[] getSummaryResponses(Result result) throws InvalidChannelException, ChannelTimeoutException {
+ if(expectedFillResults == 0) {
+ return new Packet[0];
+ }
+ BasicPacket[] receivedPackets = channel.receivePackets(result.getQuery().getTimeLeft(), expectedFillResults);
return convertBasicPackets(receivedPackets);
}
@@ -295,11 +340,9 @@ public class FS4CloseableChannel extends CloseableChannel {
/**
* Returns an array of the hits contained in a result
*
- * @param filled
- * true to return all hits, false to return only unfilled hits
* @return array of docids, empty array if no hits
*/
- private DocsumPacketKey[] getPacketKeys(Result result, String summaryClass, boolean filled) {
+ private DocsumPacketKey[] getPacketKeys(Result result, String summaryClass) {
DocsumPacketKey[] packetKeys = new DocsumPacketKey[result.getHitCount()];
int x = 0;
@@ -307,7 +350,7 @@ public class FS4CloseableChannel extends CloseableChannel {
com.yahoo.search.result.Hit hit = i.next();
if (hit instanceof FastHit) {
FastHit fastHit = (FastHit) hit;
- if (filled || !fastHit.isFilled(summaryClass)) {
+ if (!fastHit.isFilled(summaryClass)) {
packetKeys[x] = new DocsumPacketKey(fastHit.getGlobalId(), fastHit.getPartId(), summaryClass);
x++;
}
diff --git a/container-search/src/main/java/com/yahoo/prelude/fastsearch/FastSearcher.java b/container-search/src/main/java/com/yahoo/prelude/fastsearch/FastSearcher.java
index d34d119c1fe..9acf48a7c67 100644
--- a/container-search/src/main/java/com/yahoo/prelude/fastsearch/FastSearcher.java
+++ b/container-search/src/main/java/com/yahoo/prelude/fastsearch/FastSearcher.java
@@ -28,6 +28,7 @@ import com.yahoo.search.searchchain.Execution;
import edu.umd.cs.findbugs.annotations.NonNull;
import java.io.IOException;
+import java.util.List;
import java.util.Optional;
import java.util.logging.Level;
@@ -64,9 +65,9 @@ public class FastSearcher extends VespaBackEndSearcher {
private final Dispatcher dispatcher;
private final Backend dispatchBackend;
-
+
private final FS4ResourcePool fs4ResourcePool;
-
+
/**
* Creates a Fastsearcher.
*
@@ -99,7 +100,7 @@ public class FastSearcher extends VespaBackEndSearcher {
public Pong ping(Ping ping, Execution execution) {
return ping(ping, dispatchBackend, getName());
}
-
+
public static Pong ping(Ping ping, Backend backend, String name) {
FS4Channel channel = backend.openPingChannel();
@@ -151,6 +152,7 @@ public class FastSearcher extends VespaBackEndSearcher {
}
}
+ @Override
protected void transformQuery(Query query) {
QueryRewrite.rewriteSddocname(query);
}
@@ -160,7 +162,8 @@ public class FastSearcher extends VespaBackEndSearcher {
if (dispatcher.searchCluster().groupSize() == 1)
forceSinglePassGrouping(query);
try(CloseableChannel channel = getChannel(query)) {
- Result result = channel.search(query, queryPacket, cacheKey);
+ List<Result> results = channel.search(query, queryPacket, cacheKey);
+ Result result = mergeResults(results, query, execution);
if (query.properties().getBoolean(Ranking.RANKFEATURES, false)) {
// There is currently no correct choice for which
@@ -182,13 +185,13 @@ public class FastSearcher extends VespaBackEndSearcher {
return result;
}
}
-
+
/** When we only search a single node, doing all grouping in one pass is more efficient */
private void forceSinglePassGrouping(Query query) {
for (GroupingRequest groupingRequest : query.getSelect().getGrouping())
forceSinglePassGrouping(groupingRequest.getRootOperation());
}
-
+
private void forceSinglePassGrouping(GroupingOperation operation) {
operation.setForceSinglePass(true);
for (GroupingOperation childOperation : operation.getChildren())
@@ -231,6 +234,7 @@ public class FastSearcher extends VespaBackEndSearcher {
* @param result result containing a partition of the unfilled hits
* @param summaryClass the summary class we want to fill with
**/
+ @Override
protected void doPartialFill(Result result, String summaryClass) {
if (result.isFilled(summaryClass)) return;
@@ -262,6 +266,34 @@ public class FastSearcher extends VespaBackEndSearcher {
return false;
}
+ private Result mergeResults(List<Result> results, Query query, Execution execution) {
+ if(results.size() == 1) {
+ return results.get(0);
+ }
+
+ Result result = new Result(query);
+
+ for (Result partialResult : results) {
+ result.mergeWith(partialResult);
+ result.hits().addAll(partialResult.hits().asUnorderedHits());
+ }
+
+ if (query.getOffset() != 0 || result.hits().size() > query.getHits()) {
+ // with multiple results, each partial result is expected to have
+ // offset = 0 to allow correct offset positioning after merge
+
+ if (result.getHitOrderer() != null) {
+ // Make sure we have the necessary data for sorting
+ fill(result, Execution.ATTRIBUTEPREFETCH, execution);
+ }
+ result.hits().trim(query.getOffset(), query.getHits());
+ }
+
+ // TODO grouping
+
+ return result;
+ }
+
private static @NonNull Optional<String> quotedSummaryClass(String summaryClass) {
return Optional.of(summaryClass == null ? "[null]" : quote(summaryClass));
}
diff --git a/container-search/src/main/java/com/yahoo/prelude/fastsearch/VespaBackEndSearcher.java b/container-search/src/main/java/com/yahoo/prelude/fastsearch/VespaBackEndSearcher.java
index a6f98418a76..409d05e3aaf 100644
--- a/container-search/src/main/java/com/yahoo/prelude/fastsearch/VespaBackEndSearcher.java
+++ b/container-search/src/main/java/com/yahoo/prelude/fastsearch/VespaBackEndSearcher.java
@@ -213,11 +213,7 @@ public abstract class VespaBackEndSearcher extends PingableSearcher {
if (root == null || root instanceof NullItem) // root can become null after resolving and transformation?
return new Result(query);
- QueryPacket queryPacket = QueryPacket.create(query);
- int compressionLimit = query.properties().getInteger(PACKET_COMPRESSION_LIMIT, 0);
- queryPacket.setCompressionLimit(compressionLimit);
- if (compressionLimit != 0)
- queryPacket.setCompressionType(query.properties().getString(PACKET_COMPRESSION_TYPE, "lz4"));
+ QueryPacket queryPacket = createQueryPacket(query);
if (isLoggingFine())
getLogger().fine("made QueryPacket: " + queryPacket);
@@ -241,6 +237,15 @@ public abstract class VespaBackEndSearcher extends PingableSearcher {
return result;
}
+ protected QueryPacket createQueryPacket(Query query) {
+ QueryPacket queryPacket = QueryPacket.create(query);
+ int compressionLimit = query.properties().getInteger(PACKET_COMPRESSION_LIMIT, 0);
+ queryPacket.setCompressionLimit(compressionLimit);
+ if (compressionLimit != 0)
+ queryPacket.setCompressionType(query.properties().getString(PACKET_COMPRESSION_TYPE, "lz4"));
+ return queryPacket;
+ }
+
/**
* Returns a cached result, or null if no result was cached for this key
*
@@ -355,7 +360,7 @@ public abstract class VespaBackEndSearcher extends PingableSearcher {
s.append(" location=")
.append(query.getRanking().getLocation().toString());
}
-
+
if (query.getGroupingSessionCache()) {
s.append(" groupingSessionCache=true");
}
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/CloseableChannel.java b/container-search/src/main/java/com/yahoo/search/dispatch/CloseableChannel.java
index 3f5ebe53d0d..fc337d589ec 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/CloseableChannel.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/CloseableChannel.java
@@ -8,19 +8,37 @@ import com.yahoo.search.Result;
import java.io.Closeable;
import java.io.IOException;
+import java.util.List;
/**
- * CloseableChannel is an interface for running a search query and getting document summaries against some
- * content node, node group or dispatcher while abstracting the specifics of the invocation target.
+ * CloseableChannel is an interface for running a search query and getting document summaries against some content node, node group or
+ * dispatcher while abstracting the specifics of the invocation target. CloseableChannel objects are stateful and should not be reused.
*
* @author ollivir
*/
public abstract class CloseableChannel implements Closeable {
- /** Retrieve the hits for the given {@link Query} */
- public abstract Result search(Query query, QueryPacket queryPacket, CacheKey cacheKey) throws IOException;
+ /** Retrieve the hits for the given {@link Query}. The channel may return more than one result, in
+ * which case the caller is responsible for merging the results. If multiple results are returned
+ * and the search query had a hit offset other than zero, that offset will be set to zero and the
+ * number of requested hits will be adjusted accordingly. */
+ public List<Result> search(Query query, QueryPacket queryPacket, CacheKey cacheKey) throws IOException {
+ sendSearchRequest(query, queryPacket);
+ return getSearchResults(cacheKey);
+ }
+
+ protected abstract void sendSearchRequest(Query query, QueryPacket queryPacket) throws IOException;
+
+ protected abstract List<Result> getSearchResults(CacheKey cacheKey) throws IOException;
/** Retrieve document summaries for the unfilled hits in the given {@link Result} */
- public abstract void partialFill(Result result, String summaryClass);
+ public void partialFill(Result result, String summaryClass) {
+ sendPartialFillRequest(result, summaryClass);
+ getPartialFillResults(result, summaryClass);
+ }
+
+ protected abstract void getPartialFillResults(Result result, String summaryClass);
+
+ protected abstract void sendPartialFillRequest(Result result, String summaryClass);
protected abstract void closeChannel();
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/Dispatcher.java b/container-search/src/main/java/com/yahoo/search/dispatch/Dispatcher.java
index 0cf18852dd3..ce0d48f5638 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/Dispatcher.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/Dispatcher.java
@@ -27,6 +27,7 @@ import com.yahoo.slime.Cursor;
import com.yahoo.slime.Slime;
import com.yahoo.vespa.config.search.DispatchConfig;
+import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -286,21 +287,31 @@ public class Dispatcher extends AbstractComponent {
}
public Optional<CloseableChannel> getDispatchedChannel(VespaBackEndSearcher searcher, Query query) {
- Optional<SearchCluster.Group> groupInCluster = loadBalancer.takeGroupForQuery(query);
+ if (!query.getSelect().getGrouping().isEmpty()) {
+ return Optional.empty();
+ }
- return groupInCluster.flatMap(group -> {
- if(group.nodes().size() == 1) {
- SearchCluster.Node node = group.nodes().iterator().next();
- query.trace(false, 2, "Dispatching internally to ", group, " (", node.toString(), ")");
- CloseableChannel channel = new FS4CloseableChannel(searcher, query, fs4ResourcePool, node.hostname(), node.fs4port(), node.key());
- channel.teardown(() -> {
- loadBalancer.releaseGroup(group);
- });
- return Optional.of(channel);
- } else {
- loadBalancer.releaseGroup(group);
- return Optional.empty();
+ Optional<SearchCluster.Group> groupInCluster = loadBalancer.takeGroupForQuery(query);
+ if (!groupInCluster.isPresent()) {
+ return Optional.empty();
+ }
+ SearchCluster.Group group = groupInCluster.get();
+ query.trace(false, 2, "Dispatching internally to ", group);
+
+ if (group.nodes().size() == 1) {
+ SearchCluster.Node node = group.nodes().iterator().next();
+ CloseableChannel channel = new FS4CloseableChannel(searcher, query, fs4ResourcePool, node.hostname(), node.fs4port(),
+ node.key());
+ return Optional.of(channel);
+ } else {
+ query.setNoCache(true); // Note - multi-node request disables packet based caching
+
+ Map<Integer, CloseableChannel> subchannels = new HashMap<>();
+ for (SearchCluster.Node node : group.nodes()) {
+ subchannels.put(node.key(), new FS4CloseableChannel(searcher, query, fs4ResourcePool, node.hostname(), node.fs4port(), node.key()));
}
- });
+ CloseableChannel multinode = new InterleavedCloseableChannel(subchannels);
+ return Optional.of(multinode);
+ }
}
}
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/InterleavedCloseableChannel.java b/container-search/src/main/java/com/yahoo/search/dispatch/InterleavedCloseableChannel.java
new file mode 100644
index 00000000000..e461f6fc725
--- /dev/null
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/InterleavedCloseableChannel.java
@@ -0,0 +1,98 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.search.dispatch;
+
+import com.yahoo.fs4.QueryPacket;
+import com.yahoo.prelude.fastsearch.CacheKey;
+import com.yahoo.prelude.fastsearch.FastHit;
+import com.yahoo.search.Query;
+import com.yahoo.search.Result;
+import com.yahoo.search.result.Hit;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * InterleavedCloseableChannel uses multiple {@link CloseableChannel} objects to interface with
+ * content nodes in parallel. Operationally it first sends requests to all channels and then
+ * collects the results. The invoker of this class is responsible for merging the results if
+ * needed.
+ *
+ * @author ollivir
+ */
+public class InterleavedCloseableChannel extends CloseableChannel {
+ private final Map<Integer, CloseableChannel> subchannels;
+ private Map<Integer, Result> expectedFillResults = null;
+
+ public InterleavedCloseableChannel(Map<Integer, CloseableChannel> subchannels) {
+ this.subchannels = subchannels;
+ }
+
+ /** Sends search queries to the contained {@link CloseableChannel} subchannels. If the
+ * search query has an offset other than zero, it will be reset to zero and the expected
+ * hit count will be adjusted accordingly. */
+ @Override
+ protected void sendSearchRequest(Query query, QueryPacket queryPacket) throws IOException {
+ for (CloseableChannel subchannel : subchannels.values()) {
+ Query subquery = query.clone();
+
+ subquery.setHits(subquery.getHits() + subquery.getOffset());
+ subquery.setOffset(0);
+ subchannel.sendSearchRequest(subquery, null);
+ }
+ }
+
+ @Override
+ protected List<Result> getSearchResults(CacheKey cacheKey) throws IOException {
+ List<Result> results = new ArrayList<>();
+
+ for (CloseableChannel subchannel : subchannels.values()) {
+ results.addAll(subchannel.getSearchResults(cacheKey));
+ }
+ return results;
+ }
+
+ @Override
+ protected void sendPartialFillRequest(Result result, String summaryClass) {
+ expectedFillResults = new HashMap<>();
+
+ for (Iterator<Hit> it = result.hits().deepIterator(); it.hasNext();) {
+ Hit hit = it.next();
+ if (hit instanceof FastHit) {
+ FastHit fhit = (FastHit) hit;
+ Result res = expectedFillResults.computeIfAbsent(fhit.getDistributionKey(), dk -> new Result(result.getQuery()));
+ res.hits().add(fhit);
+ }
+ }
+ expectedFillResults.forEach((distKey, partialResult) -> {
+ CloseableChannel channel = subchannels.get(distKey);
+ if (channel != null) {
+ channel.sendPartialFillRequest(partialResult, summaryClass);
+ }
+ });
+ }
+
+ @Override
+ protected void getPartialFillResults(Result result, String summaryClass) {
+ if (expectedFillResults == null) {
+ return;
+ }
+ expectedFillResults.forEach((distKey, partialResult) -> {
+ CloseableChannel channel = subchannels.get(distKey);
+ if (channel != null) {
+ channel.getPartialFillResults(partialResult, summaryClass);
+ }
+ });
+ }
+
+ @Override
+ protected void closeChannel() {
+ if (!subchannels.isEmpty()) {
+ subchannels.values().forEach(CloseableChannel::close);
+ subchannels.clear();
+ }
+ }
+}
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/LoadBalancer.java b/container-search/src/main/java/com/yahoo/search/dispatch/LoadBalancer.java
index 269d16fd24d..455696c16b1 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/LoadBalancer.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/LoadBalancer.java
@@ -26,17 +26,14 @@ public class LoadBalancer {
private static final CompoundName QUERY_NODE_GROUP_AFFINITY = new CompoundName("dispatch.group.affinity");
- private final boolean isInternallyDispatchable;
private final List<GroupSchedule> scoreboard;
private int needle = 0;
public LoadBalancer(SearchCluster searchCluster) {
if (searchCluster == null) {
- this.isInternallyDispatchable = false;
this.scoreboard = null;
return;
}
- this.isInternallyDispatchable = (searchCluster.groupSize() == 1);
this.scoreboard = new ArrayList<>(searchCluster.groups().size());
for (Group group : searchCluster.groups().values()) {
@@ -53,7 +50,7 @@ public class LoadBalancer {
* @return The node group to target, or <i>empty</i> if the internal dispatch logic cannot be used
*/
public Optional<Group> takeGroupForQuery(Query query) {
- if (!isInternallyDispatchable) {
+ if (scoreboard == null) {
return Optional.empty();
}
diff --git a/container-search/src/test/java/com/yahoo/prelude/templates/test/qr-templates.cfg b/container-search/src/test/java/com/yahoo/prelude/templates/test/qr-templates.cfg
deleted file mode 100644
index 10efc3334be..00000000000
--- a/container-search/src/test/java/com/yahoo/prelude/templates/test/qr-templates.cfg
+++ /dev/null
@@ -1,104 +0,0 @@
-templateset[10]
-templateset[0].urlprefix "/xsearch"
-templateset[0].mimetype "text/xml"
-templateset[0].encoding "utf-8"
-templateset[0].rankprofile 0
-templateset[0].keepalive false
-templateset[0].headertemplate "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n<RESULTSET TOTALHITS=\"$result.totalHitCount\">\n"
-templateset[0].footertemplate "</RESULTSET>"
-templateset[0].nohitstemplate "<XTEMPLATENOHITS/>\n"
-templateset[0].hittemplate "<XTEMPLATEHIT RELEVANCY=\"$hit.relevance\" SOURCE=\"$hit.source\" TYPE=\"$hit.typeString\" OFFSET=\"$hitno\">\n<FIELD NAME=\"uri\">$uri</FIELD>\n<FIELD NAME=\"category\">$category</FIELD>\n<FIELD NAME=\"bsumtitle\">$bsumtitle</FIELD>\n</XTEMPLATEHIT>\n"
-templateset[0].errortemplate "<ERROR CODE=\"$result.hits().error.code\">$result.hits().error.message</ERROR>\n"
-templateset[1].urlprefix "/cgi-bin/asearch"
-templateset[1].mimetype "text/html"
-templateset[1].encoding "utf-8"
-templateset[1].rankprofile 0
-templateset[1].keepalive false
-templateset[1].headertemplate "### Result\n"
-templateset[1].footertemplate "### Result\n"
-templateset[1].nohitstemplate "### Result\n"
-templateset[1].hittemplate "### Result\n"
-templateset[1].errortemplate "### Result\n"
-templateset[2].urlprefix "/groups"
-templateset[2].mimetype "text/xml"
-templateset[2].encoding "utf-8"
-templateset[2].rankprofile 0
-templateset[2].keepalive false
-templateset[2].headertemplate "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n<RESULTSET TOTALHITS=\"$result.totalHitCount\">\n"
-templateset[2].footertemplate "</RESULTSET>"
-templateset[2].nohitstemplate "<XTEMPLATENOHITS/>\n"
-templateset[2].hittemplate "<XTEMPLATEHIT RELEVANCY=\"$relevancy\" SOURCE=\"$hit.source\" TYPE=\"$hit.typeString\" OFFSET=\"$hitno\">\n<FIELD NAME=\"uri\">$uri</FIELD>\n<FIELD NAME=\"category\">$category</FIELD>\n<FIELD NAME=\"bsumtitle\">$bsumtitle</FIELD>\n</XTEMPLATEHIT>\n"
-templateset[2].errortemplate "<ERROR CODE=\"$result.error.code\">$result.error.message</ERROR>"
-templateset[2].groupsheadertemplate "<GROUP ATTRIBUTE=\"$field\">\n"
-templateset[2].rangegrouptemplate "<RANGE LOW=\"$group.from\" HIGH=\"$group.to\" AMOUNT=\"$group.count\"/>\n"
-templateset[2].exactgrouptemplate "<VAL VAL=\"$group.value\" AMOUNT=\"$group.count\"/>\n"
-templateset[2].groupsfootertemplate "</GROUP>\n"
-templateset[3].urlprefix "/pertemplatebolding"
-templateset[3].mimetype "text/xml"
-templateset[3].encoding "utf-8"
-templateset[3].rankprofile 0
-templateset[3].keepalive false
-templateset[3].headertemplate "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n<RESULTSET TOTALHITS=\"$result.totalHitCount\">\n"
-templateset[3].footertemplate "</RESULTSET>"
-templateset[3].nohitstemplate "<XTEMPLATENOHITS/>\n"
-templateset[3].hittemplate "<BOLDINGTEST>\n<FIELD NAME=\"uri\">$uri</FIELD>\n<FIELD NAME=\"bsumtitle\" NOTE=\"bolded escaped\">$bsumtitle</FIELD>\n<FIELD NAME=\"bsumtitle\" NOTE=\"bolded unescaped\">$hit.getField(\"bsumtitle\")</FIELD>\n<FIELD NAME=\"bsumtitle\" NOTE=\"unbolded unescaped\">$hit.getField(\"bsumtitle\").bareContent(false, false)</FIELD>\n<FIELD NAME=\"bsumtitle\" NOTE=\"unbolded escaped\">$hit.getField(\"bsumtitle\").bareContent(true, false)</FIELD>\n</BOLDINGTEST>\n"
-templateset[3].errortemplate "<ERROR CODE=\"$result.error.code\">$result.error.message</ERROR>\n"
-templateset[4].urlprefix "/customhighlighttags"
-templateset[4].mimetype "text/xml"
-templateset[4].encoding "utf-8"
-templateset[4].rankprofile 0
-templateset[4].keepalive false
-templateset[4].headertemplate "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n<RESULTSET TOTALHITS=\"$result.totalHitCount\">\n"
-templateset[4].footertemplate "</RESULTSET>"
-templateset[4].nohitstemplate "<XTEMPLATENOHITS/>\n"
-templateset[4].hittemplate "<XTEMPLATEHIT RELEVANCY=\"$hit.relevance\" SOURCE=\"$hit.source\" TYPE=\"$hit.typeString\" OFFSET=\"$hitno\">\n<FIELD NAME=\"uri\">$uri</FIELD>\n<FIELD NAME=\"category\">$category</FIELD>\n<FIELD NAME=\"bsumtitle\">$bsumtitle</FIELD>\n</XTEMPLATEHIT>\n"
-templateset[4].errortemplate "<ERROR CODE=\"$result.error.code\">$result.error.message</ERROR>\n"
-templateset[4].highlightstarttag "<b>"
-templateset[4].highlightendtag "</b>"
-templateset[4].highlightseptag "<p />"
-templateset[5].urlprefix "/checkunsigned"
-templateset[5].mimetype "text/xml"
-templateset[5].encoding "utf-8"
-templateset[5].rankprofile 0
-templateset[5].keepalive false
-templateset[5].headertemplate ""
-templateset[5].footertemplate ""
-templateset[5].nohitstemplate ""
-templateset[5].hittemplate "$number $context.asUnsigned(\"number\")"
-templateset[5].errortemplate ""
-templateset[5].highlightstarttag ""
-templateset[5].highlightendtag ""
-templateset[5].highlightseptag ""
-templateset[6].urlprefix "/summaryclasstest"
-templateset[6].mimetype "text/xml"
-templateset[6].encoding "utf-8"
-templateset[6].rankprofile 0
-templateset[6].keepalive false
-templateset[6].headertemplate "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n<result>\n"
-templateset[6].footertemplate "</result>"
-templateset[6].nohitstemplate "<nohits />\n"
-templateset[6].hittemplate "<hit />\n"
-templateset[6].errortemplate "<error />\n"
-templateset[6].defaultsummaryclass "gnurglegnokk"
-templateset[7].urlprefix "/lazydecoding"
-templateset[7].mimetype "text/plain"
-templateset[7].encoding "utf-8"
-templateset[7].rankprofile 0
-templateset[7].keepalive false
-templateset[7].headertemplate ""
-templateset[7].footertemplate ""
-templateset[7].nohitstemplate "no hits"
-templateset[7].hittemplate "$URL\n$TITLE\n$WORDS\n$IPADDRESS"
-templateset[7].errortemplate "error"
-templateset[8].urlprefix "/java"
-templateset[8].classid "com.yahoo.prelude.templates.test.TestTemplate"
-templateset[8].mimetype "text/plain"
-templateset[8].encoding "utf-8"
-templateset[8].rankprofile 0
-templateset[8].keepalive false
-templateset[9].urlprefix "/boom"
-templateset[9].classid "com.yahoo.prelude.templates.test.BoomTemplate"
-templateset[9].mimetype "text/plain"
-templateset[9].encoding "utf-8"
-templateset[9].rankprofile 0
-templateset[9].keepalive false
diff --git a/container-search/src/test/java/com/yahoo/search/dispatch/LoadBalancerTest.java b/container-search/src/test/java/com/yahoo/search/dispatch/LoadBalancerTest.java
index e94c11e4473..5fa9dee8370 100644
--- a/container-search/src/test/java/com/yahoo/search/dispatch/LoadBalancerTest.java
+++ b/container-search/src/test/java/com/yahoo/search/dispatch/LoadBalancerTest.java
@@ -47,18 +47,7 @@ public class LoadBalancerTest {
}
@Test
- public void requreThatLoadBalancerIgnoresClusteredSingleGroup() {
- Node n1 = new SearchCluster.Node(0, "test-node1", 0, 0);
- Node n2 = new SearchCluster.Node(1, "test-node2", 1, 0);
- SearchCluster cluster = new SearchCluster(88.0, Arrays.asList(n1, n2), null, 2, null);
- LoadBalancer lb = new LoadBalancer(cluster);
-
- Optional<Group> grp = lb.takeGroupForQuery(new Query());
- assertThat(grp.isPresent(), is(false));
- }
-
- @Test
- public void requreThatLoadBalancerIgnoresClusteredGroups() {
+ public void requreThatLoadBalancerServesClusteredGroups() {
Node n1 = new SearchCluster.Node(0, "test-node1", 0, 0);
Node n2 = new SearchCluster.Node(1, "test-node2", 1, 0);
Node n3 = new SearchCluster.Node(0, "test-node3", 0, 1);
@@ -67,7 +56,7 @@ public class LoadBalancerTest {
LoadBalancer lb = new LoadBalancer(cluster);
Optional<Group> grp = lb.takeGroupForQuery(new Query());
- assertThat(grp.isPresent(), is(false));
+ assertThat(grp.isPresent(), is(true));
}
@Test
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/MetricsService.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/MetricsService.java
index 20e9710f092..7265273d0d0 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/MetricsService.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/MetricsService.java
@@ -1,9 +1,12 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.api.integration;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.HostName;
+import com.yahoo.vespa.hosted.controller.api.integration.routing.RotationStatus;
import com.yahoo.vespa.hosted.controller.api.integration.zone.ZoneId;
+import java.util.Collections;
import java.util.Map;
/**
@@ -17,6 +20,15 @@ public interface MetricsService {
DeploymentMetrics getDeploymentMetrics(ApplicationId application, ZoneId zone);
+ // TODO: Remove default once implementation catches up
+ /**
+ * Get status for a global rotation
+ * @param rotationName The fully qualified domain name of the rotation
+ */
+ default Map<HostName, RotationStatus> getRotationStatus(String rotationName) {
+ return Collections.emptyMap();
+ }
+
Map<String, SystemMetrics> getSystemMetrics(ApplicationId application, ZoneId zone);
class DeploymentMetrics {
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/MockOrganization.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/MockOrganization.java
index 8efbde52d4a..82d3be596bc 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/MockOrganization.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/MockOrganization.java
@@ -13,6 +13,7 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.NoSuchElementException;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicLong;
@@ -38,6 +39,8 @@ public class MockOrganization extends AbstractComponent implements Organization
@Override
public IssueId file(Issue issue) {
+ if ( ! properties.containsKey(issue.propertyId()))
+ throw new NoSuchElementException("Unknown property '" + issue.propertyId() + "'!");
IssueId issueId = IssueId.from("" + counter.incrementAndGet());
issues.put(issueId, new MockIssue(issue));
return issueId;
@@ -153,7 +156,7 @@ public class MockOrganization extends AbstractComponent implements Organization
this.issue = issue;
this.updated = clock.instant();
this.open = true;
- this.assignee = issue.assignee().orElse(properties.get(issue.propertyId()).defaultAssignee);
+ this.assignee = issue.assignee().orElse(null);
}
public Issue issue() { return issue; }
@@ -164,11 +167,10 @@ public class MockOrganization extends AbstractComponent implements Organization
private class PropertyInfo {
- private User defaultAssignee;
private List<List<User>> contacts = Collections.emptyList();
- private URI issueUrl;
- private URI contactsUrl;
- private URI propertyUrl;
+ private URI issueUrl = URI.create("issues.tld");
+ private URI contactsUrl = URI.create("contacts.tld");
+ private URI propertyUrl = URI.create("properties.tld");
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/routing/GlobalRoutingService.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/routing/GlobalRoutingService.java
deleted file mode 100644
index d49d6a9e4c2..00000000000
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/routing/GlobalRoutingService.java
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.api.integration.routing;
-
-import java.util.Map;
-
-/**
- * A global routing service.
- *
- * @author mpolden
- */
-public interface GlobalRoutingService {
-
- /** Returns the health status for each endpoint behind the given rotation name */
- Map<String, RotationStatus> getHealthStatus(String rotationName);
-
-}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/routing/MemoryGlobalRoutingService.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/routing/MemoryGlobalRoutingService.java
deleted file mode 100644
index 9f1ac1b1f0b..00000000000
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/routing/MemoryGlobalRoutingService.java
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.api.integration.routing;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * @author bratseth
- */
-public class MemoryGlobalRoutingService implements GlobalRoutingService {
-
- @Override
- public Map<String, RotationStatus> getHealthStatus(String rotationName) {
- HashMap<String, RotationStatus> map = new HashMap<>();
- map.put("prod.us-west-1", RotationStatus.IN);
- return Collections.unmodifiableMap(map);
- }
-
-}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Application.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Application.java
index 677f2363c08..a6c3f11470d 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Application.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Application.java
@@ -1,4 +1,4 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller;
import com.google.common.collect.ImmutableMap;
@@ -7,6 +7,7 @@ import com.yahoo.config.application.api.DeploymentSpec;
import com.yahoo.config.application.api.ValidationOverrides;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.HostName;
import com.yahoo.vespa.hosted.controller.api.integration.MetricsService.ApplicationMetrics;
import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueId;
import com.yahoo.vespa.hosted.controller.api.integration.zone.ZoneId;
@@ -16,12 +17,12 @@ import com.yahoo.vespa.hosted.controller.application.ApplicationVersion;
import com.yahoo.vespa.hosted.controller.application.Change;
import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
+import com.yahoo.vespa.hosted.controller.application.RotationStatus;
import com.yahoo.vespa.hosted.controller.rotation.RotationId;
import java.time.Instant;
import java.util.Collections;
import java.util.Comparator;
-import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
@@ -40,6 +41,7 @@ import java.util.stream.Collectors;
public class Application {
private final ApplicationId id;
+ private final Instant createdAt;
private final DeploymentSpec deploymentSpec;
private final ValidationOverrides validationOverrides;
private final Map<ZoneId, Deployment> deployments;
@@ -49,51 +51,48 @@ public class Application {
private final Optional<IssueId> ownershipIssueId;
private final ApplicationMetrics metrics;
private final Optional<RotationId> rotation;
+ private final Map<HostName, RotationStatus> rotationStatus;
/** Creates an empty application */
- public Application(ApplicationId id) {
- this(id, DeploymentSpec.empty, ValidationOverrides.empty, Collections.emptyMap(),
+ public Application(ApplicationId id, Instant now) {
+ this(id, now, DeploymentSpec.empty, ValidationOverrides.empty, Collections.emptyMap(),
new DeploymentJobs(OptionalLong.empty(), Collections.emptyList(), Optional.empty(), false),
Change.empty(), Change.empty(), Optional.empty(), new ApplicationMetrics(0, 0),
- Optional.empty());
+ Optional.empty(), Collections.emptyMap());
}
/** Used from persistence layer: Do not use */
- public Application(ApplicationId id, DeploymentSpec deploymentSpec, ValidationOverrides validationOverrides,
+ public Application(ApplicationId id, Instant createdAt, DeploymentSpec deploymentSpec, ValidationOverrides validationOverrides,
List<Deployment> deployments, DeploymentJobs deploymentJobs, Change change,
Change outstandingChange, Optional<IssueId> ownershipIssueId, ApplicationMetrics metrics,
- Optional<RotationId> rotation) {
- this(id, deploymentSpec, validationOverrides,
+ Optional<RotationId> rotation, Map<HostName, RotationStatus> rotationStatus) {
+ this(id, createdAt, deploymentSpec, validationOverrides,
deployments.stream().collect(Collectors.toMap(Deployment::zone, d -> d)),
- deploymentJobs, change, outstandingChange, ownershipIssueId, metrics, rotation);
+ deploymentJobs, change, outstandingChange, ownershipIssueId, metrics, rotation, rotationStatus);
}
- Application(ApplicationId id, DeploymentSpec deploymentSpec, ValidationOverrides validationOverrides,
+ Application(ApplicationId id, Instant createdAt, DeploymentSpec deploymentSpec, ValidationOverrides validationOverrides,
Map<ZoneId, Deployment> deployments, DeploymentJobs deploymentJobs, Change change,
Change outstandingChange, Optional<IssueId> ownershipIssueId, ApplicationMetrics metrics,
- Optional<RotationId> rotation) {
- Objects.requireNonNull(id, "id cannot be null");
- Objects.requireNonNull(deploymentSpec, "deploymentSpec cannot be null");
- Objects.requireNonNull(validationOverrides, "validationOverrides cannot be null");
- Objects.requireNonNull(deployments, "deployments cannot be null");
- Objects.requireNonNull(deploymentJobs, "deploymentJobs cannot be null");
- Objects.requireNonNull(change, "change cannot be null");
- Objects.requireNonNull(metrics, "metrics cannot be null");
- Objects.requireNonNull(rotation, "rotation cannot be null");
- this.id = id;
- this.deploymentSpec = deploymentSpec;
- this.validationOverrides = validationOverrides;
- this.deployments = ImmutableMap.copyOf(deployments);
- this.deploymentJobs = deploymentJobs;
- this.change = change;
- this.outstandingChange = outstandingChange;
- this.ownershipIssueId = ownershipIssueId;
- this.metrics = metrics;
- this.rotation = rotation;
+ Optional<RotationId> rotation, Map<HostName, RotationStatus> rotationStatus) {
+ this.id = Objects.requireNonNull(id, "id cannot be null");
+ this.createdAt = Objects.requireNonNull(createdAt, "instant of creation cannot be null");
+ this.deploymentSpec = Objects.requireNonNull(deploymentSpec, "deploymentSpec cannot be null");
+ this.validationOverrides = Objects.requireNonNull(validationOverrides, "validationOverrides cannot be null");
+ this.deployments = ImmutableMap.copyOf(Objects.requireNonNull(deployments, "deployments cannot be null"));
+ this.deploymentJobs = Objects.requireNonNull(deploymentJobs, "deploymentJobs cannot be null");
+ this.change = Objects.requireNonNull(change, "change cannot be null");
+ this.outstandingChange = Objects.requireNonNull(outstandingChange, "outstandingChange cannot be null");
+ this.ownershipIssueId = Objects.requireNonNull(ownershipIssueId, "ownershipIssueId cannot be null");
+ this.metrics = Objects.requireNonNull(metrics, "metrics cannot be null");
+ this.rotation = Objects.requireNonNull(rotation, "rotation cannot be null");
+ this.rotationStatus = ImmutableMap.copyOf(Objects.requireNonNull(rotationStatus, "rotationStatus cannot be null"));
}
public ApplicationId id() { return id; }
+ public Instant createdAt() { return createdAt; }
+
/**
* Returns the last deployed deployment spec of this application,
* or the empty deployment spec if it has never been deployed
@@ -129,12 +128,20 @@ public class Application {
public Change change() { return change; }
/**
- * Returns the change that should used for this application at the given instant, typically now.
+ * Returns the target that should be used for this application at the given instant, typically now.
+ *
+     * This will be any parts of the current total change that aren't both blocked and not yet deployed anywhere.
*/
public Change changeAt(Instant now) {
- Change change = change();
- if ( ! deploymentSpec.canUpgradeAt(now)) change = change.withoutPlatform();
- if ( ! deploymentSpec.canChangeRevisionAt(now)) change = change.withoutApplication();
+ Change change = this.change;
+ if ( this.change.platform().isPresent()
+ && productionDeployments().values().stream().noneMatch(deployment -> deployment.version().equals(this.change.platform().get()))
+ && ! deploymentSpec.canUpgradeAt(now))
+ change = change.withoutPlatform();
+ if ( this.change.application().isPresent()
+ && productionDeployments().values().stream().noneMatch(deployment -> deployment.applicationVersion().equals(this.change.application().get()))
+ && ! deploymentSpec.canChangeRevisionAt(now))
+ change = change.withoutApplication();
return change;
}
@@ -182,6 +189,23 @@ public class Application {
return rotation.map(rotation -> new ApplicationRotation(id, rotation));
}
+    /** Returns the status of the global rotation assigned to this. Will be empty if this does not have a global rotation. */
+ public Map<HostName, RotationStatus> rotationStatus() {
+ return rotationStatus;
+ }
+
+ /** Returns the global rotation status of given deployment */
+ public RotationStatus rotationStatus(Deployment deployment) {
+ // Rotation status only contains VIP host names, one per zone in the system. The only way to map VIP hostname to
+ // this deployment, and thereby determine rotation status, is to check if VIP hostname contains the
+ // deployment's environment and region.
+ return rotationStatus.entrySet().stream()
+ .filter(kv -> kv.getKey().value().contains(deployment.zone().value()))
+ .map(Map.Entry::getValue)
+ .findFirst()
+ .orElse(RotationStatus.unknown);
+ }
+
@Override
public boolean equals(Object o) {
if (this == o) return true;
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
index af5b9198343..2dd3b1dda23 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
@@ -274,7 +274,7 @@ public class ApplicationController {
zmsClient.addApplication(((AthenzTenant) tenant.get()).domain(),
new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId(id.application().value()));
}
- LockedApplication application = new LockedApplication(new Application(id), lock);
+ LockedApplication application = new LockedApplication(new Application(id, clock.instant()), lock);
store(application);
log.info("Created " + application);
return application.get();
@@ -504,6 +504,11 @@ public class ApplicationController {
/** Returns the endpoints of the deployment, or an empty list if the request fails */
public Optional<List<URI>> getDeploymentEndpoints(DeploymentId deploymentId) {
+ if ( ! get(deploymentId.applicationId())
+ .map(application -> application.deployments().containsKey(deploymentId.zoneId()))
+ .orElse(deploymentId.applicationId().instance().isTester()))
+ throw new NotExistsException("Deployment", deploymentId.toString());
+
try {
return Optional.of(ImmutableList.copyOf(routingGenerator.endpoints(deploymentId).stream()
.map(RoutingEndpoint::getEndpoint)
@@ -526,12 +531,10 @@ public class ApplicationController {
*/
public void deleteApplication(ApplicationId applicationId, Optional<NToken> token) {
// Find all instances of the application
- List<ApplicationId> instances = controller.applications().asList(applicationId.tenant())
- .stream()
- .map(Application::id)
- .filter(id -> id.application().equals(applicationId.application()) &&
- id.tenant().equals(applicationId.tenant()))
- .collect(Collectors.toList());
+ List<ApplicationId> instances = asList(applicationId.tenant()).stream()
+ .map(Application::id)
+ .filter(id -> id.application().equals(applicationId.application()))
+ .collect(Collectors.toList());
if (instances.isEmpty()) {
throw new NotExistsException("Could not delete application '" + applicationId + "': Application not found");
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java
index 1576ab597be..731ed718fc0 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java
@@ -24,14 +24,11 @@ import com.yahoo.vespa.hosted.controller.api.integration.dns.NameService;
import com.yahoo.vespa.hosted.controller.api.integration.entity.EntityService;
import com.yahoo.vespa.hosted.controller.api.integration.github.GitHub;
import com.yahoo.vespa.hosted.controller.api.integration.organization.Organization;
-import com.yahoo.vespa.hosted.controller.api.integration.routing.GlobalRoutingService;
-import com.yahoo.vespa.hosted.controller.api.integration.routing.RotationStatus;
import com.yahoo.vespa.hosted.controller.api.integration.routing.RoutingGenerator;
import com.yahoo.vespa.hosted.controller.api.integration.zone.CloudName;
import com.yahoo.vespa.hosted.controller.api.integration.zone.ZoneRegistry;
import com.yahoo.vespa.hosted.controller.deployment.JobController;
import com.yahoo.vespa.hosted.controller.persistence.CuratorDb;
-import com.yahoo.vespa.hosted.controller.rotation.Rotation;
import com.yahoo.vespa.hosted.controller.versions.OsVersion;
import com.yahoo.vespa.hosted.controller.versions.OsVersionStatus;
import com.yahoo.vespa.hosted.controller.versions.VersionStatus;
@@ -74,7 +71,6 @@ public class Controller extends AbstractComponent {
private final Clock clock;
private final GitHub gitHub;
private final EntityService entityService;
- private final GlobalRoutingService globalRoutingService;
private final ZoneRegistry zoneRegistry;
private final ConfigServer configServer;
private final MetricsService metricsService;
@@ -89,14 +85,13 @@ public class Controller extends AbstractComponent {
@Inject
public Controller(CuratorDb curator, RotationsConfig rotationsConfig,
GitHub gitHub, EntityService entityService, Organization organization,
- GlobalRoutingService globalRoutingService,
ZoneRegistry zoneRegistry, ConfigServer configServer,
MetricsService metricsService, NameService nameService,
RoutingGenerator routingGenerator, Chef chef, AthenzClientFactory athenzClientFactory,
ArtifactRepository artifactRepository, ApplicationStore applicationStore, TesterCloud testerCloud,
BuildService buildService, RunDataStore runDataStore) {
this(curator, rotationsConfig,
- gitHub, entityService, globalRoutingService, zoneRegistry,
+ gitHub, entityService, zoneRegistry,
configServer, metricsService, nameService, routingGenerator, chef,
Clock.systemUTC(), athenzClientFactory, artifactRepository, applicationStore, testerCloud,
buildService, runDataStore, com.yahoo.net.HostName::getLocalhost);
@@ -104,7 +99,6 @@ public class Controller extends AbstractComponent {
public Controller(CuratorDb curator, RotationsConfig rotationsConfig,
GitHub gitHub, EntityService entityService,
- GlobalRoutingService globalRoutingService,
ZoneRegistry zoneRegistry, ConfigServer configServer,
MetricsService metricsService, NameService nameService,
RoutingGenerator routingGenerator, Chef chef, Clock clock,
@@ -116,7 +110,6 @@ public class Controller extends AbstractComponent {
this.curator = Objects.requireNonNull(curator, "Curator cannot be null");
this.gitHub = Objects.requireNonNull(gitHub, "GitHub cannot be null");
this.entityService = Objects.requireNonNull(entityService, "EntityService cannot be null");
- this.globalRoutingService = Objects.requireNonNull(globalRoutingService, "GlobalRoutingService cannot be null");
this.zoneRegistry = Objects.requireNonNull(zoneRegistry, "ZoneRegistry cannot be null");
this.configServer = Objects.requireNonNull(configServer, "ConfigServer cannot be null");
this.metricsService = Objects.requireNonNull(metricsService, "MetricsService cannot be null");
@@ -168,10 +161,6 @@ public class Controller extends AbstractComponent {
public ZoneRegistry zoneRegistry() { return zoneRegistry; }
- public Map<String, RotationStatus> rotationStatus(Rotation rotation) {
- return globalRoutingService.getHealthStatus(rotation.name());
- }
-
public ApplicationView getApplicationView(String tenantName, String applicationName, String instanceName,
String environment, String region) {
return configServer.getApplicationView(tenantName, applicationName, instanceName, environment, region);
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/LockedApplication.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/LockedApplication.java
index 2209cdf3013..433a6d3ed38 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/LockedApplication.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/LockedApplication.java
@@ -6,10 +6,12 @@ import com.yahoo.config.application.api.DeploymentSpec;
import com.yahoo.config.application.api.ValidationOverrides;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.HostName;
import com.yahoo.vespa.curator.Lock;
import com.yahoo.vespa.hosted.controller.api.integration.MetricsService;
import com.yahoo.vespa.hosted.controller.api.integration.MetricsService.ApplicationMetrics;
import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueId;
+import com.yahoo.vespa.hosted.controller.application.RotationStatus;
import com.yahoo.vespa.hosted.controller.api.integration.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.application.ApplicationRotation;
import com.yahoo.vespa.hosted.controller.application.ApplicationVersion;
@@ -40,6 +42,7 @@ public class LockedApplication {
private final Lock lock;
private final ApplicationId id;
+ private final Instant createdAt;
private final DeploymentSpec deploymentSpec;
private final ValidationOverrides validationOverrides;
private final Map<ZoneId, Deployment> deployments;
@@ -49,6 +52,7 @@ public class LockedApplication {
private final Optional<IssueId> ownershipIssueId;
private final ApplicationMetrics metrics;
private final Optional<RotationId> rotation;
+ private final Map<HostName, RotationStatus> rotationStatus;
/**
* Used to create a locked application
@@ -57,21 +61,23 @@ public class LockedApplication {
* @param lock The lock for the application.
*/
LockedApplication(Application application, Lock lock) {
- this(Objects.requireNonNull(lock, "lock cannot be null"), application.id(),
+ this(Objects.requireNonNull(lock, "lock cannot be null"), application.id(), application.createdAt(),
application.deploymentSpec(), application.validationOverrides(),
application.deployments(),
application.deploymentJobs(), application.change(), application.outstandingChange(),
application.ownershipIssueId(), application.metrics(),
- application.rotation().map(ApplicationRotation::id));
+ application.rotation().map(ApplicationRotation::id),
+ application.rotationStatus());
}
- private LockedApplication(Lock lock, ApplicationId id,
+ private LockedApplication(Lock lock, ApplicationId id, Instant createdAt,
DeploymentSpec deploymentSpec, ValidationOverrides validationOverrides,
Map<ZoneId, Deployment> deployments, DeploymentJobs deploymentJobs, Change change,
Change outstandingChange, Optional<IssueId> ownershipIssueId, ApplicationMetrics metrics,
- Optional<RotationId> rotation) {
+ Optional<RotationId> rotation, Map<HostName, RotationStatus> rotationStatus) {
this.lock = lock;
this.id = id;
+ this.createdAt = createdAt;
this.deploymentSpec = deploymentSpec;
this.validationOverrides = validationOverrides;
this.deployments = deployments;
@@ -81,43 +87,44 @@ public class LockedApplication {
this.ownershipIssueId = ownershipIssueId;
this.metrics = metrics;
this.rotation = rotation;
+ this.rotationStatus = rotationStatus;
}
/** Returns a read-only copy of this */
public Application get() {
- return new Application(id, deploymentSpec, validationOverrides, deployments, deploymentJobs, change,
- outstandingChange, ownershipIssueId, metrics, rotation);
+ return new Application(id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change,
+ outstandingChange, ownershipIssueId, metrics, rotation, rotationStatus);
}
public LockedApplication withBuiltInternally(boolean builtInternally) {
- return new LockedApplication(lock, id, deploymentSpec, validationOverrides, deployments,
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs.withBuiltInternally(builtInternally), change, outstandingChange,
- ownershipIssueId, metrics, rotation);
+ ownershipIssueId, metrics, rotation, rotationStatus);
}
public LockedApplication withProjectId(OptionalLong projectId) {
- return new LockedApplication(lock, id, deploymentSpec, validationOverrides, deployments,
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs.withProjectId(projectId), change, outstandingChange,
- ownershipIssueId, metrics, rotation);
+ ownershipIssueId, metrics, rotation, rotationStatus);
}
public LockedApplication withDeploymentIssueId(IssueId issueId) {
- return new LockedApplication(lock, id, deploymentSpec, validationOverrides, deployments,
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs.with(issueId), change, outstandingChange,
- ownershipIssueId, metrics, rotation);
+ ownershipIssueId, metrics, rotation, rotationStatus);
}
public LockedApplication withJobCompletion(long projectId, JobType jobType, JobStatus.JobRun completion,
Optional<DeploymentJobs.JobError> jobError) {
- return new LockedApplication(lock, id, deploymentSpec, validationOverrides, deployments,
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs.withCompletion(projectId, jobType, completion, jobError),
- change, outstandingChange, ownershipIssueId, metrics, rotation);
+ change, outstandingChange, ownershipIssueId, metrics, rotation, rotationStatus);
}
public LockedApplication withJobTriggering(JobType jobType, JobStatus.JobRun job) {
- return new LockedApplication(lock, id, deploymentSpec, validationOverrides, deployments,
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs.withTriggering(jobType, job), change, outstandingChange,
- ownershipIssueId, metrics, rotation);
+ ownershipIssueId, metrics, rotation, rotationStatus);
}
public LockedApplication withNewDeployment(ZoneId zone, ApplicationVersion applicationVersion, Version version,
@@ -165,51 +172,56 @@ public class LockedApplication {
}
public LockedApplication withoutDeploymentJob(JobType jobType) {
- return new LockedApplication(lock, id, deploymentSpec, validationOverrides, deployments,
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs.without(jobType), change, outstandingChange,
- ownershipIssueId, metrics, rotation);
+ ownershipIssueId, metrics, rotation, rotationStatus);
}
public LockedApplication with(DeploymentSpec deploymentSpec) {
- return new LockedApplication(lock, id, deploymentSpec, validationOverrides, deployments,
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs, change, outstandingChange,
- ownershipIssueId, metrics, rotation);
+ ownershipIssueId, metrics, rotation, rotationStatus);
}
public LockedApplication with(ValidationOverrides validationOverrides) {
- return new LockedApplication(lock, id, deploymentSpec, validationOverrides, deployments,
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs, change, outstandingChange,
- ownershipIssueId, metrics, rotation);
+ ownershipIssueId, metrics, rotation, rotationStatus);
}
public LockedApplication withChange(Change change) {
- return new LockedApplication(lock, id, deploymentSpec, validationOverrides, deployments,
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs, change, outstandingChange,
- ownershipIssueId, metrics, rotation);
+ ownershipIssueId, metrics, rotation, rotationStatus);
}
public LockedApplication withOutstandingChange(Change outstandingChange) {
- return new LockedApplication(lock, id, deploymentSpec, validationOverrides, deployments,
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs, change, outstandingChange,
- ownershipIssueId, metrics, rotation);
+ ownershipIssueId, metrics, rotation, rotationStatus);
}
public LockedApplication withOwnershipIssueId(IssueId issueId) {
- return new LockedApplication(lock, id, deploymentSpec, validationOverrides, deployments,
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs, change, outstandingChange,
- Optional.ofNullable(issueId), metrics, rotation);
+ Optional.ofNullable(issueId), metrics, rotation, rotationStatus);
}
public LockedApplication with(MetricsService.ApplicationMetrics metrics) {
- return new LockedApplication(lock, id, deploymentSpec, validationOverrides, deployments,
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs, change, outstandingChange,
- ownershipIssueId, metrics, rotation);
+ ownershipIssueId, metrics, rotation, rotationStatus);
}
public LockedApplication with(RotationId rotation) {
- return new LockedApplication(lock, id, deploymentSpec, validationOverrides, deployments,
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs, change, outstandingChange,
- ownershipIssueId, metrics, Optional.of(rotation));
+ ownershipIssueId, metrics, Optional.of(rotation), rotationStatus);
+ }
+
+ public LockedApplication withRotationStatus(Map<HostName, RotationStatus> rotationStatus) {
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, change,
+ outstandingChange, ownershipIssueId, metrics, rotation, rotationStatus);
}
/** Don't expose non-leaf sub-objects. */
@@ -220,9 +232,9 @@ public class LockedApplication {
}
private LockedApplication with(Map<ZoneId, Deployment> deployments) {
- return new LockedApplication(lock, id, deploymentSpec, validationOverrides, deployments,
+ return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides, deployments,
deploymentJobs, change, outstandingChange,
- ownershipIssueId, metrics, rotation);
+ ownershipIssueId, metrics, rotation, rotationStatus);
}
@Override
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Deployment.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Deployment.java
index a2433d223dc..c099e856d04 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Deployment.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Deployment.java
@@ -30,7 +30,7 @@ public class Deployment {
public Deployment(ZoneId zone, ApplicationVersion applicationVersion, Version version, Instant deployTime) {
this(zone, applicationVersion, version, deployTime, Collections.emptyMap(), Collections.emptyMap(),
- new DeploymentMetrics(), DeploymentActivity.none);
+ DeploymentMetrics.none, DeploymentActivity.none);
}
public Deployment(ZoneId zone, ApplicationVersion applicationVersion, Version version, Instant deployTime,
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/DeploymentMetrics.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/DeploymentMetrics.java
index c0f7bd6c6a1..35bc86cac6c 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/DeploymentMetrics.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/DeploymentMetrics.java
@@ -2,24 +2,20 @@
package com.yahoo.vespa.hosted.controller.application;
/**
+ * Metrics for a deployment of an application.
+ *
* @author smorgrav
*/
public class DeploymentMetrics {
+ public static final DeploymentMetrics none = new DeploymentMetrics(0, 0, 0, 0, 0);
+
private final double queriesPerSecond;
private final double writesPerSecond;
private final double documentCount;
private final double queryLatencyMillis;
private final double writeLatencyMills;
- DeploymentMetrics() {
- this.queriesPerSecond = 0;
- this.writesPerSecond = 0;
- this.documentCount = 0;
- this.queryLatencyMillis = 0;
- this.writeLatencyMills = 0;
- }
-
public DeploymentMetrics(double queriesPerSecond, double writesPerSecond, double documentCount,
double queryLatencyMillis, double writeLatencyMills) {
this.queriesPerSecond = queriesPerSecond;
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/RotationStatus.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/RotationStatus.java
new file mode 100644
index 00000000000..c9e174e7191
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/RotationStatus.java
@@ -0,0 +1,20 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.application;
+
+/**
+ * Represents the health status of a global rotation.
+ *
+ * @author mpolden
+ */
+public enum RotationStatus {
+
+ /** Rotation has status 'in' and is receiving traffic */
+ in,
+
+ /** Rotation has status 'out' and is *NOT* receiving traffic */
+ out,
+
+ /** Rotation status is currently unknown, or no global rotation has been assigned */
+ unknown
+
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmer.java
index 8db7231c207..349f4390fd1 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmer.java
@@ -11,7 +11,9 @@ import com.yahoo.vespa.hosted.controller.api.integration.organization.User;
import com.yahoo.vespa.hosted.controller.application.ApplicationList;
import com.yahoo.vespa.hosted.controller.tenant.AthenzTenant;
import com.yahoo.vespa.hosted.controller.tenant.Tenant;
+import com.yahoo.yolean.Exceptions;
+import java.io.UncheckedIOException;
import java.time.Duration;
import java.util.NoSuchElementException;
import java.util.Optional;
@@ -35,29 +37,37 @@ public class ApplicationOwnershipConfirmer extends Maintainer {
@Override
protected void maintain() {
- confirmApplicationOwnerships();
- ensureConfirmationResponses();
+ try {
+ confirmApplicationOwnerships();
+ ensureConfirmationResponses();
+ }
+ catch (UncheckedIOException e) {
+ log.log(Level.INFO, () -> "IO exception handling issues, will retry in " + maintenanceInterval() + ": '" + Exceptions.toMessageString(e));
+ }
}
/** File an ownership issue with the owners of all applications we know about. */
private void confirmApplicationOwnerships() {
ApplicationList.from(controller().applications().asList())
- .notPullRequest()
- .hasProductionDeployment()
- .asList()
- .forEach(application -> {
- try {
- Tenant tenant = ownerOf(application.id());
- Optional<IssueId> ourIssueId = application.ownershipIssueId();
- ourIssueId = tenant instanceof AthenzTenant
- ? ownershipIssues.confirmOwnership(ourIssueId, application.id(), propertyIdFor((AthenzTenant) tenant))
- : ownershipIssues.confirmOwnership(ourIssueId, application.id(), userFor(tenant));
- ourIssueId.ifPresent(issueId -> store(issueId, application.id()));
- }
- catch (RuntimeException e) { // Catch errors due to wrong data in the controller, or issues client timeout.
- log.log(Level.WARNING, "Exception caught when attempting to file an issue for " + application.id(), e);
- }
- });
+ .notPullRequest()
+ .withProjectId()
+ .hasProductionDeployment()
+ .asList()
+ .stream()
+ .filter(application -> application.createdAt().isBefore(controller().clock().instant().minus(Duration.ofDays(90))))
+ .forEach(application -> {
+ try {
+ Tenant tenant = ownerOf(application.id());
+ Optional<IssueId> ourIssueId = application.ownershipIssueId();
+ ourIssueId = tenant instanceof AthenzTenant
+ ? ownershipIssues.confirmOwnership(ourIssueId, application.id(), propertyIdFor((AthenzTenant) tenant))
+ : ownershipIssues.confirmOwnership(ourIssueId, application.id(), userFor(tenant));
+ ourIssueId.ifPresent(issueId -> store(issueId, application.id()));
+ }
+ catch (RuntimeException e) { // Catch errors due to wrong data in the controller, or issues client timeout.
+ log.log(Level.WARNING, "Exception caught when attempting to file an issue for " + application.id(), e);
+ }
+ });
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporter.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporter.java
index 91eda31d779..70bc4133772 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporter.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporter.java
@@ -12,7 +12,9 @@ import com.yahoo.vespa.hosted.controller.api.integration.organization.User;
import com.yahoo.vespa.hosted.controller.application.ApplicationList;
import com.yahoo.vespa.hosted.controller.tenant.AthenzTenant;
import com.yahoo.vespa.hosted.controller.tenant.Tenant;
+import com.yahoo.yolean.Exceptions;
+import java.io.UncheckedIOException;
import java.time.Duration;
import java.util.Collection;
import java.util.List;
@@ -45,9 +47,22 @@ public class DeploymentIssueReporter extends Maintainer {
@Override
protected void maintain() {
- maintainDeploymentIssues(controller().applications().asList());
- maintainPlatformIssue(controller().applications().asList());
- escalateInactiveDeploymentIssues(controller().applications().asList());
+ try {
+ maintainDeploymentIssues(applications());
+ maintainPlatformIssue(applications());
+ escalateInactiveDeploymentIssues(applications());
+ }
+ catch (UncheckedIOException e) {
+ log.log(Level.INFO, () -> "IO exception handling issues, will retry in " + maintenanceInterval() + ": '" + Exceptions.toMessageString(e));
+ }
+ }
+
+ /** Returns the applications to maintain issue status for. */
+ private List<Application> applications() {
+ return ApplicationList.from(controller().applications().asList())
+ .withProjectId()
+ .notPullRequest()
+ .asList();
}
/**
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java
index 4dacb2e32d6..1507cc8fedd 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java
@@ -1,16 +1,26 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.maintenance;// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.maintenance;
+import com.yahoo.config.provision.HostName;
import com.yahoo.vespa.hosted.controller.Application;
+import com.yahoo.vespa.hosted.controller.ApplicationController;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.api.integration.MetricsService;
import com.yahoo.vespa.hosted.controller.application.ApplicationList;
import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics;
+import com.yahoo.vespa.hosted.controller.application.RotationStatus;
import com.yahoo.yolean.Exceptions;
import java.io.UncheckedIOException;
import java.time.Duration;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -24,37 +34,78 @@ public class DeploymentMetricsMaintainer extends Maintainer {
private static final Logger log = Logger.getLogger(DeploymentMetricsMaintainer.class.getName());
- DeploymentMetricsMaintainer(Controller controller, Duration duration, JobControl jobControl) {
+ private static final int applicationsToUpdateInParallel = 10;
+
+ private final ApplicationController applications;
+
+ public DeploymentMetricsMaintainer(Controller controller, Duration duration, JobControl jobControl) {
super(controller, duration, jobControl);
+ this.applications = controller.applications();
}
@Override
protected void maintain() {
- boolean hasWarned = false;
- for (Application application : ApplicationList.from(controller().applications().asList()).notPullRequest().asList()) {
- try {
- controller().applications().lockIfPresent(application.id(), lockedApplication ->
- controller().applications().store(lockedApplication.with(controller().metricsService().getApplicationMetrics(application.id()))));
-
- for (Deployment deployment : application.deployments().values()) {
- MetricsService.DeploymentMetrics deploymentMetrics = controller().metricsService()
- .getDeploymentMetrics(application.id(), deployment.zone());
- DeploymentMetrics newMetrics = new DeploymentMetrics(deploymentMetrics.queriesPerSecond(),
- deploymentMetrics.writesPerSecond(),
- deploymentMetrics.documentCount(),
- deploymentMetrics.queryLatencyMillis(),
- deploymentMetrics.writeLatencyMillis());
-
- controller().applications().lockIfPresent(application.id(), lockedApplication ->
- controller().applications().store(lockedApplication.with(deployment.zone(), newMetrics)
- .recordActivityAt(controller().clock().instant(), deployment.zone())));
+ AtomicBoolean hasWarned = new AtomicBoolean(false);
+ List<Application> applicationList = ApplicationList.from(applications.asList()).notPullRequest().asList();
+
+ // Run parallel stream inside a custom ForkJoinPool so that we can control the number of threads used
+ ForkJoinPool pool = new ForkJoinPool(applicationsToUpdateInParallel);
+ pool.submit(() -> {
+ applicationList.parallelStream().forEach(application -> {
+ try {
+ applications.lockIfPresent(application.id(), locked ->
+ applications.store(locked.with(controller().metricsService().getApplicationMetrics(application.id()))));
+
+ applications.lockIfPresent(application.id(), locked ->
+ applications.store(locked.withRotationStatus(rotationStatus(application))));
+
+ for (Deployment deployment : application.deployments().values()) {
+ MetricsService.DeploymentMetrics deploymentMetrics = controller().metricsService()
+ .getDeploymentMetrics(application.id(), deployment.zone());
+ DeploymentMetrics newMetrics = new DeploymentMetrics(deploymentMetrics.queriesPerSecond(),
+ deploymentMetrics.writesPerSecond(),
+ deploymentMetrics.documentCount(),
+ deploymentMetrics.queryLatencyMillis(),
+ deploymentMetrics.writeLatencyMillis());
+
+ applications.lockIfPresent(application.id(), locked ->
+ applications.store(locked.with(deployment.zone(), newMetrics)
+ .recordActivityAt(controller().clock().instant(), deployment.zone())));
+ }
+ } catch (UncheckedIOException e) {
+ if (!hasWarned.getAndSet(true)) {// produce only one warning per maintenance interval
+ log.log(Level.WARNING, "Failed to query metrics service: " + Exceptions.toMessageString(e) +
+ ". Retrying in " + maintenanceInterval());
+ }
}
- } catch (UncheckedIOException e) {
- if (!hasWarned) // produce only one warning per maintenance interval
- log.log(Level.WARNING, "Failed to query metrics service: " + Exceptions.toMessageString(e) +
- ". Retrying in " + maintenanceInterval());
- hasWarned = true;
- }
+ });
+ });
+ pool.shutdown();
+ try {
+ pool.awaitTermination(30, TimeUnit.MINUTES);
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ /** Get global rotation status for application */
+ private Map<HostName, RotationStatus> rotationStatus(Application application) {
+ return applications.rotationRepository().getRotation(application)
+ .map(rotation -> controller().metricsService().getRotationStatus(rotation.name()))
+ .map(rotationStatus -> {
+ Map<HostName, RotationStatus> result = new TreeMap<>();
+ rotationStatus.forEach((hostname, status) -> result.put(hostname, from(status)));
+ return result;
+ })
+ .orElseGet(Collections::emptyMap);
+ }
+
+ private static RotationStatus from(com.yahoo.vespa.hosted.controller.api.integration.routing.RotationStatus status) {
+ switch (status) {
+ case IN: return RotationStatus.in;
+ case OUT: return RotationStatus.out;
+ case UNKNOWN: return RotationStatus.unknown;
+ default: throw new IllegalArgumentException("Unknown API value for rotation status: " + status);
}
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java
index 58e0b8dbeec..3975613835b 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java
@@ -1,4 +1,4 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.persistence;
import com.yahoo.component.Version;
@@ -6,6 +6,7 @@ import com.yahoo.config.application.api.DeploymentSpec;
import com.yahoo.config.application.api.ValidationOverrides;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.HostName;
import com.yahoo.slime.ArrayTraverser;
import com.yahoo.slime.Cursor;
import com.yahoo.slime.Inspector;
@@ -26,18 +27,21 @@ import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs.JobError;
import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics;
import com.yahoo.vespa.hosted.controller.application.JobStatus;
+import com.yahoo.vespa.hosted.controller.application.RotationStatus;
import com.yahoo.vespa.hosted.controller.application.SourceRevision;
import com.yahoo.vespa.hosted.controller.rotation.RotationId;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collection;
+import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.OptionalDouble;
import java.util.OptionalLong;
+import java.util.TreeMap;
/**
* Serializes applications to/from slime.
@@ -49,6 +53,7 @@ public class ApplicationSerializer {
// Application fields
private final String idField = "id";
+ private final String createdAtField = "createdAt";
private final String deploymentSpecField = "deploymentSpecField";
private final String validationOverridesField = "validationOverrides";
private final String deploymentsField = "deployments";
@@ -59,6 +64,7 @@ public class ApplicationSerializer {
private final String writeQualityField = "writeQuality";
private final String queryQualityField = "queryQuality";
private final String rotationField = "rotation";
+ private final String rotationStatusField = "rotationStatus";
// Deployment fields
private final String zoneField = "zone";
@@ -124,13 +130,13 @@ public class ApplicationSerializer {
private final String deploymentMetricsQueryLatencyField = "queryLatencyMillis";
private final String deploymentMetricsWriteLatencyField = "writeLatencyMillis";
-
// ------------------ Serialization
public Slime toSlime(Application application) {
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setString(idField, application.id().serializedForm());
+ root.setLong(createdAtField, application.createdAt().toEpochMilli());
root.setString(deploymentSpecField, application.deploymentSpec().xmlForm());
root.setString(validationOverridesField, application.validationOverrides().xmlForm());
deploymentsToSlime(application.deployments().values(), root.setArray(deploymentsField));
@@ -141,6 +147,7 @@ public class ApplicationSerializer {
root.setDouble(queryQualityField, application.metrics().queryServiceQuality());
root.setDouble(writeQualityField, application.metrics().writeServiceQuality());
application.rotation().ifPresent(rotation -> root.setString(rotationField, rotation.id().asString()));
+ toSlime(application.rotationStatus(), root.setArray(rotationStatusField));
return slime;
}
@@ -268,12 +275,21 @@ public class ApplicationSerializer {
toSlime(deploying.application().get(), object);
}
+ private void toSlime(Map<HostName, RotationStatus> rotationStatus, Cursor array) {
+ rotationStatus.forEach((hostname, status) -> {
+ Cursor object = array.addObject();
+ object.setString("hostname", hostname.value());
+ object.setString("status", status.name());
+ });
+ }
+
// ------------------ Deserialization
public Application fromSlime(Slime slime) {
Inspector root = slime.get();
ApplicationId id = ApplicationId.fromSerializedForm(root.field(idField).asString());
+ Instant createdAt = Instant.ofEpochMilli(root.field(createdAtField).asLong());
DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString(), false);
ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString());
List<Deployment> deployments = deploymentsFromSlime(root.field(deploymentsField));
@@ -284,9 +300,10 @@ public class ApplicationSerializer {
ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(),
root.field(writeQualityField).asDouble());
Optional<RotationId> rotation = rotationFromSlime(root.field(rotationField));
+ Map<HostName, RotationStatus> rotationStatus = rotationStatusFromSlime(root.field(rotationStatusField));
- return new Application(id, deploymentSpec, validationOverrides, deployments, deploymentJobs, deploying,
- outstandingChange, ownershipIssueId, metrics, rotation);
+ return new Application(id, createdAt, deploymentSpec, validationOverrides, deployments, deploymentJobs, deploying,
+ outstandingChange, ownershipIssueId, metrics, rotation, rotationStatus);
}
private List<Deployment> deploymentsFromSlime(Inspector array) {
@@ -317,6 +334,19 @@ public class ApplicationSerializer {
object.field(deploymentMetricsWriteLatencyField).asDouble());
}
+ private Map<HostName, RotationStatus> rotationStatusFromSlime(Inspector object) {
+ if (!object.valid()) {
+ return Collections.emptyMap();
+ }
+ Map<HostName, RotationStatus> rotationStatus = new TreeMap<>();
+ object.traverse((ArrayTraverser) (idx, inspect) -> {
+ HostName hostname = HostName.from(inspect.field("hostname").asString());
+ RotationStatus status = RotationStatus.valueOf(inspect.field("status").asString());
+ rotationStatus.put(hostname, status);
+ });
+ return Collections.unmodifiableMap(rotationStatus);
+ }
+
private Map<ClusterSpec.Id, ClusterInfo> clusterInfoMapFromSlime(Inspector object) {
Map<ClusterSpec.Id, ClusterInfo> map = new HashMap<>();
object.traverse((String name, Inspector obect) -> map.put(new ClusterSpec.Id(name), clusterInfoFromSlime(obect)));
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
index 154c4e632de..808bb2e716a 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
@@ -52,7 +52,6 @@ import com.yahoo.vespa.hosted.controller.api.integration.configserver.Log;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.Logs;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
-import com.yahoo.vespa.hosted.controller.api.integration.routing.RotationStatus;
import com.yahoo.vespa.hosted.controller.api.integration.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.ApplicationVersion;
@@ -64,6 +63,7 @@ import com.yahoo.vespa.hosted.controller.application.DeploymentCost;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics;
import com.yahoo.vespa.hosted.controller.application.JobStatus;
+import com.yahoo.vespa.hosted.controller.application.RotationStatus;
import com.yahoo.vespa.hosted.controller.application.SourceRevision;
import com.yahoo.vespa.hosted.controller.restapi.ErrorResponse;
import com.yahoo.vespa.hosted.controller.restapi.MessageResponse;
@@ -89,7 +89,6 @@ import java.security.Principal;
import java.time.DayOfWeek;
import java.time.Duration;
import java.util.Arrays;
-import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -443,10 +442,9 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
deploymentObject.setString("environment", deployment.zone().environment().value());
deploymentObject.setString("region", deployment.zone().region().value());
deploymentObject.setString("instance", application.id().instance().value()); // pointless
- controller.applications().rotationRepository().getRotation(application).ifPresent(rotation -> {
- Map<String, RotationStatus> rotationHealthStatus = controller.rotationStatus(rotation);
- setRotationStatus(deployment, rotationHealthStatus, deploymentObject);
- });
+ if (application.rotation().isPresent() && deployment.zone().environment() == Environment.prod) {
+ toSlime(application.rotationStatus(deployment), deploymentObject);
+ }
if (recurseOverDeployments(request)) // List full deployment information when recursive.
toSlime(deploymentObject, new DeploymentId(application.id(), deployment.zone()), deployment, request);
@@ -557,6 +555,11 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
object.setString("gitCommit", revision.get().commit());
}
+ private void toSlime(RotationStatus status, Cursor object) {
+ Cursor bcpStatus = object.setObject("bcpStatus");
+ bcpStatus.setString("rotationStatus", status.name().toUpperCase());
+ }
+
private URI monitoringSystemUri(DeploymentId deploymentId) {
return controller.zoneRegistry().getMonitoringSystemUri(deploymentId);
}
@@ -617,24 +620,18 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
private HttpResponse rotationStatus(String tenantName, String applicationName, String instanceName, String environment, String region) {
ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, instanceName);
Application application = controller.applications().require(applicationId);
+ ZoneId zone = ZoneId.from(environment, region);
if (!application.rotation().isPresent()) {
- throw new NotExistsException("global rotation does not exist for '" + environment + "." + region + "'");
+ throw new NotExistsException("global rotation does not exist for " + application);
+ }
+ Deployment deployment = application.deployments().get(zone);
+ if (deployment == null) {
+ throw new NotExistsException(application + " has no deployment in " + zone);
}
Slime slime = new Slime();
Cursor response = slime.setObject();
-
- Map<String, RotationStatus> rotationStatus = controller.applications().rotationRepository()
- .getRotation(application)
- .map(controller::rotationStatus)
- .orElseGet(Collections::emptyMap);
- for (String rotationEndpoint : rotationStatus.keySet()) {
- if (rotationEndpoint.contains(toDns(environment)) && rotationEndpoint.contains(toDns(region))) {
- Cursor bcpStatusObject = response.setObject("bcpStatus");
- bcpStatusObject.setString("rotationStatus", rotationStatus.getOrDefault(rotationEndpoint, RotationStatus.UNKNOWN).name());
- }
- }
-
+ toSlime(application.rotationStatus(deployment), response);
return new SlimeJsonResponse(slime);
}
@@ -991,28 +988,6 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
}
}
- private void setRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus, Cursor object) {
- if ( ! deployment.zone().environment().equals(Environment.prod)) return;
-
- Cursor bcpStatusObject = object.setObject("bcpStatus");
- bcpStatusObject.setString("rotationStatus", findRotationStatus(deployment, healthStatus).name());
- }
-
- private RotationStatus findRotationStatus(Deployment deployment, Map<String, RotationStatus> healthStatus) {
- for (String endpoint : healthStatus.keySet()) {
- if (endpoint.contains(toDns(deployment.zone().environment().value())) &&
- endpoint.contains(toDns(deployment.zone().region().value()))) {
- return healthStatus.getOrDefault(endpoint, RotationStatus.UNKNOWN);
- }
- }
-
- return RotationStatus.UNKNOWN;
- }
-
- private String toDns(String id) {
- return id.replace('_', '-');
- }
-
private long asLong(String valueOrNull, long defaultWhenNull) {
if (valueOrNull == null) return defaultWhenNull;
try {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/filter/ControllerAuthorizationFilter.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/filter/ControllerAuthorizationFilter.java
index a36a8d8384f..33d53b0becf 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/filter/ControllerAuthorizationFilter.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/filter/ControllerAuthorizationFilter.java
@@ -121,7 +121,8 @@ public class ControllerAuthorizationFilter extends CorsRequestFilterBase {
path.matches("/provision/v2/{*}") ||
path.matches("/screwdriver/v1/trigger/tenant/{*}") ||
path.matches("/os/v1/{*}") ||
- path.matches("/zone/v2/{*}");
+ path.matches("/zone/v2/{*}") ||
+ path.matches("/nodes/v2/{*}");
}
private static boolean isTenantAdminOperation(Path path, Method method) {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java
index 61b921aa6c1..287fbe8c36d 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java
@@ -23,7 +23,6 @@ import com.yahoo.vespa.hosted.controller.api.integration.entity.EntityService;
import com.yahoo.vespa.hosted.controller.api.integration.entity.MemoryEntityService;
import com.yahoo.vespa.hosted.controller.api.integration.github.GitHubMock;
import com.yahoo.vespa.hosted.controller.api.integration.organization.MockOrganization;
-import com.yahoo.vespa.hosted.controller.api.integration.routing.MemoryGlobalRoutingService;
import com.yahoo.vespa.hosted.controller.api.integration.routing.RoutingGenerator;
import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockBuildService;
import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockRunDataStore;
@@ -301,7 +300,6 @@ public final class ControllerTester {
rotationsConfig,
gitHub,
entityService,
- new MemoryGlobalRoutingService(),
zoneRegistryMock,
configServer,
metricsService,
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
index 3b381e21b27..b82855813ba 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
@@ -46,6 +46,7 @@ import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobTy
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.stagingTest;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.systemTest;
import static java.time.temporal.ChronoUnit.MILLIS;
+import static java.util.Collections.emptyList;
import static java.util.Collections.singletonList;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@@ -340,7 +341,7 @@ public class DeploymentTriggerTest {
tester.deployAndNotify(app, changedApplication, true, stagingTest);
readyJobsTrigger.run();
- assertEquals(0, tester.buildService().jobs().size());
+ assertEquals(emptyList(), tester.buildService().jobs());
tester.clock().advance(Duration.ofHours(2)); // ---------------- Exit block window: 20:30
tester.deploymentTrigger().triggerReadyJobs(); // Schedules staging test for the blocked production job(s)
@@ -380,27 +381,17 @@ public class DeploymentTriggerTest {
assertEquals((BuildJob.defaultBuildNumber + 1), tester.application(application.id()).outstandingChange().application().get().buildNumber().getAsLong());
tester.readyJobTrigger().maintain();
- assertTrue(tester.buildService().jobs().isEmpty());
+ // Platform upgrade keeps rolling, since it has already deployed in a production zone.
+ assertEquals(1, tester.buildService().jobs().size());
+ tester.deployAndNotify(application, applicationPackage, true, productionUsEast3);
+ assertEquals(emptyList(), tester.buildService().jobs());
+
- // New component triggers a full deployment of new application version, leaving platform versions alone.
+ // New component triggers a full deployment of new application version, but only after the upgrade is done.
tester.jobCompletion(component).application(application).nextBuildNumber().nextBuildNumber().uploadArtifact(applicationPackage).submit();
tester.deployAndNotify(application, applicationPackage, true, stagingTest);
tester.deployAndNotify(application, applicationPackage, true, systemTest);
tester.deployAndNotify(application, applicationPackage, true, productionUsWest1);
- tester.deployAndNotify(application, applicationPackage, true, systemTest);
- tester.deployAndNotify(application, applicationPackage, true, stagingTest);
- tester.deployAndNotify(application, applicationPackage, true, productionUsEast3);
- tester.deployAndNotify(application, applicationPackage, true, systemTest);
- tester.deployAndNotify(application, applicationPackage, true, stagingTest);
-
- // All tests are done for now, and only the platform change remains.
- assertTrue(tester.buildService().jobs().isEmpty());
- assertEquals(Change.of(v2), tester.application(application.id()).change());
-
- // Exiting block window, staging test is re-run for the last prod zone, which has the old platform.
- clock.advance(Duration.ofHours(1));
- tester.readyJobTrigger().maintain();
- tester.deployAndNotify(application, applicationPackage, true, stagingTest);
tester.deployAndNotify(application, applicationPackage, true, productionUsEast3);
assertFalse(tester.application(application.id()).change().isPresent());
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
index aea809de365..b0b3b352726 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
@@ -27,12 +27,9 @@ import com.yahoo.vespa.hosted.controller.application.SystemApplication;
import com.yahoo.vespa.serviceview.bindings.ApplicationView;
import com.yahoo.vespa.serviceview.bindings.ClusterView;
import com.yahoo.vespa.serviceview.bindings.ServiceView;
-import org.json.JSONException;
-import org.json.JSONObject;
import java.io.IOException;
import java.io.OutputStream;
-import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@@ -53,7 +50,6 @@ public class ConfigServerMock extends AbstractComponent implements ConfigServer
private final Map<ApplicationId, Application> applications = new LinkedHashMap<>();
private final Map<String, EndpointStatus> endpoints = new HashMap<>();
- private final Map<URI, Version> versions = new HashMap<>();
private final NodeRepositoryMock nodeRepository = new NodeRepositoryMock();
private final Map<DeploymentId, ServiceConvergence> serviceStatus = new HashMap<>();
private final Version initialVersion = new Version(6, 1, 0);
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/MetricsServiceMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/MetricsServiceMock.java
index eca78c01e09..d5fa3d14c62 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/MetricsServiceMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/MetricsServiceMock.java
@@ -1,8 +1,11 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.integration;
+import com.yahoo.component.AbstractComponent;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.HostName;
import com.yahoo.vespa.hosted.controller.api.integration.MetricsService;
+import com.yahoo.vespa.hosted.controller.api.integration.routing.RotationStatus;
import com.yahoo.vespa.hosted.controller.api.integration.zone.ZoneId;
import java.util.HashMap;
@@ -11,15 +14,37 @@ import java.util.Map;
/**
* @author bratseth
*/
-public class MetricsServiceMock implements MetricsService {
+public class MetricsServiceMock extends AbstractComponent implements MetricsService {
private final Map<String, Double> metrics = new HashMap<>();
+ private final Map<String, Map<HostName, RotationStatus>> rotationStatus = new HashMap<>();
+
+ public MetricsServiceMock addRotation(String rotationName) {
+ rotationStatus.put(rotationName, new HashMap<>());
+ return this;
+ }
public MetricsServiceMock setMetric(String key, Double value) {
metrics.put(key, value);
return this;
}
+ public MetricsServiceMock setZoneIn(String rotationName, String vipName) {
+ if (!rotationStatus.containsKey(rotationName)) {
+ throw new IllegalArgumentException("Unknown rotation: " + rotationName);
+ }
+ rotationStatus.get(rotationName).put(HostName.from(vipName), RotationStatus.IN);
+ return this;
+ }
+
+ public MetricsServiceMock setZoneOut(String rotationName, String vipName) {
+ if (!rotationStatus.containsKey(rotationName)) {
+ throw new IllegalArgumentException("Unknown rotation: " + rotationName);
+ }
+ rotationStatus.get(rotationName).put(HostName.from(vipName), RotationStatus.OUT);
+ return this;
+ }
+
@Override
public ApplicationMetrics getApplicationMetrics(ApplicationId application) {
return new ApplicationMetrics(metrics.getOrDefault("queryServiceQuality", 0.5),
@@ -43,4 +68,9 @@ public class MetricsServiceMock implements MetricsService {
return result;
}
+ @Override
+ public Map<HostName, RotationStatus> getRotationStatus(String rotationName) {
+ return rotationStatus.get(rotationName);
+ }
+
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmerTest.java
index 555fdb338e8..2694e205a68 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmerTest.java
@@ -59,6 +59,13 @@ public class ApplicationOwnershipConfirmerTest {
confirmer.maintain();
confirmer.maintain();
+ assertFalse("No issue is stored for an application newer than 3 months.", propertyApp.get().ownershipIssueId().isPresent());
+ assertFalse("No issue is stored for an application newer than 3 months.", userApp.get().ownershipIssueId().isPresent());
+
+ tester.clock().advance(Duration.ofDays(91));
+ confirmer.maintain();
+ confirmer.maintain();
+
assertEquals("Confirmation issue has been filed for property owned application.", issueId, propertyApp.get().ownershipIssueId());
assertEquals("Confirmation issue has been filed for user owned application.", issueId, userApp.get().ownershipIssueId());
assertTrue("Both applications have had their responses ensured.", issues.escalatedForProperty && issues.escalatedForUser);
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java
index d3e42bae526..e11440a372c 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java
@@ -1,14 +1,18 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.maintenance;
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Environment;
import com.yahoo.vespa.hosted.controller.Application;
+import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.ControllerTester;
+import com.yahoo.vespa.hosted.controller.api.integration.zone.ZoneId;
+import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.Deployment;
-import com.yahoo.vespa.hosted.controller.persistence.MockCuratorDb;
+import com.yahoo.vespa.hosted.controller.application.RotationStatus;
+import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
+import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
+import com.yahoo.vespa.hosted.controller.integration.MetricsServiceMock;
import org.junit.Test;
import java.time.Duration;
@@ -26,13 +30,11 @@ import static org.junit.Assert.assertFalse;
public class DeploymentMetricsMaintainerTest {
@Test
- public void maintain() {
+ public void updates_metrics() {
ControllerTester tester = new ControllerTester();
ApplicationId appId = tester.createAndDeploy("tenant1", "domain1", "app1",
Environment.dev, 123).id();
- DeploymentMetricsMaintainer maintainer = new DeploymentMetricsMaintainer(tester.controller(),
- Duration.ofDays(1),
- new JobControl(new MockCuratorDb()));
+ DeploymentMetricsMaintainer maintainer = maintainer(tester.controller());
Supplier<Application> app = tester.application(appId);
Supplier<Deployment> deployment = () -> app.get().deployments().values().stream().findFirst().get();
@@ -85,4 +87,50 @@ public class DeploymentMetricsMaintainerTest {
assertEquals(5, deployment.get().activity().lastWritesPerSecond().getAsDouble(), Double.MIN_VALUE);
}
+ @Test
+ public void updates_rotation_status() {
+ DeploymentTester tester = new DeploymentTester();
+ MetricsServiceMock metricsService = tester.controllerTester().metricsService();
+ DeploymentMetricsMaintainer maintainer = maintainer(tester.controller());
+ Application application = tester.createApplication("app1", "tenant1", 1, 1L);
+ ZoneId zone1 = ZoneId.from("prod", "us-west-1");
+ ZoneId zone2 = ZoneId.from("prod", "us-east-3");
+
+ // Deploy application with global rotation
+ ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
+ .environment(Environment.prod)
+ .globalServiceId("foo")
+ .region(zone1.region().value())
+ .region(zone2.region().value())
+ .build();
+ tester.deployCompletely(application, applicationPackage);
+
+ Supplier<Application> app = () -> tester.application(application.id());
+ Supplier<Deployment> deployment1 = () -> app.get().deployments().get(zone1);
+ Supplier<Deployment> deployment2 = () -> app.get().deployments().get(zone2);
+ String assignedRotation = "rotation-fqdn-01";
+ tester.controllerTester().metricsService().addRotation(assignedRotation);
+
+ // No status gathered yet
+ assertEquals(RotationStatus.unknown, app.get().rotationStatus(deployment1.get()));
+ assertEquals(RotationStatus.unknown, app.get().rotationStatus(deployment2.get()));
+
+ // One rotation out, one in
+ metricsService.setZoneIn(assignedRotation, "proxy.prod.us-west-1.vip.test");
+ metricsService.setZoneOut(assignedRotation,"proxy.prod.us-east-3.vip.test");
+ maintainer.maintain();
+ assertEquals(RotationStatus.in, app.get().rotationStatus(deployment1.get()));
+ assertEquals(RotationStatus.out, app.get().rotationStatus(deployment2.get()));
+
+ // All rotations in
+ metricsService.setZoneIn(assignedRotation,"proxy.prod.us-east-3.vip.test");
+ maintainer.maintain();
+ assertEquals(RotationStatus.in, app.get().rotationStatus(deployment1.get()));
+ assertEquals(RotationStatus.in, app.get().rotationStatus(deployment2.get()));
+ }
+
+ private static DeploymentMetricsMaintainer maintainer(Controller controller) {
+ return new DeploymentMetricsMaintainer(controller, Duration.ofDays(1), new JobControl(controller.curator()));
+ }
+
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
index f1b20694f3d..f2e209436dc 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
@@ -19,6 +19,7 @@ import org.junit.Test;
import java.time.Duration;
import java.time.Instant;
+import java.util.Collections;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.component;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.productionUsCentral1;
@@ -26,6 +27,7 @@ import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobTy
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.productionUsWest1;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.stagingTest;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.systemTest;
+import static java.lang.Enum.valueOf;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
@@ -650,30 +652,15 @@ public class UpgraderTest {
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
clock.advance(Duration.ofHours(1)); // Entering block window after prod job is triggered
tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
- assertTrue(tester.buildService().jobs().isEmpty()); // Next job not triggered due to being in the block window
+ assertEquals(1, tester.buildService().jobs().size()); // Next job triggered because upgrade is already rolling out.
- // One hour passes, time is 19:00, still no upgrade
- tester.clock().advance(Duration.ofHours(1));
- tester.triggerUntilQuiescence();
- assertTrue("No jobs scheduled", tester.buildService().jobs().isEmpty());
-
- // Another hour pass, time is 20:00 and application upgrades
- tester.clock().advance(Duration.ofHours(1));
- tester.triggerUntilQuiescence();
tester.deployAndNotify(app, applicationPackage, true, productionUsCentral1);
tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
assertTrue("All jobs consumed", tester.buildService().jobs().isEmpty());
}
- /**
- * Tests the scenario where a release is deployed to 2 of 3 production zones, then blocked,
- * followed by timeout of the upgrade and a new release.
- * In this case, the blocked production zone should not progress with upgrading to the previous version,
- * and should not upgrade to the new version until the other production zones have it
- * (expected behavior; both requirements are debatable).
- */
@Test
- public void testBlockVersionChangeHalfwayThoughThenNewVersion() {
+ public void testBlockVersionChangeHalfwayThoughThenNewRevision() {
ManualClock clock = new ManualClock(Instant.parse("2017-09-29T16:00:00.00Z")); // Friday, 16:00
DeploymentTester tester = new DeploymentTester(new ControllerTester(clock));
@@ -681,7 +668,6 @@ public class UpgraderTest {
tester.upgradeSystem(version);
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
- .upgradePolicy("canary")
// Block upgrades on weekends and ouside working hours
.blockChange(false, true, "mon-fri", "00-09,17-23", "UTC")
.blockChange(false, true, "sat-sun", "00-23", "UTC")
@@ -701,20 +687,38 @@ public class UpgraderTest {
tester.triggerUntilQuiescence();
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
- tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
clock.advance(Duration.ofHours(1)); // Entering block window after prod job is triggered
- tester.deployAndNotify(app, applicationPackage, true, productionUsCentral1);
- assertTrue(tester.buildService().jobs().isEmpty()); // Next job not triggered due to being in the block window
+ tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
+ assertEquals(1, tester.buildService().jobs().size()); // Next job triggered, as upgrade is already in progress.
+ tester.deployAndNotify(app, applicationPackage, false, productionUsCentral1); // us-central-1 fails, permitting a new revision.
+
+ // A new revision is submitted and starts rolling out.
+ tester.jobCompletion(component).application(app).nextBuildNumber().uploadArtifact(applicationPackage).submit();
- // A day passes and we get a new version
+ // us-central-1 fails again, and isn't re-triggered, because the target is now a revision instead.
+ tester.deployAndNotify(app, applicationPackage, false, productionUsCentral1);
+ assertEquals(2, tester.buildService().jobs().size());
+ tester.deployAndNotify(app, applicationPackage, true, systemTest);
+ tester.deployAndNotify(app, applicationPackage, true, stagingTest);
+ tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
+ // us-central-1 has an older version, and needs a new staging test to begin.
+ tester.deployAndNotify(app, applicationPackage, true, stagingTest);
+
+ // A new version is also released, cancelling the upgrade, since it is failing on a now outdated version.
tester.clock().advance(Duration.ofDays(1));
version = Version.fromString("5.2");
tester.upgradeSystem(version);
tester.upgrader().maintain();
tester.triggerUntilQuiescence();
- assertTrue("Nothing is scheduled", tester.buildService().jobs().isEmpty());
- // Monday morning: We are not blocked
+ // us-central-1 succeeds upgrade to 5.1, with the revision, but us-east-3 wants to proceed with only the revision change.
+ tester.deployAndNotify(app, applicationPackage, true, productionUsCentral1);
+ tester.deployAndNotify(app, applicationPackage, true, systemTest);
+ tester.deployAndNotify(app, applicationPackage, true, stagingTest);
+ tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
+ assertEquals(Collections.emptyList(), tester.buildService().jobs());
+
+ // Monday morning: We are not blocked, and the new version rolls out to all zones.
tester.clock().advance(Duration.ofDays(1)); // Sunday, 17:00
tester.clock().advance(Duration.ofHours(17)); // Monday, 10:00
tester.upgrader().maintain();
@@ -723,7 +727,6 @@ public class UpgraderTest {
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
tester.deployAndNotify(app, applicationPackage, true, productionUsCentral1);
- // us-east-3 has an older version than the other zones, and needs a new staging test run.
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
assertTrue("All jobs consumed", tester.buildService().jobs().isEmpty());
@@ -961,34 +964,24 @@ public class UpgraderTest {
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
clock.advance(Duration.ofHours(1)); // Entering block window after prod job is triggered.
tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
- assertTrue(tester.buildService().jobs().isEmpty()); // Next job not triggered due to being in the block window.
+ assertEquals(1, tester.buildService().jobs().size()); // Next job triggered in spite of block, because it is already rolling out.
- // One hour passes, time is 19:00, still no upgrade.
- tester.clock().advance(Duration.ofHours(1));
- tester.triggerUntilQuiescence();
- assertTrue("No jobs scheduled", tester.buildService().jobs().isEmpty());
-
- // New version is released and upgrades are started in the two first production zones.
+ // New version is released, but upgrades won't start since there's already a revision rolling out.
version = Version.fromString("5.1");
tester.upgradeSystem(version);
- tester.deployAndNotify(app, applicationPackage, true, systemTest);
- tester.deployAndNotify(app, applicationPackage, true, stagingTest);
- tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
-
- // Tests for central-1.
- tester.deployAndNotify(app, applicationPackage, true, systemTest);
- tester.deployAndNotify(app, applicationPackage, true, stagingTest);
+ tester.triggerUntilQuiescence();
+ assertEquals(1, tester.buildService().jobs().size()); // Still just the revision upgrade.
- // Another hour pass, time is 20:00 and both revision and version upgrades are now allowed.
- tester.clock().advance(Duration.ofHours(1));
- tester.triggerUntilQuiescence(); // Tests that trigger now test the full upgrade, since central-1 is still on old versions.
- tester.deployAndNotify(app, applicationPackage, true, productionUsCentral1); // Only upgrade for now.
- // west-1 is now fully upgraded, central-1 only has new version, and east-3 has only old versions.
+ tester.deployAndNotify(app, applicationPackage, true, productionUsCentral1);
+ tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
+ assertEquals(Collections.emptyList(), tester.buildService().jobs()); // No jobs left.
- // These tests were triggered with an upgrade of both version and revision. Since central-1 no longer upgrades version,
- // it ignores the initial version of the staging job, and so the current staging job is OK for both zones.
+ // Upgrade may start, now that revision is rolled out.
+ tester.upgrader().maintain();
+ tester.readyJobTrigger().maintain();
tester.deployAndNotify(app, applicationPackage, true, systemTest);
tester.deployAndNotify(app, applicationPackage, true, stagingTest);
+ tester.deployAndNotify(app, applicationPackage, true, productionUsWest1);
tester.deployAndNotify(app, applicationPackage, true, productionUsCentral1);
tester.deployAndNotify(app, applicationPackage, true, productionUsEast3);
assertTrue("All jobs consumed", tester.buildService().jobs().isEmpty());
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java
index 40d245db8f0..3e09c9078a0 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java
@@ -1,4 +1,4 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.persistence;
import com.yahoo.component.Version;
@@ -6,6 +6,7 @@ import com.yahoo.config.application.api.DeploymentSpec;
import com.yahoo.config.application.api.ValidationOverrides;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.HostName;
import com.yahoo.vespa.config.SlimeUtils;
import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.api.integration.MetricsService;
@@ -22,6 +23,7 @@ import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs.JobError;
import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics;
import com.yahoo.vespa.hosted.controller.application.JobStatus;
+import com.yahoo.vespa.hosted.controller.application.RotationStatus;
import com.yahoo.vespa.hosted.controller.application.SourceRevision;
import com.yahoo.vespa.hosted.controller.rotation.RotationId;
import org.junit.Test;
@@ -30,6 +32,8 @@ import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.Instant;
+import java.time.temporal.ChronoUnit;
+import java.time.temporal.TemporalUnit;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
@@ -37,6 +41,7 @@ import java.util.Map;
import java.util.Optional;
import java.util.OptionalDouble;
import java.util.OptionalLong;
+import java.util.TreeMap;
import static com.yahoo.config.provision.SystemName.main;
import static com.yahoo.vespa.hosted.controller.ControllerTester.writable;
@@ -70,7 +75,7 @@ public class ApplicationSerializerTest {
deployments.add(new Deployment(zone1, applicationVersion1, Version.fromString("1.2.3"), Instant.ofEpochMilli(3))); // One deployment without cluster info and utils
deployments.add(new Deployment(zone2, applicationVersion2, Version.fromString("1.2.3"), Instant.ofEpochMilli(5),
createClusterUtils(3, 0.2), createClusterInfo(3, 4),
- new DeploymentMetrics(2,3,4,5,6),
+ new DeploymentMetrics(2, 3, 4, 5, 6),
DeploymentActivity.create(Optional.of(activityAt), Optional.of(activityAt),
OptionalDouble.of(200), OptionalDouble.of(10))));
@@ -89,7 +94,12 @@ public class ApplicationSerializerTest {
DeploymentJobs deploymentJobs = new DeploymentJobs(projectId, statusList, empty(), true);
+ Map<HostName, RotationStatus> rotationStatus = new TreeMap<>();
+ rotationStatus.put(HostName.from("rot1.fqdn"), RotationStatus.in);
+ rotationStatus.put(HostName.from("rot2.fqdn"), RotationStatus.out);
+
Application original = new Application(ApplicationId.from("t1", "a1", "i1"),
+ Instant.now().truncatedTo(ChronoUnit.MILLIS),
deploymentSpec,
validationOverrides,
deployments, deploymentJobs,
@@ -97,11 +107,13 @@ public class ApplicationSerializerTest {
Change.of(ApplicationVersion.from(new SourceRevision("repo", "master", "deadcafe"), 42)),
Optional.of(IssueId.from("1234")),
new MetricsService.ApplicationMetrics(0.5, 0.9),
- Optional.of(new RotationId("my-rotation")));
+ Optional.of(new RotationId("my-rotation")),
+ rotationStatus);
Application serialized = applicationSerializer.fromSlime(applicationSerializer.toSlime(original));
assertEquals(original.id(), serialized.id());
+ assertEquals(original.createdAt(), serialized.createdAt());
assertEquals(original.deploymentSpec().xmlForm(), serialized.deploymentSpec().xmlForm());
assertEquals(original.validationOverrides().xmlForm(), serialized.validationOverrides().xmlForm());
@@ -129,6 +141,7 @@ public class ApplicationSerializerTest {
assertEquals(original.change(), serialized.change());
assertEquals(original.rotation().get().id(), serialized.rotation().get().id());
+ assertEquals(original.rotationStatus(), serialized.rotationStatus());
// Test cluster utilization
assertEquals(0, serialized.deployments().get(zone1).clusterUtils().size());
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ControllerContainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ControllerContainerTest.java
index 2decbe54cb0..992e3cfcd36 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ControllerContainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ControllerContainerTest.java
@@ -69,7 +69,6 @@ public class ControllerContainerTest {
" <component id='com.yahoo.vespa.hosted.controller.api.integration.dns.MemoryNameService'/>\n" +
" <component id='com.yahoo.vespa.hosted.controller.api.integration.entity.MemoryEntityService'/>\n" +
" <component id='com.yahoo.vespa.hosted.controller.api.integration.github.GitHubMock'/>\n" +
- " <component id='com.yahoo.vespa.hosted.controller.api.integration.routing.MemoryGlobalRoutingService'/>\n" +
" <component id='com.yahoo.vespa.hosted.controller.api.integration.stubs.LoggingDeploymentIssues'/>\n" +
" <component id='com.yahoo.vespa.hosted.controller.api.integration.stubs.DummyOwnershipIssues'/>\n" +
" <component id='com.yahoo.vespa.hosted.controller.api.integration.stubs.MockRunDataStore'/>\n" +
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
index 30c81a0721a..83605c31b42 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
@@ -46,7 +46,9 @@ import com.yahoo.vespa.hosted.controller.athenz.mock.AthenzDbMock;
import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
import com.yahoo.vespa.hosted.controller.deployment.BuildJob;
import com.yahoo.vespa.hosted.controller.integration.ConfigServerMock;
+import com.yahoo.vespa.hosted.controller.integration.MetricsServiceMock;
import com.yahoo.vespa.hosted.controller.maintenance.ContactInformationMaintainer;
+import com.yahoo.vespa.hosted.controller.maintenance.DeploymentMetricsMaintainer;
import com.yahoo.vespa.hosted.controller.maintenance.JobControl;
import com.yahoo.vespa.hosted.controller.restapi.ContainerControllerTester;
import com.yahoo.vespa.hosted.controller.restapi.ContainerTester;
@@ -433,28 +435,7 @@ public class ApplicationApiTest extends ControllerContainerTest {
tester.assertResponse(request("/application/v4/", Request.Method.OPTIONS),
"");
- // GET global rotation status
- tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/global-rotation", GET)
- .userIdentity(USER_ID),
- new File("global-rotation.json"));
-
- // GET global rotation override status
- tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/global-rotation/override", GET)
- .userIdentity(USER_ID),
- new File("global-rotation-get.json"));
-
- // SET global rotation override status
- tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/environment/prod/region/us-west-1/instance/default/global-rotation/override", PUT)
- .userIdentity(USER_ID)
- .data("{\"reason\":\"because i can\"}"),
- new File("global-rotation-put.json"));
-
- // DELETE global rotation override status
- tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/environment/prod/region/us-west-1/instance/default/global-rotation/override", DELETE)
- .userIdentity(USER_ID)
- .data("{\"reason\":\"because i can\"}"),
- new File("global-rotation-delete.json"));
-
+ // Promote from pipeline
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/promote", POST)
.screwdriverIdentity(SCREWDRIVER_ID),
"{\"message\":\"Successfully copied environment hosted-verified-prod to hosted-instance_tenant1_application1_placeholder_component_default\"}");
@@ -480,6 +461,58 @@ public class ApplicationApiTest extends ControllerContainerTest {
}
@Test
+ public void testRotationOverride() {
+ // Setup
+ tester.computeVersionStatus();
+ createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
+ ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
+ .globalServiceId("foo")
+ .region("us-west-1")
+ .region("us-east-3")
+ .build();
+
+ // Create tenant and deploy
+ ApplicationId id = createTenantAndApplication();
+ long projectId = 1;
+ HttpEntity deployData = createApplicationDeployData(Optional.empty(), false);
+ startAndTestChange(controllerTester, id, projectId, applicationPackage, deployData, 100);
+
+ // us-west-1
+ tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/deploy", POST)
+ .data(deployData)
+ .screwdriverIdentity(SCREWDRIVER_ID),
+ new File("deploy-result.json"));
+ controllerTester.jobCompletion(JobType.productionUsWest1)
+ .application(id)
+ .projectId(projectId)
+ .submit();
+ setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-west-1"));
+
+ // GET global rotation status
+ setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-west-1"));
+ tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/global-rotation", GET)
+ .userIdentity(USER_ID),
+ new File("global-rotation.json"));
+
+ // GET global rotation override status
+ tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/global-rotation/override", GET)
+ .userIdentity(USER_ID),
+ new File("global-rotation-get.json"));
+
+ // SET global rotation override status
+ tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/global-rotation/override", PUT)
+ .userIdentity(USER_ID)
+ .data("{\"reason\":\"because i can\"}"),
+ new File("global-rotation-put.json"));
+
+ // DELETE global rotation override status
+ tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/global-rotation/override", DELETE)
+ .userIdentity(USER_ID)
+ .data("{\"reason\":\"because i can\"}"),
+ new File("global-rotation-delete.json"));
+ }
+
+ @Test
public void testDeployDirectly() {
// Setup
tester.computeVersionStatus();
@@ -537,31 +570,13 @@ public class ApplicationApiTest extends ControllerContainerTest {
@Test
public void testSortsDeploymentsAndJobs() {
- // Setup
tester.computeVersionStatus();
- createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
-
- // Create tenant
- tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
- .userIdentity(USER_ID)
- .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
- .nToken(N_TOKEN),
- new File("tenant-without-applications.json"));
-
- // Create application
- tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", POST)
- .userIdentity(USER_ID)
- .nToken(N_TOKEN),
- new File("application-reference.json"));
-
- // Give Screwdriver project deploy access
- addScrewdriverUserToDeployRole(SCREWDRIVER_ID, ATHENZ_TENANT_DOMAIN, new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId("application1"));
// Deploy
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.region("us-east-3")
.build();
- ApplicationId id = ApplicationId.from("tenant1", "application1", "default");
+ ApplicationId id = createTenantAndApplication();
long projectId = 1;
HttpEntity deployData = createApplicationDeployData(Optional.empty(), false);
startAndTestChange(controllerTester, id, projectId, applicationPackage, deployData, 100);
@@ -595,6 +610,8 @@ public class ApplicationApiTest extends ControllerContainerTest {
.projectId(projectId)
.submit();
+ setZoneInRotation("rotation-fqdn-1", ZoneId.from("prod", "us-west-1"));
+
// us-east-3
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-east-3/instance/default/deploy", POST)
.data(deployData)
@@ -1135,6 +1152,23 @@ public class ApplicationApiTest extends ControllerContainerTest {
athenzApplication.addRoleMember(ApplicationAction.deploy, screwdriverIdentity);
}
+ private ApplicationId createTenantAndApplication() {
+ createAthenzDomainWithAdmin(ATHENZ_TENANT_DOMAIN, USER_ID);
+ tester.assertResponse(request("/application/v4/tenant/tenant1", POST)
+ .userIdentity(USER_ID)
+ .data("{\"athensDomain\":\"domain1\", \"property\":\"property1\"}")
+ .nToken(N_TOKEN),
+ new File("tenant-without-applications.json"));
+ tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", POST)
+ .userIdentity(USER_ID)
+ .nToken(N_TOKEN),
+ new File("application-reference.json"));
+ addScrewdriverUserToDeployRole(SCREWDRIVER_ID, ATHENZ_TENANT_DOMAIN,
+ new com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId("application1"));
+
+ return ApplicationId.from("tenant1", "application1", "default");
+ }
+
private void startAndTestChange(ContainerControllerTester controllerTester, ApplicationId application,
long projectId, ApplicationPackage applicationPackage,
HttpEntity deployData, long buildNumber) {
@@ -1198,7 +1232,7 @@ public class ApplicationApiTest extends ControllerContainerTest {
clusterInfo.put(ClusterSpec.Id.from("cluster1"), new ClusterInfo("flavor1", 37, 2, 4, 50, ClusterSpec.Type.content, hostnames));
Map<ClusterSpec.Id, ClusterUtilization> clusterUtils = new HashMap<>();
clusterUtils.put(ClusterSpec.Id.from("cluster1"), new ClusterUtilization(0.3, 0.6, 0.4, 0.3));
- DeploymentMetrics metrics = new DeploymentMetrics(1,2,3,4,5);
+ DeploymentMetrics metrics = new DeploymentMetrics(1, 2, 3, 4, 5);
lockedApplication = lockedApplication
.withClusterInfo(deployment.zone(), clusterInfo)
@@ -1211,10 +1245,22 @@ public class ApplicationApiTest extends ControllerContainerTest {
}
}
+ private MetricsServiceMock metricsService() {
+ return (MetricsServiceMock) tester.container().components().getComponent(MetricsServiceMock.class.getName());
+ }
+
private MockOrganization organization() {
return (MockOrganization) tester.container().components().getComponent(MockOrganization.class.getName());
}
+ private void setZoneInRotation(String rotationName, ZoneId zone) {
+ String vipName = "proxy." + zone.value() + ".vip.test";
+ metricsService().addRotation(rotationName)
+ .setZoneIn(rotationName, vipName);
+
+ new DeploymentMetricsMaintainer(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator())).run();
+ }
+
private void updateContactInformation() {
new ContactInformationMaintainer(tester.controller(), Duration.ofDays(1),
new JobControl(tester.controller().curator()),
diff --git a/docker-api/pom.xml b/docker-api/pom.xml
index 74e463ef157..ae06378043b 100644
--- a/docker-api/pom.xml
+++ b/docker-api/pom.xml
@@ -128,6 +128,12 @@
<artifactId>mockito-core</artifactId>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>testutil</artifactId>
+ <version>${project.version}</version>
+ <scope>test</scope>
+ </dependency>
</dependencies>
<build>
diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/ContainerResources.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/ContainerResources.java
index 4c538d6a194..346223d0e7e 100644
--- a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/ContainerResources.java
+++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/ContainerResources.java
@@ -7,8 +7,8 @@ package com.yahoo.vespa.hosted.dockerapi;
public class ContainerResources {
public static final ContainerResources UNLIMITED = ContainerResources.from(0, 0);
- public final int cpuShares;
- public final long memoryBytes;
+ private final int cpuShares;
+ private final long memoryBytes;
ContainerResources(int cpuShares, long memoryBytes) {
this.cpuShares = cpuShares;
@@ -21,6 +21,14 @@ public class ContainerResources {
(long) ((1L << 30) * memoryGb));
}
+ public int cpuShares() {
+ return cpuShares;
+ }
+
+ public long memoryBytes() {
+ return memoryBytes;
+ }
+
@Override
public boolean equals(Object o) {
if (this == o) return true;
diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/ContainerStatsImpl.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/ContainerStats.java
index a56c1e41a51..738a65bc08b 100644
--- a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/ContainerStatsImpl.java
+++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/ContainerStats.java
@@ -10,14 +10,14 @@ import java.util.Optional;
*
* @author freva
*/
-public class ContainerStatsImpl implements Docker.ContainerStats {
+public class ContainerStats {
private final Map<String, Object> networks;
private final Map<String, Object> cpuStats;
private final Map<String, Object> memoryStats;
private final Map<String, Object> blkioStats;
- public ContainerStatsImpl(Map<String, Object> networks, Map<String, Object> cpuStats,
- Map<String, Object> memoryStats, Map<String, Object> blkioStats) {
+ public ContainerStats(Map<String, Object> networks, Map<String, Object> cpuStats,
+ Map<String, Object> memoryStats, Map<String, Object> blkioStats) {
// Network stats are null when container uses host network
this.networks = Optional.ofNullable(networks).orElse(Collections.emptyMap());
this.cpuStats = cpuStats;
diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/CreateContainerCommandImpl.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/CreateContainerCommandImpl.java
index d95f7b7b8e1..cf168df4634 100644
--- a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/CreateContainerCommandImpl.java
+++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/CreateContainerCommandImpl.java
@@ -6,6 +6,7 @@ import com.github.dockerjava.api.command.CreateContainerCmd;
import com.github.dockerjava.api.model.Bind;
import com.github.dockerjava.api.model.Capability;
import com.github.dockerjava.api.model.Ulimit;
+import com.yahoo.vespa.hosted.dockerapi.exception.DockerException;
import java.net.Inet6Address;
import java.net.InetAddress;
@@ -24,6 +25,7 @@ import java.util.stream.IntStream;
import java.util.stream.Stream;
class CreateContainerCommandImpl implements Docker.CreateContainerCommand {
+
private final DockerClient docker;
private final DockerImage dockerImage;
private final ContainerResources containerResources;
@@ -148,8 +150,8 @@ class CreateContainerCommandImpl implements Docker.CreateContainerCommand {
final CreateContainerCmd containerCmd = docker
.createContainerCmd(dockerImage.asString())
- .withCpuShares(containerResources.cpuShares)
- .withMemory(containerResources.memoryBytes)
+ .withCpuShares(containerResources.cpuShares())
+ .withMemory(containerResources.memoryBytes())
.withName(containerName.asString())
.withHostName(hostName)
.withLabels(labels)
@@ -202,11 +204,11 @@ class CreateContainerCommandImpl implements Docker.CreateContainerCommand {
.skip(1)
.collect(Collectors.joining(" "));
- return String.join(" ",
+ return Stream.of(
"--name " + containerName.asString(),
"--hostname " + hostName,
- "--cpu-shares " + containerResources.cpuShares,
- "--memory " + containerResources.memoryBytes,
+ "--cpu-shares " + containerResources.cpuShares(),
+ "--memory " + containerResources.memoryBytes(),
toRepeatedOption("--label", labelList),
toRepeatedOption("--ulimit", ulimitList),
toRepeatedOption("--env", environmentAssignments),
@@ -219,7 +221,9 @@ class CreateContainerCommandImpl implements Docker.CreateContainerCommand {
toOptionalOption("--entrypoint", entrypointExecuteable),
toFlagOption("--privileged", privileged),
dockerImage.asString(),
- entrypointArgs);
+ entrypointArgs)
+ .filter(s -> !s.isEmpty())
+ .collect(Collectors.joining(" "));
}
/**
diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/Docker.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/Docker.java
index 5e8a0feb099..8bde491d83b 100644
--- a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/Docker.java
+++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/Docker.java
@@ -2,8 +2,8 @@
package com.yahoo.vespa.hosted.dockerapi;
import java.net.InetAddress;
+import java.time.Duration;
import java.util.List;
-import java.util.Map;
import java.util.Optional;
/**
@@ -12,7 +12,8 @@ import java.util.Optional;
*/
public interface Docker {
/**
- * Must be called before any other method. May be called more than once.
+ * Should only be called by non-host-admin. May be called more than once.
+ * TODO: Remove when migration to host-admin is done
*/
void start();
@@ -61,21 +62,8 @@ public interface Docker {
ContainerName containerName,
String hostName);
- interface ContainerStats {
- Map<String, Object> getNetworks();
- Map<String, Object> getCpuStats();
- Map<String, Object> getMemoryStats();
- Map<String, Object> getBlkioStats();
- }
-
- default boolean networkNATed() {
- return false;
- }
-
Optional<ContainerStats> getContainerStats(ContainerName containerName);
- void createContainer(CreateContainerCommand createContainerCommand);
-
void startContainer(ContainerName containerName);
void stopContainer(ContainerName containerName);
@@ -97,12 +85,10 @@ public interface Docker {
*/
boolean pullImageAsyncIfNeeded(DockerImage image);
- void deleteImage(DockerImage dockerImage);
-
/**
* Deletes the local images that are currently not in use by any container and not recently used.
*/
- void deleteUnusedDockerImages();
+ boolean deleteUnusedDockerImages(List<DockerImage> excludes, Duration minImageAgeToDelete);
/**
* Execute a command in docker container as $VESPA_USER. Will block until the command is finished.
diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImageGarbageCollector.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImageGarbageCollector.java
index 6e728972da9..0ae6004be12 100644
--- a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImageGarbageCollector.java
+++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImageGarbageCollector.java
@@ -3,107 +3,174 @@ package com.yahoo.vespa.hosted.dockerapi;
import com.github.dockerjava.api.model.Image;
import com.github.dockerjava.api.model.Container;
+import com.google.common.base.Strings;
+import com.yahoo.collections.Pair;
+import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
-import java.util.HashMap;
+import java.util.Collections;
+import java.util.HashSet;
import java.util.List;
import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.Function;
+import java.util.logging.Logger;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
+ * This class keeps track of downloaded docker images and helps delete images that have not been recently used
+ *
+ * <p>Definitions:
+ * <ul>
+ * <li>Every image has exactly 1 id</li>
+ * <li>Every image has between 0..n tags, see
+ * <a href="https://docs.docker.com/engine/reference/commandline/tag/">docker tag</a> for more</li>
+ * <li>Every image has 0..1 parent ids</li>
+ * </ul>
+ *
+ * <p>Limitations:
+ * <ol>
+ * <li>Image that has more than 1 tag cannot be deleted by ID</li>
+ * <li>Deleting a tag of an image with multiple tags will only remove the tag, the image with the
+ * remaining tags will remain</li>
+ * <li>Deleting the last tag of an image will delete the entire image.</li>
+ * <li>Image cannot be deleted if:</li>
+ * <ol>
+ * <li>It has 1 or more children</li>
+ * <li>A container uses it</li>
+ * </ol>
+ * </ol>
+ *
* @author freva
*/
-public class DockerImageGarbageCollector {
- private final Duration minAgeImageGc;
- private final Map<String, Instant> lastTimeUsedByImageId = new ConcurrentHashMap<>();
+class DockerImageGarbageCollector {
+ private static final Logger logger = Logger.getLogger(DockerImageGarbageCollector.class.getName());
- public DockerImageGarbageCollector(Duration minAgeImageToDelete) {
- minAgeImageGc = minAgeImageToDelete;
- }
+ private final Map<String, Instant> lastTimeUsedByImageId = new ConcurrentHashMap<>();
+ private final DockerImpl docker;
+ private final Clock clock;
- public void updateLastUsedTimeFor(String imageId) {
- updateLastUsedTimeFor(imageId, Instant.now());
+ DockerImageGarbageCollector(DockerImpl docker) {
+ this(docker, Clock.systemUTC());
}
- void updateLastUsedTimeFor(String imageId, Instant at) {
- lastTimeUsedByImageId.put(imageId, at);
+ DockerImageGarbageCollector(DockerImpl docker, Clock clock) {
+ this.docker = docker;
+ this.clock = clock;
}
/**
- * Generates lists of images that are safe to delete, in the order that is safe to delete them (children before
- * parents). The function starts with the set of all local images and then filters out images that are used now
- * and images that have been used recently (because they might be re-used again in near future).
+ * This method must be called frequently enough to see all containers to know which images are being used
*
- * @param images List of all the local images
- * @param containers List of all the containers, including the ones that are stopped
- * @return List of image tags of unused images, if unused image has no tag, will return image ID instead.
+ * @param excludes List of images (by tag or id) that should not be deleted regardless of their used status
+ * @param minImageAgeToDelete Minimum duration after which an image can be removed if it has not been used
+ * @return true iff at least 1 image was deleted
*/
- public List<DockerImage> getUnusedDockerImages(List<Image> images, List<Container> containers) {
- Map<String, Image> dockerImageByImageId = images.stream().collect(Collectors.toMap(Image::getId, img -> img));
- Map<String, Image> unusedImagesByContainers = filterOutImagesUsedByContainers(dockerImageByImageId, containers);
- Map<String, Image> unusedImagesByRecent = filterOutRecentImages(unusedImagesByContainers);
+ boolean deleteUnusedDockerImages(List<DockerImage> excludes, Duration minImageAgeToDelete) {
+ List<Image> images = docker.listAllImages();
+ List<Container> containers = docker.listAllContainers();
+
+ Map<String, Image> imageByImageId = images.stream().collect(Collectors.toMap(Image::getId, Function.identity()));
+
+ // Find all the ancestors for every local image id, this includes the image id itself
+ Map<String, Set<String>> ancestorsByImageId = images.stream()
+ .map(Image::getId)
+ .collect(Collectors.toMap(
+ Function.identity(),
+ imageId -> {
+ Set<String> ancestors = new HashSet<>();
+ while (!Strings.isNullOrEmpty(imageId)) {
+ ancestors.add(imageId);
+ imageId = Optional.of(imageId).map(imageByImageId::get).map(Image::getParentId).orElse(null);
+ }
+ return ancestors;
+ }
+ ));
- return unusedImagesByRecent.keySet().stream()
+ // The set of images that we want to keep is:
+ // 1. The images that were recently used
+ // 2. The images that were explicitly excluded
+ // 3. All of the ancestors of the images from 1 & 2
+ Set<String> imagesToKeep = Stream
+ .concat(
+ getRecentlyUsedImageIds(images, containers, minImageAgeToDelete).stream(), // 1
+ dockerImageToImageIds(excludes, images).stream()) // 2
+ .flatMap(imageId -> ancestorsByImageId.getOrDefault(imageId, Collections.emptySet()).stream()) // 3
+ .collect(Collectors.toSet());
+
+ // Now take all the images we have locally
+ return imageByImageId.keySet().stream()
+
+ // filter out images we want to keep
+ .filter(imageId -> !imagesToKeep.contains(imageId))
+
+ // Sort images in an order that is safe to delete them in (children before parents)
.sorted((o1, o2) -> {
// If image2 is parent of image1, image1 comes before image2
- if (imageIsDescendantOf(unusedImagesByRecent, o1, o2)) return -1;
+ if (imageIsDescendantOf(imageByImageId, o1, o2)) return -1;
// If image1 is parent of image2, image2 comes before image1
- else if (imageIsDescendantOf(unusedImagesByRecent, o2, o1)) return 1;
+ else if (imageIsDescendantOf(imageByImageId, o2, o1)) return 1;
// Otherwise, sort lexicographically by image name (For testing)
else return o1.compareTo(o2);
})
+
+ // Map image IDs to tags if there are any
.flatMap(imageId -> {
// Deleting an image by image ID with multiple tags will fail -> map IDs to all the tags referring to the ID
- String[] repoTags = unusedImagesByRecent.get(imageId).getRepoTags();
- return (repoTags == null) ? Stream.of(imageId) : Stream.of(repoTags);
+ String[] repoTags = imageByImageId.get(imageId).getRepoTags();
+ return repoTags == null ? Stream.of(imageId) : Stream.of(repoTags);
})
- .map(DockerImage::new)
- .collect(Collectors.toList());
- }
- private Map<String, Image> filterOutImagesUsedByContainers(
- Map<String, Image> dockerImagesByImageId, List<com.github.dockerjava.api.model.Container> containerList) {
- Map<String, Image> filteredDockerImagesByImageId = new HashMap<>(dockerImagesByImageId);
-
- for (com.github.dockerjava.api.model.Container container : containerList) {
- String imageToSpare = container.getImageId();
- do {
- // May be null if two images have have the same parent, the first image will remove the parent, the
- // second will get null.
- Image sparedImage = filteredDockerImagesByImageId.remove(imageToSpare);
- imageToSpare = sparedImage == null ? "" : sparedImage.getParentId();
- } while (!imageToSpare.isEmpty());
- }
-
- return filteredDockerImagesByImageId;
+ // Delete image, if successful also remove last usage time to prevent re-download being instantly deleted
+ .peek(image -> {
+ logger.info("Deleting unused docker image " + image);
+ docker.deleteImage(new DockerImage(image));
+ lastTimeUsedByImageId.remove(image);
+ })
+ .count() > 0;
}
- private Map<String, Image> filterOutRecentImages(Map<String, Image> dockerImageByImageId) {
- Map<String, Image> filteredDockerImagesByImageId = new HashMap<>(dockerImageByImageId);
+ private Set<String> getRecentlyUsedImageIds(List<Image> images, List<Container> containers, Duration minImageAgeToDelete) {
+ final Instant now = clock.instant();
- final Instant now = Instant.now();
- filteredDockerImagesByImageId.keySet().forEach(imageId -> {
- if (! lastTimeUsedByImageId.containsKey(imageId)) lastTimeUsedByImageId.put(imageId, now);
- });
+ // Add any already downloaded image to the list once
+ images.forEach(image -> lastTimeUsedByImageId.putIfAbsent(image.getId(), now));
- lastTimeUsedByImageId.entrySet().stream()
- .filter(entry -> Duration.between(entry.getValue(), now).minus(minAgeImageGc).isNegative())
+ // Update last used time for all current containers
+ containers.forEach(container -> lastTimeUsedByImageId.put(container.getImageId(), now));
+
+ // Return list of images that have been used within minImageAgeToDelete
+ return lastTimeUsedByImageId.entrySet().stream()
+ .filter(entry -> Duration.between(entry.getValue(), now).minus(minImageAgeToDelete).isNegative())
.map(Map.Entry::getKey)
- .forEach(image -> {
- String imageToSpare = image;
- do {
- Image sparedImage = filteredDockerImagesByImageId.remove(imageToSpare);
- imageToSpare = sparedImage == null ? "" : sparedImage.getParentId();
- } while (!imageToSpare.isEmpty());
- });
- return filteredDockerImagesByImageId;
+ .collect(Collectors.toSet());
+ }
+
+ /**
+ * Attempts to map dockerImages, which may be image tags or image ids, to image ids. This only works
+ * if the given tag is actually present locally. This is fine, because if it isn't, we can't delete
+ * it anyway, so no harm is done.
+ */
+ private Set<String> dockerImageToImageIds(List<DockerImage> dockerImages, List<Image> images) {
+ Map<String, String> imageIdByImageTag = images.stream()
+ .flatMap(image -> Optional.ofNullable(image.getRepoTags())
+ .map(Stream::of)
+ .orElseGet(Stream::empty)
+ .map(repoTag -> new Pair<>(repoTag, image.getId())))
+ .collect(Collectors.toMap(Pair::getFirst, Pair::getSecond));
+
+ return dockerImages.stream()
+ .map(DockerImage::asString)
+ .map(tag -> imageIdByImageTag.getOrDefault(tag, tag))
+ .collect(Collectors.toSet());
}
/**
- * Returns true if ancestor is a parent or grand-parent or grand-grand-parent, etc. of img
+ * @return true if ancestor is a parent or grand-parent or grand-grand-parent, etc. of img
*/
private boolean imageIsDescendantOf(Map<String, Image> imageIdToImage, String img, String ancestor) {
while (imageIdToImage.containsKey(img)) {
diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImpl.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImpl.java
index c5c4547f796..69ab697ea27 100644
--- a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImpl.java
+++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImpl.java
@@ -3,7 +3,6 @@ package com.yahoo.vespa.hosted.dockerapi;
import com.github.dockerjava.api.DockerClient;
import com.github.dockerjava.api.command.ExecCreateCmdResponse;
-import com.github.dockerjava.api.command.ExecStartCmd;
import com.github.dockerjava.api.command.InspectContainerResponse;
import com.github.dockerjava.api.command.InspectExecResponse;
import com.github.dockerjava.api.command.InspectImageResponse;
@@ -22,6 +21,9 @@ import com.github.dockerjava.core.command.PullImageResultCallback;
import com.github.dockerjava.jaxrs.JerseyDockerCmdExecFactory;
import com.google.inject.Inject;
import com.yahoo.log.LogLevel;
+import com.yahoo.vespa.hosted.dockerapi.exception.ContainerNotFoundException;
+import com.yahoo.vespa.hosted.dockerapi.exception.DockerException;
+import com.yahoo.vespa.hosted.dockerapi.exception.DockerExecTimeoutException;
import com.yahoo.vespa.hosted.dockerapi.metrics.CounterWrapper;
import com.yahoo.vespa.hosted.dockerapi.metrics.Dimensions;
import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper;
@@ -51,66 +53,40 @@ import static com.yahoo.vespa.hosted.dockerapi.DockerNetworkCreator.NetworkAddre
public class DockerImpl implements Docker {
private static final Logger logger = Logger.getLogger(DockerImpl.class.getName());
- public static final String DOCKER_CUSTOM_MACVLAN_NETWORK_NAME = "vespa-macvlan";
static final String LABEL_NAME_MANAGEDBY = "com.yahoo.vespa.managedby";
+ private static final String DOCKER_CUSTOM_MACVLAN_NETWORK_NAME = "vespa-macvlan";
private static final String FRAMEWORK_CONTAINER_PREFIX = "/";
-
- private final DockerConfig config;
- private final Optional<DockerImageGarbageCollector> dockerImageGC;
- private final int secondsToWaitBeforeKilling;
- private CounterWrapper numberOfDockerDaemonFails;
- private boolean started = false;
+ private static final int SECONDS_TO_WAIT_BEFORE_KILLING = 10;
private final Object monitor = new Object();
private final Set<DockerImage> scheduledPulls = new HashSet<>();
- private DockerClient dockerClient;
+ private final DockerClient dockerClient;
+ private final DockerImageGarbageCollector dockerImageGC;
+ private final CounterWrapper numberOfDockerDaemonFails;
@Inject
public DockerImpl(DockerConfig config, MetricReceiverWrapper metricReceiverWrapper) {
- this.config = config;
-
- secondsToWaitBeforeKilling = Optional.ofNullable(config)
- .map(DockerConfig::secondsToWaitBeforeKillingContainer)
- .orElse(10);
-
- dockerImageGC = Optional.ofNullable(config)
- .map(DockerConfig::imageGCMinTimeToLiveMinutes)
- .map(Duration::ofMinutes)
- .map(DockerImageGarbageCollector::new);
-
- Optional.ofNullable(metricReceiverWrapper).ifPresent(this::setMetrics);
+ this(createDockerClient(config), metricReceiverWrapper);
}
- // For testing
- DockerImpl(final DockerClient dockerClient) {
- this(null, null);
+ DockerImpl(DockerClient dockerClient, MetricReceiverWrapper metricReceiver) {
this.dockerClient = dockerClient;
+ this.dockerImageGC = new DockerImageGarbageCollector(this);
+
+ Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
+ numberOfDockerDaemonFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "daemon.api_fails");
}
@Override
public void start() {
- if (started) return;
- started = true;
-
- if (config != null) {
- dockerClient = createDockerClient(config);
-
- if (!config.networkNATed()) {
- try {
- setupDockerNetworkIfNeeded();
- } catch (Exception e) {
- throw new DockerException("Could not setup docker network", e);
- }
- }
+ try {
+ setupDockerNetworkIfNeeded();
+ } catch (Exception e) {
+ throw new DockerException("Could not setup docker network", e);
}
}
- @Override
- public boolean networkNATed() {
- return config.networkNATed();
- }
-
private void setupDockerNetworkIfNeeded() throws IOException {
if (!dockerClient.listNetworksCmd().withNameFilter(DOCKER_CUSTOM_MACVLAN_NETWORK_NAME).exec().isEmpty()) return;
@@ -140,7 +116,7 @@ public class DockerImpl implements Docker {
}
@Override
- public boolean pullImageAsyncIfNeeded(final DockerImage image) {
+ public boolean pullImageAsyncIfNeeded(DockerImage image) {
try {
synchronized (monitor) {
if (scheduledPulls.contains(image)) return true;
@@ -159,7 +135,7 @@ public class DockerImpl implements Docker {
}
}
- private void removeScheduledPoll(final DockerImage image) {
+ private void removeScheduledPoll(DockerImage image) {
synchronized (monitor) {
scheduledPulls.remove(image);
}
@@ -168,7 +144,7 @@ public class DockerImpl implements Docker {
/**
* Check if a given image is already in the local registry
*/
- boolean imageIsDownloaded(final DockerImage dockerImage) {
+ boolean imageIsDownloaded(DockerImage dockerImage) {
return inspectImage(dockerImage).isPresent();
}
@@ -195,6 +171,8 @@ public class DockerImpl implements Docker {
dockerClient.connectToNetworkCmd()
.withContainerId(containerName.asString())
.withNetworkId(networkName).exec();
+ } catch (NotFoundException e) {
+ throw new ContainerNotFoundException(containerName);
} catch (RuntimeException e) {
numberOfDockerDaemonFails.add();
throw new DockerException("Failed to connect container '" + containerName.asString() +
@@ -222,33 +200,27 @@ public class DockerImpl implements Docker {
*/
private ProcessResult executeInContainerAsUser(ContainerName containerName, String user, Optional<Long> timeoutSeconds, String... command) {
try {
- final ExecCreateCmdResponse response = dockerClient.execCreateCmd(containerName.asString())
- .withCmd(command)
- .withAttachStdout(true)
- .withAttachStderr(true)
- .withUser(user)
- .exec();
+ ExecCreateCmdResponse response = execCreateCmd(containerName, user, command);
ByteArrayOutputStream output = new ByteArrayOutputStream();
ByteArrayOutputStream errors = new ByteArrayOutputStream();
- ExecStartCmd execStartCmd = dockerClient.execStartCmd(response.getId());
- ExecStartResultCallback callback = execStartCmd.exec(new ExecStartResultCallback(output, errors));
+ ExecStartResultCallback callback = dockerClient.execStartCmd(response.getId())
+ .exec(new ExecStartResultCallback(output, errors));
if (timeoutSeconds.isPresent()) {
- if (!callback.awaitCompletion(timeoutSeconds.get(), TimeUnit.SECONDS)) {
- throw new DockerExecTimeoutException(String.format("Command '%s' did not finish within %s seconds.", command[0], timeoutSeconds));
- }
+ if (!callback.awaitCompletion(timeoutSeconds.get(), TimeUnit.SECONDS))
+ throw new DockerExecTimeoutException(String.format(
+ "Command '%s' did not finish within %s seconds.", command[0], timeoutSeconds));
} else {
// Wait for completion no timeout
callback.awaitCompletion();
}
- final InspectExecResponse state = dockerClient.inspectExecCmd(execStartCmd.getExecId()).exec();
- assert !state.isRunning();
- Integer exitCode = state.getExitCode();
- assert exitCode != null;
+ InspectExecResponse state = dockerClient.inspectExecCmd(response.getId()).exec();
+ if (state.isRunning())
+ throw new DockerException("Command '%s' did not finish within %s seconds.");
- return new ProcessResult(exitCode, new String(output.toByteArray()), new String(errors.toByteArray()));
+ return new ProcessResult(state.getExitCode(), new String(output.toByteArray()), new String(errors.toByteArray()));
} catch (RuntimeException | InterruptedException e) {
numberOfDockerDaemonFails.add();
throw new DockerException("Container '" + containerName.asString()
@@ -256,6 +228,19 @@ public class DockerImpl implements Docker {
}
}
+ private ExecCreateCmdResponse execCreateCmd(ContainerName containerName, String user, String... command) {
+ try {
+ return dockerClient.execCreateCmd(containerName.asString())
+ .withCmd(command)
+ .withAttachStdout(true)
+ .withAttachStderr(true)
+ .withUser(user)
+ .exec();
+ } catch (NotFoundException e) {
+ throw new ContainerNotFoundException(containerName);
+ }
+ }
+
private Optional<InspectContainerResponse> inspectContainerCmd(String container) {
try {
return Optional.of(dockerClient.inspectContainerCmd(container).exec());
@@ -273,7 +258,7 @@ public class DockerImpl implements Docker {
DockerStatsCallback statsCallback = dockerClient.statsCmd(containerName.asString()).exec(new DockerStatsCallback());
statsCallback.awaitCompletion(5, TimeUnit.SECONDS);
- return statsCallback.stats.map(stats -> new ContainerStatsImpl(
+ return statsCallback.stats.map(stats -> new ContainerStats(
stats.getNetworks(), stats.getCpuStats(), stats.getMemoryStats(), stats.getBlkioStats()));
} catch (NotFoundException ignored) {
return Optional.empty();
@@ -284,21 +269,11 @@ public class DockerImpl implements Docker {
}
@Override
- public void createContainer(CreateContainerCommand createContainerCommand) {
- try {
- dockerClient.execCreateCmd(createContainerCommand.toString());
- } catch (NotModifiedException ignored) {
- // If is already created, ignore
- } catch (RuntimeException e) {
- numberOfDockerDaemonFails.add();
- throw new DockerException("Failed to create container '" + createContainerCommand.toString() + "'", e);
- }
- }
-
- @Override
public void startContainer(ContainerName containerName) {
try {
dockerClient.startContainerCmd(containerName.asString()).exec();
+ } catch (NotFoundException e) {
+ throw new ContainerNotFoundException(containerName);
} catch (NotModifiedException ignored) {
// If is already started, ignore
} catch (RuntimeException e) {
@@ -308,9 +283,11 @@ public class DockerImpl implements Docker {
}
@Override
- public void stopContainer(final ContainerName containerName) {
+ public void stopContainer(ContainerName containerName) {
try {
- dockerClient.stopContainerCmd(containerName.asString()).withTimeout(secondsToWaitBeforeKilling).exec();
+ dockerClient.stopContainerCmd(containerName.asString()).withTimeout(SECONDS_TO_WAIT_BEFORE_KILLING).exec();
+ } catch (NotFoundException e) {
+ throw new ContainerNotFoundException(containerName);
} catch (NotModifiedException ignored) {
// If is already stopped, ignore
} catch (RuntimeException e) {
@@ -322,14 +299,9 @@ public class DockerImpl implements Docker {
@Override
public void deleteContainer(ContainerName containerName) {
try {
- dockerImageGC.ifPresent(imageGC -> {
- Optional<InspectContainerResponse> inspectResponse = inspectContainerCmd(containerName.asString());
- inspectResponse.ifPresent(response -> imageGC.updateLastUsedTimeFor(response.getImageId()));
- });
-
dockerClient.removeContainerCmd(containerName.asString()).exec();
- } catch (NotFoundException ignored) {
- // If container doesn't exist ignore
+ } catch (NotFoundException e) {
+ throw new ContainerNotFoundException(containerName);
} catch (RuntimeException e) {
numberOfDockerDaemonFails.add();
throw new DockerException("Failed to delete container '" + containerName.asString() + "'", e);
@@ -366,7 +338,7 @@ public class DockerImpl implements Docker {
.orElse(Stream.empty());
}
- private boolean isManagedBy(final com.github.dockerjava.api.model.Container container, String manager) {
+ private boolean isManagedBy(com.github.dockerjava.api.model.Container container, String manager) {
final Map<String, String> labels = container.getLabels();
return labels != null && manager.equals(labels.get(LABEL_NAME_MANAGEDBY));
}
@@ -375,7 +347,7 @@ public class DockerImpl implements Docker {
return encodedContainerName.substring(FRAMEWORK_CONTAINER_PREFIX.length());
}
- private List<com.github.dockerjava.api.model.Container> listAllContainers() {
+ List<com.github.dockerjava.api.model.Container> listAllContainers() {
try {
return dockerClient.listContainersCmd().withShowAll(true).exec();
} catch (RuntimeException e) {
@@ -384,7 +356,7 @@ public class DockerImpl implements Docker {
}
}
- private List<Image> listAllImages() {
+ List<Image> listAllImages() {
try {
return dockerClient.listImagesCmd().withShowAll(true).exec();
} catch (RuntimeException e) {
@@ -393,8 +365,7 @@ public class DockerImpl implements Docker {
}
}
- @Override
- public void deleteImage(final DockerImage dockerImage) {
+ void deleteImage(DockerImage dockerImage) {
try {
dockerClient.removeImageCmd(dockerImage.asString()).exec();
} catch (NotFoundException ignored) {
@@ -406,13 +377,8 @@ public class DockerImpl implements Docker {
}
@Override
- public void deleteUnusedDockerImages() {
- if (!dockerImageGC.isPresent()) return;
-
- List<Image> images = listAllImages();
- List<com.github.dockerjava.api.model.Container> containers = listAllContainers();
-
- dockerImageGC.get().getUnusedDockerImages(images, containers).forEach(this::deleteImage);
+ public boolean deleteUnusedDockerImages(List<DockerImage> excludes, Duration minImageAgeToDelete) {
+ return dockerImageGC.deleteUnusedDockerImages(excludes, minImageAgeToDelete);
}
private class ImagePullCallback extends PullImageResultCallback {
@@ -431,10 +397,8 @@ public class DockerImpl implements Docker {
@Override
public void onComplete() {
- Optional<InspectImageResponse> image = inspectImage(dockerImage);
- if (image.isPresent()) { // Download successful, update image GC with the newly downloaded image
+ if (imageIsDownloaded(dockerImage)) {
logger.log(LogLevel.INFO, "Download completed: " + dockerImage.asString());
- dockerImageGC.ifPresent(imageGC -> imageGC.updateLastUsedTimeFor(image.get().getId()));
removeScheduledPoll(dockerImage);
} else {
throw new DockerClientException("Could not download image: " + dockerImage);
@@ -479,9 +443,4 @@ public class DockerImpl implements Docker {
return DockerClientImpl.getInstance(dockerClientConfig)
.withDockerCmdExecFactory(dockerFactory);
}
-
- void setMetrics(MetricReceiverWrapper metricReceiver) {
- Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
- numberOfDockerDaemonFails = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "daemon.api_fails");
- }
}
diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/exception/ContainerNotFoundException.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/exception/ContainerNotFoundException.java
new file mode 100644
index 00000000000..b237228ee8e
--- /dev/null
+++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/exception/ContainerNotFoundException.java
@@ -0,0 +1,13 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.dockerapi.exception;
+
+import com.yahoo.vespa.hosted.dockerapi.ContainerName;
+
+/**
+ * @author freva
+ */
+public class ContainerNotFoundException extends DockerException {
+ public ContainerNotFoundException(ContainerName containerName) {
+ super("No such container: " + containerName.asString());
+ }
+}
diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerException.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/exception/DockerException.java
index b5b622977fb..df6bb702bf7 100644
--- a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerException.java
+++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/exception/DockerException.java
@@ -1,5 +1,5 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.dockerapi;
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.dockerapi.exception;
/**
* This exception wraps any exception thrown by docker-java
diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerExecTimeoutException.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/exception/DockerExecTimeoutException.java
index a315bc09d0b..39813db5c1e 100644
--- a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerExecTimeoutException.java
+++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/exception/DockerExecTimeoutException.java
@@ -1,5 +1,5 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.dockerapi;
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.dockerapi.exception;
/**
* Runtime exception to be thrown when the exec commands did not finish in time.
@@ -10,7 +10,7 @@ package com.yahoo.vespa.hosted.dockerapi;
* @author smorgrav
*/
@SuppressWarnings("serial")
-public class DockerExecTimeoutException extends RuntimeException {
+public class DockerExecTimeoutException extends DockerException {
public DockerExecTimeoutException(String msg) {
super(msg);
}
diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/exception/package-info.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/exception/package-info.java
new file mode 100644
index 00000000000..a5ec5f6c235
--- /dev/null
+++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/exception/package-info.java
@@ -0,0 +1,5 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+@ExportPackage
+package com.yahoo.vespa.hosted.dockerapi.exception;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/docker-api/src/main/resources/configdefinitions/docker.def b/docker-api/src/main/resources/configdefinitions/docker.def
index 83fee05dff6..3d594e5d62b 100644
--- a/docker-api/src/main/resources/configdefinitions/docker.def
+++ b/docker-api/src/main/resources/configdefinitions/docker.def
@@ -3,13 +3,13 @@ namespace=vespa.hosted.dockerapi
uri string default = "unix:///host/var/run/docker.sock"
-secondsToWaitBeforeKillingContainer int default = 10
maxPerRouteConnections int default = 10
maxTotalConnections int default = 100
connectTimeoutMillis int default = 100000 # 100 sec
readTimeoutMillis int default = 1800000 # 30 min
+# TODO: Remove
+secondsToWaitBeforeKillingContainer int default = 10
isRunningLocally bool default = false
imageGCMinTimeToLiveMinutes int default = 45
-
networkNATed bool default = false
diff --git a/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/DockerImageGarbageCollectionTest.java b/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/DockerImageGarbageCollectionTest.java
index a78c9280e4e..5287d2cb45d 100644
--- a/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/DockerImageGarbageCollectionTest.java
+++ b/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/DockerImageGarbageCollectionTest.java
@@ -5,145 +5,206 @@ import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.github.dockerjava.api.model.Image;
+import com.yahoo.test.ManualClock;
import org.junit.Test;
import java.io.IOException;
import java.time.Duration;
-import java.time.Instant;
import java.util.Arrays;
import java.util.Collections;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
import java.util.stream.Collectors;
-import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.assertThat;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
/**
* @author freva
*/
public class DockerImageGarbageCollectionTest {
+
+ private final ImageGcTester gcTester = new ImageGcTester();
+
@Test
public void noImagesMeansNoUnusedImages() {
- new ImageGcTester(0)
- .withExistingImages()
- .expectUnusedImages();
+ gcTester.withExistingImages()
+ .expectDeletedImages();
}
@Test
public void singleImageWithoutContainersIsUnused() {
- new ImageGcTester(0)
- .withExistingImages(new ImageBuilder("image-1"))
- .expectUnusedImages("image-1");
+ gcTester.withExistingImages(new ImageBuilder("image-1"))
+ // Even though nothing is using the image, we will keep it for at least 1h
+ .expectDeletedImagesAfterMinutes(0)
+ .expectDeletedImagesAfterMinutes(30)
+ .expectDeletedImagesAfterMinutes(30, "image-1");
}
@Test
public void singleImageWithContainerIsUsed() {
- new ImageGcTester(0)
- .withExistingImages(ImageBuilder.forId("image-1"))
+ gcTester.withExistingImages(ImageBuilder.forId("image-1"))
.andExistingContainers(ContainerBuilder.forId("container-1").withImageId("image-1"))
- .expectUnusedImages();
+ .expectDeletedImages();
}
@Test
public void multipleUnusedImagesAreIdentified() {
- new ImageGcTester(0)
- .withExistingImages(
+ gcTester.withExistingImages(
ImageBuilder.forId("image-1"),
ImageBuilder.forId("image-2"))
- .expectUnusedImages("image-1", "image-2");
+ .expectDeletedImages("image-1", "image-2");
}
@Test
public void multipleUnusedLeavesAreIdentified() {
- new ImageGcTester(0)
- .withExistingImages(
+ gcTester.withExistingImages(
ImageBuilder.forId("parent-image"),
ImageBuilder.forId("image-1").withParentId("parent-image"),
ImageBuilder.forId("image-2").withParentId("parent-image"))
- .expectUnusedImages("image-1", "image-2", "parent-image");
+ .expectDeletedImages("image-1", "image-2", "parent-image");
}
@Test
public void unusedLeafWithUsedSiblingIsIdentified() {
- new ImageGcTester(0)
- .withExistingImages(
+ gcTester.withExistingImages(
ImageBuilder.forId("parent-image"),
ImageBuilder.forId("image-1").withParentId("parent-image").withTags("latest"),
ImageBuilder.forId("image-2").withParentId("parent-image").withTags("1.24"))
.andExistingContainers(ContainerBuilder.forId("vespa-node-1").withImageId("image-1"))
- .expectUnusedImages("1.24");
+ .expectDeletedImages("1.24"); // Deleting the only tag will delete the image
}
@Test
public void unusedImagesWithMultipleTags() {
- new ImageGcTester(0)
- .withExistingImages(
+ gcTester.withExistingImages(
ImageBuilder.forId("parent-image"),
ImageBuilder.forId("image-1").withParentId("parent-image")
.withTags("vespa-6", "vespa-6.28", "vespa:latest"))
- .expectUnusedImages("vespa-6", "vespa-6.28", "vespa:latest", "parent-image");
+ .expectDeletedImages("vespa-6", "vespa-6.28", "vespa:latest", "parent-image");
}
@Test
public void taggedImageWithNoContainersIsUnused() {
- new ImageGcTester(0)
- .withExistingImages(ImageBuilder.forId("image-1").withTags("vespa-6"))
- .expectUnusedImages("vespa-6");
+ gcTester.withExistingImages(ImageBuilder.forId("image-1").withTags("vespa-6"))
+ .expectDeletedImages("vespa-6");
}
@Test
public void unusedImagesWithSimpleImageGc() {
- new ImageGcTester(20)
+ gcTester.withExistingImages(ImageBuilder.forId("parent-image"))
+ .expectDeletedImagesAfterMinutes(30)
.withExistingImages(
- ImageBuilder.forId("parent-image").withLastUsedMinutesAgo(25),
- ImageBuilder.forId("image-1").withParentId("parent-image").withLastUsedMinutesAgo(5))
- .expectUnusedImages();
+ ImageBuilder.forId("parent-image"),
+ ImageBuilder.forId("image-1").withParentId("parent-image"))
+ .expectDeletedImagesAfterMinutes(0)
+ .expectDeletedImagesAfterMinutes(30)
+ // At this point, parent-image has been unused for 1h, but image-1 depends on parent-image and it has
+ // only been unused for 30m, so we cannot delete parent-image yet. 30 mins later both can be removed
+ .expectDeletedImagesAfterMinutes(30, "image-1", "parent-image");
}
@Test
- public void unusedImagesWithImageGc() {
- new ImageGcTester(20)
- .withExistingImages(
- ImageBuilder.forId("parent-1").withLastUsedMinutesAgo(40),
- ImageBuilder.forId("parent-2").withTags("p-tag:1").withLastUsedMinutesAgo(10),
- ImageBuilder.forId("image-1-1").withParentId("parent-1").withTags("i-tag:1", "i-tag:2", "i-tag-3").withLastUsedMinutesAgo(5),
- ImageBuilder.forId("image-1-2").withParentId("parent-1").withLastUsedMinutesAgo(25),
- ImageBuilder.forId("image-2-1").withParentId("parent-2").withTags("i-tag:4").withLastUsedMinutesAgo(30))
- .andExistingContainers(
- ContainerBuilder.forId("cont-1").withImageId("image-1-1"))
- .expectUnusedImages("image-1-2", "i-tag:4");
+ public void reDownloadingImageIsNotImmediatelyDeleted() {
+ gcTester.withExistingImages(ImageBuilder.forId("image"))
+ .expectDeletedImages("image") // After 1h we delete image
+ .expectDeletedImagesAfterMinutes(0) // image is immediately re-downloaded, but is not deleted
+ .expectDeletedImagesAfterMinutes(10)
+ .expectDeletedImages("image"); // 1h after re-download it is deleted again
}
- private static class ImageGcTester {
- private static DockerImageGarbageCollector imageGC;
- private List<Image> existingImages = Collections.emptyList();
- private List<com.github.dockerjava.api.model.Container> existingContainers = Collections.emptyList();
+ /** Same scenario as in {@link #multipleUnusedImagesAreIdentified()} */
+ @Test
+ public void doesNotDeleteExcludedByIdImages() {
+ gcTester.withExistingImages(
+ ImageBuilder.forId("parent-image"),
+ ImageBuilder.forId("image-1").withParentId("parent-image"),
+ ImageBuilder.forId("image-2").withParentId("parent-image"))
+ // Normally, image-1 and parent-image should also be deleted, but because we exclude image-1
+ // we cannot delete parent-image, so only image-2 is deleted
+ .expectDeletedImages(Collections.singletonList("image-1"), "image-2");
+ }
- private ImageGcTester(int imageGcMinTimeInMinutes) {
- imageGC = new DockerImageGarbageCollector(Duration.ofMinutes(imageGcMinTimeInMinutes));
- }
+ /** Same as in {@link #doesNotDeleteExcludedByIdImages()} but with tags */
+ @Test
+ public void doesNotDeleteExcludedByTagImages() {
+ gcTester.withExistingImages(
+ ImageBuilder.forId("parent-image").withTags("rhel-6"),
+ ImageBuilder.forId("image-1").withParentId("parent-image").withTags("vespa:6.288.16"),
+ ImageBuilder.forId("image-2").withParentId("parent-image").withTags("vespa:6.289.94"))
+ .expectDeletedImages(Collections.singletonList("vespa:6.288.16"), "vespa:6.289.94");
+ }
- private ImageGcTester withExistingImages(final ImageBuilder... images) {
- this.existingImages = Arrays.stream(images)
+ @Test
+    public void excludingNotDownloadedImageIsNoop() {
+ gcTester.withExistingImages(
+ ImageBuilder.forId("parent-image").withTags("rhel-6"),
+ ImageBuilder.forId("image-1").withParentId("parent-image").withTags("vespa:6.288.16"),
+ ImageBuilder.forId("image-2").withParentId("parent-image").withTags("vespa:6.289.94"))
+ .expectDeletedImages(Collections.singletonList("vespa:6.300.1"), "vespa:6.288.16", "vespa:6.289.94", "rhel-6");
+ }
+
+ private class ImageGcTester {
+ private final DockerImpl docker = mock(DockerImpl.class);
+ private final ManualClock clock = new ManualClock();
+ private final DockerImageGarbageCollector imageGC = new DockerImageGarbageCollector(docker, clock);
+ private final Map<DockerImage, Integer> numDeletes = new HashMap<>();
+ private boolean initialized = false;
+
+ private ImageGcTester withExistingImages(ImageBuilder... images) {
+ when(docker.listAllImages()).thenReturn(Arrays.stream(images)
.map(ImageBuilder::toImage)
- .collect(Collectors.toList());
+ .collect(Collectors.toList()));
return this;
}
- private ImageGcTester andExistingContainers(final ContainerBuilder... containers) {
- this.existingContainers = Arrays.stream(containers)
+ private ImageGcTester andExistingContainers(ContainerBuilder... containers) {
+ when(docker.listAllContainers()).thenReturn(Arrays.stream(containers)
.map(ContainerBuilder::toContainer)
- .collect(Collectors.toList());
+ .collect(Collectors.toList()));
return this;
}
- private void expectUnusedImages(final String... imageIds) {
- final List<DockerImage> expectedUnusedImages = Arrays.stream(imageIds)
+ private ImageGcTester expectDeletedImages(String... imageIds) {
+ return expectDeletedImagesAfterMinutes(60, imageIds);
+ }
+
+ private ImageGcTester expectDeletedImages(List<String> except, String... imageIds) {
+ return expectDeletedImagesAfterMinutes(60, except, imageIds);
+ }
+ private ImageGcTester expectDeletedImagesAfterMinutes(int minutesAfter, String... imageIds) {
+ return expectDeletedImagesAfterMinutes(minutesAfter, Collections.emptyList(), imageIds);
+ }
+
+ private ImageGcTester expectDeletedImagesAfterMinutes(int minutesAfter, List<String> except, String... imageIds) {
+ if (!initialized) {
+ // Run once with a very long expiry to initialize internal state of existing images
+ imageGC.deleteUnusedDockerImages(Collections.emptyList(), Duration.ofDays(999));
+ initialized = true;
+ }
+
+ clock.advance(Duration.ofMinutes(minutesAfter));
+
+ imageGC.deleteUnusedDockerImages(
+ except.stream().map(DockerImage::new).collect(Collectors.toList()),
+ Duration.ofHours(1).minusSeconds(1));
+
+ Arrays.stream(imageIds)
.map(DockerImage::new)
- .collect(Collectors.toList());
+ .forEach(image -> {
+ int newValue = numDeletes.getOrDefault(image, 0) + 1;
+ numDeletes.put(image, newValue);
+ verify(docker, times(newValue)).deleteImage(eq(image));
+ });
- assertThat(imageGC.getUnusedDockerImages(existingImages, existingContainers), is(expectedUnusedImages));
+ verify(docker, times(numDeletes.values().stream().mapToInt(i -> i).sum())).deleteImage(any());
+ return this;
}
}
@@ -185,11 +246,6 @@ public class DockerImageGarbageCollectionTest {
private static ImageBuilder forId(String id) { return new ImageBuilder(id); }
private ImageBuilder withParentId(String parentId) { this.parentId = parentId; return this; }
private ImageBuilder withTags(String... tags) { this.repoTags = tags; return this; }
- private ImageBuilder withLastUsedMinutesAgo(int minutesAgo) {
- ImageGcTester.imageGC.updateLastUsedTimeFor(id, Instant.now().minus(Duration.ofMinutes(minutesAgo)));
- return this;
- }
-
private Image toImage() { return createFrom(Image.class, this); }
}
diff --git a/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/DockerImplTest.java b/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/DockerImplTest.java
index 13ff45808cf..f1a8d4ef65e 100644
--- a/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/DockerImplTest.java
+++ b/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/DockerImplTest.java
@@ -33,6 +33,10 @@ import static org.mockito.Mockito.when;
*/
public class DockerImplTest {
+ private final DockerClient dockerClient = mock(DockerClient.class);
+ private final MetricReceiverWrapper metricReceiver = new MetricReceiverWrapper(MetricReceiver.nullImplementation);
+ private final DockerImpl docker = new DockerImpl(dockerClient, metricReceiver);
+
@Test
public void testExecuteCompletes() {
final String containerId = "container-id";
@@ -40,8 +44,6 @@ public class DockerImplTest {
final String execId = "exec-id";
final int exitCode = 3;
- final DockerClient dockerClient = mock(DockerClient.class);
-
final ExecCreateCmdResponse response = mock(ExecCreateCmdResponse.class);
when(response.getId()).thenReturn(execId);
@@ -64,7 +66,6 @@ public class DockerImplTest {
when(state.isRunning()).thenReturn(false);
when(state.getExitCode()).thenReturn(exitCode);
- final Docker docker = new DockerImpl(dockerClient);
final ProcessResult result = docker.executeInContainer(new ContainerName(containerId), command);
assertThat(result.getExitStatus(), is(exitCode));
}
@@ -87,12 +88,9 @@ public class DockerImplTest {
PullImageCmd pullImageCmd = mock(PullImageCmd.class);
when(pullImageCmd.exec(resultCallback.capture())).thenReturn(null);
- final DockerClient dockerClient = mock(DockerClient.class);
when(dockerClient.inspectImageCmd(image.asString())).thenReturn(imageInspectCmd);
when(dockerClient.pullImageCmd(eq(image.asString()))).thenReturn(pullImageCmd);
- final DockerImpl docker = new DockerImpl(dockerClient);
- docker.setMetrics(new MetricReceiverWrapper(MetricReceiver.nullImplementation));
assertTrue("Should return true, we just scheduled the pull", docker.pullImageAsyncIfNeeded(image));
assertTrue("Should return true, the pull i still ongoing", docker.pullImageAsyncIfNeeded(image));
@@ -114,12 +112,9 @@ public class DockerImplTest {
PullImageCmd pullImageCmd = mock(PullImageCmd.class);
when(pullImageCmd.exec(resultCallback.capture())).thenReturn(null);
- final DockerClient dockerClient = mock(DockerClient.class);
when(dockerClient.inspectImageCmd(image.asString())).thenReturn(imageInspectCmd);
when(dockerClient.pullImageCmd(eq(image.asString()))).thenReturn(pullImageCmd);
- final DockerImpl docker = new DockerImpl(dockerClient);
- docker.setMetrics(new MetricReceiverWrapper(MetricReceiver.nullImplementation));
assertTrue("Should return true, we just scheduled the pull", docker.pullImageAsyncIfNeeded(image));
assertTrue("Should return true, the pull i still ongoing", docker.pullImageAsyncIfNeeded(image));
diff --git a/fnet/src/tests/frt/rpc/invoke.cpp b/fnet/src/tests/frt/rpc/invoke.cpp
index e3bd662214f..dd08f365d58 100644
--- a/fnet/src/tests/frt/rpc/invoke.cpp
+++ b/fnet/src/tests/frt/rpc/invoke.cpp
@@ -1,100 +1,92 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/vespalib/net/socket_spec.h>
+#include <vespa/vespalib/util/benchmark_timer.h>
#include <vespa/fnet/frt/frt.h>
#include <mutex>
#include <condition_variable>
-//-------------------------------------------------------------
+using vespalib::SocketSpec;
+using vespalib::BenchmarkTimer;
-#include "my_crypto_engine.hpp"
-vespalib::CryptoEngine::SP crypto;
+constexpr double timeout = 60.0;
+constexpr double short_timeout = 0.1;
//-------------------------------------------------------------
-std::mutex _delayedReturnCntLock;
-uint32_t _delayedReturnCnt = 0;
-
-uint32_t _phase_simple_cnt = 0;
-uint32_t _phase_void_cnt = 0;
-uint32_t _phase_speed_cnt = 0;
-uint32_t _phase_advanced_cnt = 0;
-uint32_t _phase_error_cnt = 0;
-uint32_t _phase_timeout_cnt = 0;
-uint32_t _phase_abort_cnt = 0;
-uint32_t _phase_echo_cnt = 0;
+#include "my_crypto_engine.hpp"
+vespalib::CryptoEngine::SP crypto;
//-------------------------------------------------------------
-struct LockedReqWait : public FRT_IRequestWait
-{
- std::mutex _condLock; // cond used to signal req done
- std::condition_variable _cond; // cond used to signal req done
- bool _done; // flag indicating req done
-
- std::mutex _lockLock; // lock protecting virtual lock
- bool _lock; // virtual lock
- bool _wasLocked; // was 'locked' when req done
-
- LockedReqWait() : _cond(), _done(false), _lockLock(), _lock(false), _wasLocked(false) {}
- ~LockedReqWait() {}
-
- void lock() {
- std::lock_guard<std::mutex> guard(_lockLock);
- _lock = true;
- }
-
- void unlock() {
- std::lock_guard<std::mutex> guard(_lockLock);
- _lock = false;
- }
-
- bool isLocked() {
- std::lock_guard<std::mutex> guard(_lockLock);
- return _lock;
+class RequestLatch : public FRT_IRequestWait {
+private:
+ FRT_RPCRequest *_req;
+ std::mutex _lock;
+ std::condition_variable _cond;
+public:
+ RequestLatch() : _req(nullptr), _lock(), _cond() {}
+ ~RequestLatch() { ASSERT_TRUE(_req == nullptr); }
+ bool has_req() {
+ std::lock_guard guard(_lock);
+ return (_req != nullptr);
}
-
- void RequestDone(FRT_RPCRequest *) override {
- _wasLocked = isLocked();
- std::lock_guard<std::mutex> guard(_condLock);
- _done = true;
- _cond.notify_one();
+ FRT_RPCRequest *read() {
+ std::unique_lock guard(_lock);
+ _cond.wait(guard, [&req = _req]{ return (req != nullptr); });
+ auto ret = _req;
+ _req = nullptr;
+ _cond.notify_all();
+ return ret;
}
-
- void waitReq() {
- std::unique_lock<std::mutex> guard(_condLock);
- while(!_done) {
- _cond.wait(guard);
- }
+ void write(FRT_RPCRequest *req) {
+ std::unique_lock guard(_lock);
+ _cond.wait(guard, [&req = _req]{ return (req == nullptr); });
+ _req = req;
+ _cond.notify_all();
}
+ void RequestDone(FRT_RPCRequest *req) override { write(req); }
};
//-------------------------------------------------------------
-class DelayedReturn : public FNET_Task
-{
+class MyReq {
private:
FRT_RPCRequest *_req;
-
- DelayedReturn(const DelayedReturn &);
- DelayedReturn &operator=(const DelayedReturn &);
-
public:
- DelayedReturn(FNET_Scheduler *sched, FRT_RPCRequest *req, double delay)
- : FNET_Task(sched),
- _req(req)
+ MyReq(FRT_RPCRequest *req) : _req(req) {}
+ MyReq(const char *method_name)
+ : _req(new FRT_RPCRequest())
{
- {
- std::lock_guard<std::mutex> guard(_delayedReturnCntLock);
- _delayedReturnCnt++;
- }
- Schedule(delay);
+ _req->SetMethodName(method_name);
}
-
- void PerformTask() override
+ MyReq(uint32_t value, bool async, uint32_t error, uint8_t extra)
+ : _req(new FRT_RPCRequest())
{
- _req->Return();
- std::lock_guard<std::mutex> guard(_delayedReturnCntLock);
- _delayedReturnCnt--;
+ _req->SetMethodName("test");
+ _req->GetParams()->AddInt32(value);
+ _req->GetParams()->AddInt32(error);
+ _req->GetParams()->AddInt8(extra);
+ _req->GetParams()->AddInt8((async) ? 1 : 0);
+ }
+ ~MyReq() {
+ if (_req != nullptr) {
+ _req->SubRef();
+ }
+ }
+ MyReq(const MyReq &rhs) = delete;
+ MyReq &operator=(const MyReq &rhs) = delete;
+ FRT_RPCRequest &get() { return *_req; }
+ FRT_RPCRequest *borrow() { return _req; }
+ FRT_RPCRequest *steal() {
+ auto ret = _req;
+ _req = nullptr;
+ return ret;
+ }
+ uint32_t get_int_ret() {
+ ASSERT_TRUE(_req != nullptr);
+ ASSERT_TRUE(_req->CheckReturnTypes("i"));
+ return _req->GetReturn()->GetValue(0)._intval32;
}
};
@@ -103,31 +95,22 @@ public:
class EchoTest : public FRT_Invokable
{
private:
- vespalib::Stash *_echo_stash;
- FRT_Values *_echo_args;
+ vespalib::Stash _echo_stash;
+ FRT_Values _echo_args;
EchoTest(const EchoTest &);
EchoTest &operator=(const EchoTest &);
public:
- EchoTest() : _echo_stash(nullptr), _echo_args(nullptr) {}
- ~EchoTest()
+ EchoTest(FRT_Supervisor *supervisor)
+ : _echo_stash(),
+ _echo_args(_echo_stash)
{
- delete _echo_args;
- delete _echo_stash;
- }
-
- void Init(FRT_Supervisor *supervisor)
- {
- _echo_stash = new vespalib::Stash();
- _echo_args = new FRT_Values(*_echo_stash);
- assert(_echo_stash != nullptr && _echo_args != nullptr);
-
FRT_ReflectionBuilder rb(supervisor);
rb.DefineMethod("echo", "*", "*",
FRT_METHOD(EchoTest::RPC_Echo), this);
- FRT_Values *args = _echo_args;
+ FRT_Values *args = &_echo_args;
args->EnsureFree(16);
args->AddInt8(8);
@@ -179,15 +162,14 @@ public:
args->SetData(&pt_data[2], "dat3", 4);
}
- bool PrepareEchoReq(FRT_RPCRequest *req)
+ bool prepare_params(FRT_RPCRequest &req)
{
FNET_DataBuffer buf;
- req->SetMethodName("echo");
- _echo_args->EncodeCopy(&buf);
- req->GetParams()->DecodeCopy(&buf, buf.GetDataLen());
- return (req->GetParams()->Equals(_echo_args) &&
- _echo_args->Equals(req->GetParams()));
+ _echo_args.EncodeCopy(&buf);
+ req.GetParams()->DecodeCopy(&buf, buf.GetDataLen());
+ return (req.GetParams()->Equals(&_echo_args) &&
+ _echo_args.Equals(req.GetParams()));
}
void RPC_Echo(FRT_RPCRequest *req)
@@ -196,7 +178,7 @@ public:
req->GetParams()->EncodeCopy(&buf);
req->GetReturn()->DecodeCopy(&buf, buf.GetDataLen());
- if (!req->GetReturn()->Equals(_echo_args) ||
+ if (!req->GetReturn()->Equals(&_echo_args) ||
!req->GetReturn()->Equals(req->GetParams()))
{
req->SetError(10000, "Streaming error");
@@ -209,19 +191,16 @@ public:
class TestRPC : public FRT_Invokable
{
private:
- FRT_Supervisor *_supervisor;
- FNET_Scheduler *_scheduler;
uint32_t _intValue;
+ RequestLatch _detached_req;
TestRPC(const TestRPC &);
TestRPC &operator=(const TestRPC &);
public:
- TestRPC(FRT_Supervisor *supervisor, // server supervisor
- FNET_Scheduler *scheduler) // client scheduler
- : _supervisor(supervisor),
- _scheduler(scheduler),
- _intValue(0)
+ TestRPC(FRT_Supervisor *supervisor)
+ : _intValue(0),
+ _detached_req()
{
FRT_ReflectionBuilder rb(supervisor);
@@ -233,7 +212,7 @@ public:
FRT_METHOD(TestRPC::RPC_IncValue), this);
rb.DefineMethod("getValue", "", "i",
FRT_METHOD(TestRPC::RPC_GetValue), this);
- rb.DefineMethod("testFast", "iiibb", "i",
+ rb.DefineMethod("test", "iibb", "i",
FRT_METHOD(TestRPC::RPC_Test), this);
}
@@ -241,10 +220,9 @@ public:
{
FRT_Values &param = *req->GetParams();
uint32_t value = param[0]._intval32;
- uint32_t delay = param[1]._intval32;
- uint32_t error = param[2]._intval32;
- uint8_t extra = param[3]._intval8;
- uint8_t async = param[4]._intval8;
+ uint32_t error = param[1]._intval32;
+ uint8_t extra = param[2]._intval8;
+ uint8_t async = param[3]._intval8;
req->GetReturn()->AddInt32(value);
if (extra != 0) {
@@ -254,49 +232,7 @@ public:
req->SetError(error);
}
if (async != 0) {
- req->Detach();
- if (delay == 0) {
- req->Return();
- } else {
- req->getStash().create<DelayedReturn>(_scheduler, req, ((double)delay) / 1000.0);
- }
- } else {
-
- if (delay > 0) {
-
- const char *suffix = "testFast";
- uint32_t suffix_len = strlen(suffix);
- uint32_t name_len = req->GetMethodNameLen();
- bool remote = req->GetContext()._value.VOIDP != nullptr;
- bool instant = name_len > suffix_len &&
- strcmp(req->GetMethodName() + name_len - suffix_len, suffix) == 0;
-
- if (remote && instant) {
-
- // block, but don't cripple server scheduler...
- // (NB: in 'real life', instant methods should never block)
-
- FastOS_TimeInterface *now = _supervisor->GetTransport()->GetTimeSampler();
- FNET_Scheduler *scheduler = _supervisor->GetScheduler();
- assert(scheduler->GetTimeSampler() == now);
-
- while (delay > 0) {
- if (delay > 20) {
- FastOS_Thread::Sleep(20);
- delay -= 20;
- } else {
- FastOS_Thread::Sleep(delay);
- delay = 0;
- }
- now->SetNow();
- scheduler->CheckTasks();
- }
-
- } else {
-
- FastOS_Thread::Sleep(delay);
- }
- }
+ _detached_req.write(req->Detach());
}
}
@@ -320,607 +256,188 @@ public:
{
req->GetReturn()->AddInt32(_intValue);
}
-};
-//-------------------------------------------------------------
-
-enum {
- OK_RET = 0,
- BOGUS_RET = 1
-};
-
-enum {
- PHASE_NULL = 0,
- PHASE_SETUP,
- PHASE_SIMPLE,
- PHASE_VOID,
- PHASE_SPEED,
- PHASE_ADVANCED,
- PHASE_ERROR,
- PHASE_TIMEOUT,
- PHASE_ABORT,
- PHASE_ECHO,
- PHASE_SHUTDOWN,
- PHASE_ZZZ
-};
-
-const char phase_names[PHASE_ZZZ][32] =
-{
- "nullptr",
- "SETUP",
- "SIMPLE",
- "VOID",
- "SPEED",
- "ADVANCED",
- "ERROR",
- "TIMEOUT",
- "ABORT",
- "ECHO",
- "SHUTDOWN"
-};
-
-enum {
- TIMING_NULL = 0,
- TIMING_INSTANT,
- TIMING_ZZZ
-};
-
-const char timing_names[TIMING_ZZZ][32] =
-{
- "nullptr",
- "INSTANT",
-};
-
-enum {
- HANDLING_NULL = 0,
- HANDLING_SYNC,
- HANDLING_ASYNC,
- HANDLING_ZZZ
-};
-
-const char handling_names[HANDLING_ZZZ][32] =
-{
- "nullptr",
- "SYNC",
- "ASYNC"
+ RequestLatch &detached_req() { return _detached_req; }
};
//-------------------------------------------------------------
-struct State {
+class Fixture
+{
+private:
FRT_Supervisor _client;
FRT_Supervisor _server;
- TestRPC _rpc;
- EchoTest _echo;
- std::string _peerSpec;
- uint32_t _testPhase;
- uint32_t _timing;
- uint32_t _handling;
- double _timeout;
+ vespalib::string _peerSpec;
FRT_Target *_target;
- FRT_RPCRequest *_req;
+ TestRPC _testRPC;
+ EchoTest _echoTest;
+
+public:
+ FRT_Target &target() { return *_target; }
+ FRT_Target *make_bad_target() { return _client.GetTarget("bogus address"); }
+ RequestLatch &detached_req() { return _testRPC.detached_req(); }
+ EchoTest &echo() { return _echoTest; }
- State()
+ Fixture()
: _client(crypto),
_server(crypto),
- _rpc(&_server, _client.GetScheduler()),
- _echo(),
_peerSpec(),
- _testPhase(PHASE_NULL),
- _timing(TIMING_NULL),
- _handling(HANDLING_NULL),
- _timeout(5.0),
_target(nullptr),
- _req(nullptr)
+ _testRPC(&_server),
+ _echoTest(&_server)
{
_client.GetTransport()->SetTCPNoDelay(true);
_server.GetTransport()->SetTCPNoDelay(true);
- _echo.Init(&_server);
+ ASSERT_TRUE(_server.Listen("tcp/0"));
+ ASSERT_TRUE(_server.Start());
+ ASSERT_TRUE(_client.Start());
+ _peerSpec = SocketSpec::from_host_port("localhost", _server.GetListenPort()).spec();
+ _target = _client.GetTarget(_peerSpec.c_str());
+ //---------------------------------------------------------------------
+ MyReq req("frt.rpc.ping");
+ target().InvokeSync(req.borrow(), timeout);
+ ASSERT_TRUE(!req.get().IsError());
}
- void SetTimeout(double timeout)
- {
- _timeout = timeout;
+ ~Fixture() {
+ _client.ShutDown(true);
+ _server.ShutDown(true);
+ _target->SubRef();
}
+};
- void NewReq()
- {
- if (_req != nullptr) {
- _req->SubRef();
- }
- _req = new FRT_RPCRequest();
- }
+//-------------------------------------------------------------
- void FreeReq()
+TEST_F("require that simple invocation works", Fixture()) {
+ MyReq req("inc");
+ req.get().GetParams()->AddInt32(502);
+ f1.target().InvokeSync(req.borrow(), timeout);
+ EXPECT_EQUAL(req.get_int_ret(), 503u);
+}
+
+TEST_F("require that void invocation works", Fixture()) {
{
- if (_req != nullptr) {
- _req->SubRef();
- }
- _req = nullptr;
+ MyReq req("setValue");
+ req.get().GetParams()->AddInt32(40);
+ f1.target().InvokeSync(req.borrow(), timeout);
+ EXPECT_TRUE(req.get().CheckReturnTypes(""));
}
-
- void LostReq()
{
- _req = nullptr;
+ MyReq req("incValue");
+ f1.target().InvokeVoid(req.steal());
}
-
- void PrepareTestMethod()
{
- NewReq();
- if (_timing != TIMING_INSTANT) {
- ASSERT_TRUE(false); // consult your dealer...
- }
- _req->SetMethodName("testFast");
+ MyReq req("incValue");
+ f1.target().InvokeVoid(req.steal());
}
-
- void SetTestParams(uint32_t value, uint32_t delay,
- uint32_t error = FRTE_NO_ERROR,
- uint8_t extra = 0)
{
- _req->GetParams()->AddInt32(value);
- _req->GetParams()->AddInt32(delay);
- _req->GetParams()->AddInt32(error);
- _req->GetParams()->AddInt8(extra);
- bool async = (_handling == HANDLING_ASYNC);
- if (_handling != HANDLING_SYNC &&
- _handling != HANDLING_ASYNC)
- {
- ASSERT_TRUE(false); // consult your dealer...
- }
- _req->GetParams()->AddInt8((async) ? 1 : 0);
+ MyReq req("getValue");
+ f1.target().InvokeSync(req.borrow(), timeout);
+ EXPECT_EQUAL(req.get_int_ret(), 42u);
}
-
- void InvokeSync();
- void InvokeVoid();
- void InvokeAsync(FRT_IRequestWait *w);
- void InvokeTest(uint32_t value,
- uint32_t delay = 0,
- uint32_t error = FRTE_NO_ERROR,
- uint8_t extra = 0);
- void InvokeTestAndAbort(uint32_t value,
- uint32_t delay = 0,
- uint32_t error = FRTE_NO_ERROR,
- uint8_t extra = 0);
- bool WaitForDelayedReturnCount(uint32_t wantedCount, double timeout);
-
-private:
- State(const State &);
- State &operator=(const State &);
-};
-
-
-void
-State::InvokeSync()
-{
- _target->InvokeSync(_req, _timeout);
-}
-
-
-void
-State::InvokeVoid()
-{
- _target->InvokeVoid(_req);
-}
-
-
-void
-State::InvokeAsync(FRT_IRequestWait *w)
-{
- _target->InvokeAsync(_req, _timeout, w);
-}
-
-
-void
-State::InvokeTest(uint32_t value, uint32_t delay,
- uint32_t error, uint8_t extra)
-{
- PrepareTestMethod();
- SetTestParams(value, delay, error, extra);
- InvokeSync();
-}
-
-
-void
-State::InvokeTestAndAbort(uint32_t value, uint32_t delay,
- uint32_t error, uint8_t extra)
-{
- PrepareTestMethod();
- SetTestParams(value, delay, error, extra);
- FRT_SingleReqWait w;
- InvokeAsync(&w);
- _req->Abort();
- w.WaitReq();
}
-bool
-State::WaitForDelayedReturnCount(uint32_t wantedCount, double timeout)
-{
- FastOS_Time timer;
- timer.SetNow();
- for (;;) {
- uint32_t delayedReturnCnt;
+TEST_F("measure minimal invocation latency", Fixture()) {
+ size_t cnt = 0;
+ uint32_t val = 0;
+ BenchmarkTimer timer(1.0);
+ while (timer.has_budget()) {
+ timer.before();
{
- std::lock_guard<std::mutex> guard(_delayedReturnCntLock);
- delayedReturnCnt = _delayedReturnCnt;
- }
- if (delayedReturnCnt == wantedCount) {
- return true;
- }
- if ((timer.MilliSecsToNow() / 1000.0) > timeout) {
- return false;
+ MyReq req("inc");
+ req.get().GetParams()->AddInt32(val);
+ f1.target().InvokeSync(req.borrow(), timeout);
+ ASSERT_TRUE(!req.get().IsError());
+ val = req.get_int_ret();
+ ++cnt;
}
- FastOS_Thread::Sleep(10);
+ timer.after();
}
+ EXPECT_EQUAL(cnt, val);
+ double t = timer.min_time();
+ fprintf(stderr, "latency of invocation: %1.3f ms\n", t * 1000.0);
}
-//-------------------------------------------------------------
-
-bool CheckTypes(FRT_RPCRequest *req, const char *spec) {
- return FRT_Values::CheckTypes(spec, req->GetReturnSpec());
-}
-
-FRT_Value &Get(FRT_RPCRequest *req, uint32_t idx) {
- return req->GetReturn()->GetValue(idx);
-}
-
-//-------------------------------------------------------------
-
-void TestSetup(State *_state) {
- ASSERT_TRUE(_state->_testPhase == PHASE_SETUP);
-
- bool listenOK = _state->_server.Listen("tcp/0");
-
- char spec[64];
- sprintf(spec, "tcp/localhost:%d", _state->_server.GetListenPort());
- _state->_peerSpec = spec;
-
- bool serverStartOK = _state->_server.Start();
- bool clientStartOK = _state->_client.Start();
-
- ASSERT_TRUE(listenOK);
- ASSERT_TRUE(serverStartOK);
- ASSERT_TRUE(clientStartOK);
-
- _state->_target = _state->_client.GetTarget(_state->_peerSpec.c_str());
- _state->NewReq();
- _state->_req->SetMethodName("frt.rpc.ping");
- _state->_target->InvokeSync(_state->_req, 5.0);
- ASSERT_TRUE(!_state->_req->IsError());
-}
-
-
-void TestSimple(State *_state) {
- ASSERT_TRUE(_state->_testPhase == PHASE_SIMPLE);
- _phase_simple_cnt++;
- _state->NewReq();
- _state->_req->SetMethodName("inc");
- _state->_req->GetParams()->AddInt32(502);
- _state->InvokeSync();
- EXPECT_TRUE(!_state->_req->IsError() &&
- CheckTypes(_state->_req, "i") &&
- Get(_state->_req, 0)._intval32 == 503);
+TEST_F("require that abort has no effect on a completed request", Fixture()) {
+ MyReq req(42, false, FRTE_NO_ERROR, 0);
+ f1.target().InvokeSync(req.borrow(), timeout);
+ EXPECT_EQUAL(req.get_int_ret(), 42u);
+ req.get().Abort();
+ EXPECT_EQUAL(req.get_int_ret(), 42u);
}
-
-void TestVoid(State *_state) {
- ASSERT_TRUE(_state->_testPhase == PHASE_VOID);
- _phase_void_cnt++;
-
- _state->NewReq();
- _state->_req->SetMethodName("setValue");
- _state->_req->GetParams()->AddInt32(40);
- _state->InvokeSync();
- EXPECT_TRUE(!_state->_req->IsError() &&
- CheckTypes(_state->_req, ""));
-
- _state->NewReq();
- _state->_req->SetMethodName("incValue");
- _state->InvokeVoid();
- _state->LostReq();
-
- _state->NewReq();
- _state->_req->SetMethodName("incValue");
- _state->InvokeVoid();
- _state->LostReq();
-
- _state->NewReq();
- _state->_req->SetMethodName("getValue");
- _state->InvokeSync();
- EXPECT_TRUE(!_state->_req->IsError() &&
- CheckTypes(_state->_req, "i") &&
- Get(_state->_req, 0)._intval32 == 42);
+TEST_F("require that a request can be responded to at a later time", Fixture()) {
+ RequestLatch result;
+ MyReq req(42, true, FRTE_NO_ERROR, 0);
+ f1.target().InvokeAsync(req.steal(), timeout, &result);
+ EXPECT_TRUE(!result.has_req());
+ f1.detached_req().read()->Return();
+ MyReq ret(result.read());
+ EXPECT_EQUAL(ret.get_int_ret(), 42u);
}
-
-void TestSpeed(State *_state) {
- ASSERT_TRUE(_state->_testPhase == PHASE_SPEED);
- _phase_speed_cnt++;
-
- FastOS_Time start;
- FastOS_Time stop;
- uint32_t val = 0;
- uint32_t cnt = 0;
-
- _state->NewReq();
- FRT_RPCRequest *req = _state->_req;
- FRT_Target *target = _state->_target;
-
- // calibrate cnt to be used
- start.SetNow();
- for (cnt = 0; cnt < 1000000; cnt++) {
- req->SetMethodName("inc");
- req->GetParams()->AddInt32(0);
- target->InvokeSync(req, 5.0);
- if (req->IsError()) {
- break;
- }
- req->Reset(); // ok if no error
- if (start.MilliSecsToNow() > 20.0) {
- break;
- }
- }
- cnt = (cnt == 0) ? 1 : cnt * 10;
-
- fprintf(stderr, "checking invocation latency... (cnt = %d)\n", cnt);
-
- _state->NewReq();
- req = _state->_req;
-
- // actual benchmark
- start.SetNow();
- for (uint32_t i = 0; i < cnt; i++) {
- req->SetMethodName("inc");
- req->GetParams()->AddInt32(val);
- target->InvokeSync(req, 60.0);
- if (req->IsError()) {
- fprintf(stderr, "... rpc error(%d): %s\n",
- req->GetErrorCode(),
- req->GetErrorMessage());
- break;
- }
- val = req->GetReturn()->GetValue(0)._intval32;
- req->Reset(); // ok if no error
+TEST_F("require that a bad target gives connection error", Fixture()) {
+ MyReq req("frt.rpc.ping");
+ {
+ FRT_Target *bad_target = f1.make_bad_target();
+ bad_target->InvokeSync(req.borrow(), timeout);
+ bad_target->SubRef();
}
- stop.SetNow();
- stop -= start;
- double latency = stop.MilliSecs() / (double) cnt;
-
- EXPECT_EQUAL(val, cnt);
- fprintf(stderr, "latency of invocation: %1.3f ms\n", latency);
+ EXPECT_EQUAL(req.get().GetErrorCode(), FRTE_RPC_CONNECTION);
}
-
-void TestAdvanced(State *_state) {
- ASSERT_TRUE(_state->_testPhase == PHASE_ADVANCED);
- _phase_advanced_cnt++;
-
- // Test invocation
- //----------------
- _state->InvokeTest(42);
- EXPECT_TRUE(!_state->_req->IsError() &&
- CheckTypes(_state->_req, "i") &&
- Get(_state->_req, 0)._intval32 == 42);
-
- // Abort has no effect after request is done
- //------------------------------------------
- _state->_req->Abort();
- EXPECT_TRUE(!_state->_req->IsError() &&
- CheckTypes(_state->_req, "i") &&
- Get(_state->_req, 0)._intval32 == 42);
-
- // Test invocation with delay
- //---------------------------
- _state->InvokeTest(58, 100);
- EXPECT_TRUE(!_state->_req->IsError() &&
- CheckTypes(_state->_req, "i") &&
- Get(_state->_req, 0)._intval32 == 58);
+TEST_F("require that non-existing method gives appropriate error", Fixture()) {
+ MyReq req("bogus");
+ f1.target().InvokeSync(req.borrow(), timeout);
+ EXPECT_EQUAL(req.get().GetErrorCode(), FRTE_RPC_NO_SUCH_METHOD);
}
-
-void TestError(State *_state) {
- ASSERT_TRUE(_state->_testPhase == PHASE_ERROR);
- _phase_error_cnt++;
-
- // bad target -> sync error -> avoid deadlock
- //-------------------------------------------
- if (_state->_handling == HANDLING_ASYNC)
- {
- // stash away valid target
- FRT_Target *stateTarget = _state->_target; // backup of valid target
-
- _state->_target = _state->_client.GetTarget("bogus address");
- _state->NewReq();
- _state->_req->SetMethodName("frt.rpc.ping");
- LockedReqWait lw;
- lw.lock();
- _state->InvokeAsync(&lw);
- lw.unlock();
- lw.waitReq();
- EXPECT_TRUE(!lw._wasLocked);
- EXPECT_TRUE(_state->_req->GetErrorCode() == FRTE_RPC_CONNECTION);
-
- // restore valid target
- _state->_target->SubRef();
- _state->_target = stateTarget;
- }
-
- // no such method
- //---------------
- if (_state->_timing == TIMING_INSTANT &&
- _state->_handling == HANDLING_SYNC)
- {
- _state->NewReq();
- _state->_req->SetMethodName("bogus");
- _state->InvokeSync();
- EXPECT_TRUE(_state->_req->GetErrorCode() == FRTE_RPC_NO_SUCH_METHOD);
- }
-
- // wrong params
- //-------------
- if (_state->_handling == HANDLING_SYNC) {
-
- _state->PrepareTestMethod();
- _state->InvokeSync();
- EXPECT_TRUE(_state->_req->GetErrorCode() == FRTE_RPC_WRONG_PARAMS);
-
- _state->PrepareTestMethod();
- _state->_req->GetParams()->AddInt32(42);
- _state->_req->GetParams()->AddInt32(0);
- _state->_req->GetParams()->AddInt8(0);
- _state->_req->GetParams()->AddInt8(0);
- _state->_req->GetParams()->AddInt8(0);
- _state->InvokeSync();
- EXPECT_TRUE(_state->_req->GetErrorCode() == FRTE_RPC_WRONG_PARAMS);
-
- _state->PrepareTestMethod();
- _state->_req->GetParams()->AddInt32(42);
- _state->_req->GetParams()->AddInt32(0);
- _state->_req->GetParams()->AddInt32(0);
- _state->_req->GetParams()->AddInt8(0);
- _state->_req->GetParams()->AddInt8(0);
- _state->_req->GetParams()->AddInt8(0);
- _state->InvokeSync();
- EXPECT_TRUE(_state->_req->GetErrorCode() == FRTE_RPC_WRONG_PARAMS);
- }
-
- // wrong return
- //-------------
- _state->InvokeTest(42, 0, 0, BOGUS_RET);
- EXPECT_TRUE(_state->_req->GetErrorCode() == FRTE_RPC_WRONG_RETURN);
-
- // method failed
- //--------------
- _state->InvokeTest(42, 0, 5000, BOGUS_RET);
- EXPECT_TRUE(_state->_req->GetErrorCode() == 5000);
+TEST_F("require that wrong parameter types give appropriate error", Fixture()) {
+ MyReq req("setValue");
+ req.get().GetParams()->AddString("40");
+ f1.target().InvokeSync(req.borrow(), timeout);
+ EXPECT_EQUAL(req.get().GetErrorCode(), FRTE_RPC_WRONG_PARAMS);
}
-
-void TestTimeout(State *_state) {
- ASSERT_TRUE(_state->_testPhase == PHASE_TIMEOUT);
- _phase_timeout_cnt++;
-
- _state->SetTimeout(0.1);
-
- // Test timeout
- //-------------
- _state->InvokeTest(123, 5000);
- EXPECT_TRUE(_state->_req->GetErrorCode() == FRTE_RPC_TIMEOUT);
- FastOS_Thread::Sleep(5500); // settle
-
- _state->SetTimeout(5.0);
+TEST_F("require that wrong return value types give appropriate error", Fixture()) {
+ MyReq req(42, false, FRTE_NO_ERROR, 1);
+ f1.target().InvokeSync(req.borrow(), timeout);
+ EXPECT_EQUAL(req.get().GetErrorCode(), FRTE_RPC_WRONG_RETURN);
}
-
-void TestAbort(State *_state) {
- ASSERT_TRUE(_state->_testPhase == PHASE_ABORT);
- _phase_abort_cnt++;
-
- // Test abort
- //-----------
- _state->InvokeTestAndAbort(456, 1000);
- EXPECT_TRUE(_state->_req->GetErrorCode() == FRTE_RPC_ABORT);
- FastOS_Thread::Sleep(1500); // settle
+TEST_F("require that the method itself can signal failure", Fixture()) {
+ MyReq req(42, false, 5000, 1);
+ f1.target().InvokeSync(req.borrow(), timeout);
+ EXPECT_EQUAL(req.get().GetErrorCode(), 5000u);
}
-
-void TestEcho(State *_state) {
- ASSERT_TRUE(_state->_testPhase == PHASE_ECHO);
- _phase_echo_cnt++;
-
- // Test echo
- //----------
- _state->NewReq();
- EXPECT_TRUE(_state->_echo.PrepareEchoReq(_state->_req));
- _state->InvokeSync();
- EXPECT_TRUE(!_state->_req->IsError());
- EXPECT_TRUE(_state->_req->GetReturn()->Equals(_state->_req->GetParams()));
+TEST_F("require that invocation can time out", Fixture()) {
+ RequestLatch result;
+ MyReq req(42, true, FRTE_NO_ERROR, 0);
+ f1.target().InvokeAsync(req.steal(), short_timeout, &result);
+ MyReq ret(result.read());
+ f1.detached_req().read()->Return();
+ EXPECT_EQUAL(ret.get().GetErrorCode(), FRTE_RPC_TIMEOUT);
}
+TEST_F("require that invocation can be aborted", Fixture()) {
+ RequestLatch result;
+ MyReq req(42, true, FRTE_NO_ERROR, 0);
+ FRT_RPCRequest *will_be_mine_again_soon = req.steal();
+ f1.target().InvokeAsync(will_be_mine_again_soon, timeout, &result);
+ will_be_mine_again_soon->Abort();
+ MyReq ret(result.read());
+ f1.detached_req().read()->Return();
+ EXPECT_EQUAL(ret.get().GetErrorCode(), FRTE_RPC_ABORT);
+}
-TEST_F("invoke test", State()) {
- State *_state = &f1;
-
- _state->_testPhase = PHASE_SETUP;
- TestSetup(_state);
-
- for (_state->_testPhase = PHASE_SIMPLE;
- _state->_testPhase < PHASE_SHUTDOWN;
- _state->_testPhase++) {
-
- {
- for (_state->_timing = TIMING_INSTANT;
- _state->_timing < TIMING_ZZZ;
- _state->_timing++) {
-
- for (_state->_handling = HANDLING_SYNC;
- _state->_handling < HANDLING_ZZZ;
- _state->_handling++) {
-
- switch (_state->_testPhase) {
- case PHASE_SIMPLE:
- if (_state->_timing == TIMING_INSTANT &&
- _state->_handling == HANDLING_SYNC)
- {
- TestSimple(_state);
- }
- break;
- case PHASE_VOID:
- if (_state->_timing == TIMING_INSTANT &&
- _state->_handling == HANDLING_SYNC)
- {
- TestVoid(_state);
- }
- break;
- case PHASE_SPEED:
- if (_state->_timing == TIMING_INSTANT &&
- _state->_handling == HANDLING_SYNC)
- {
- TestSpeed(_state);
- }
- break;
- case PHASE_ADVANCED:
- TestAdvanced(_state);
- break;
- case PHASE_ERROR:
- TestError(_state);
- break;
- case PHASE_TIMEOUT:
- TestTimeout(_state);
- break;
- case PHASE_ABORT:
- TestAbort(_state);
- break;
- case PHASE_ECHO:
- if (_state->_timing == TIMING_INSTANT &&
- _state->_handling == HANDLING_SYNC)
- {
- TestEcho(_state);
- }
- break;
- default:
- ASSERT_TRUE(false); // consult your dealer...
- }
- }
- }
- }
- }
- _state->_testPhase = PHASE_SHUTDOWN;
- _state->_timing = TIMING_NULL;
- _state->_handling = HANDLING_NULL;
- EXPECT_TRUE(_state->WaitForDelayedReturnCount(0, 120.0));
- _state->FreeReq();
- _state->_client.ShutDown(true);
- _state->_server.ShutDown(true);
- _state->_target->SubRef();
- _state->_target = nullptr;
- EXPECT_TRUE(_delayedReturnCnt == 0);
- EXPECT_TRUE(_phase_simple_cnt == 1);
- EXPECT_TRUE(_phase_void_cnt == 1);
- EXPECT_TRUE(_phase_speed_cnt == 1);
- EXPECT_TRUE(_phase_advanced_cnt == 2);
- EXPECT_TRUE(_phase_error_cnt == 2);
- EXPECT_TRUE(_phase_abort_cnt == 2);
- EXPECT_TRUE(_phase_echo_cnt == 1);
+TEST_F("require that parameters can be echoed as return values", Fixture()) {
+ MyReq req("echo");
+ ASSERT_TRUE(f1.echo().prepare_params(req.get()));
+ f1.target().InvokeSync(req.borrow(), timeout);
+ EXPECT_TRUE(!req.get().IsError());
+ EXPECT_TRUE(req.get().GetReturn()->Equals(req.get().GetParams()));
+ EXPECT_TRUE(req.get().GetParams()->Equals(req.get().GetReturn()));
}
TEST_MAIN() {
diff --git a/model-evaluation/src/main/java/ai/vespa/models/handler/ModelsEvaluationHandler.java b/model-evaluation/src/main/java/ai/vespa/models/handler/ModelsEvaluationHandler.java
index 1c995c255f5..683a1f345d8 100644
--- a/model-evaluation/src/main/java/ai/vespa/models/handler/ModelsEvaluationHandler.java
+++ b/model-evaluation/src/main/java/ai/vespa/models/handler/ModelsEvaluationHandler.java
@@ -16,6 +16,7 @@ import java.io.IOException;
import java.io.OutputStream;
import java.net.URI;
import java.nio.charset.Charset;
+import java.util.Arrays;
import java.util.Optional;
import java.util.concurrent.Executor;
@@ -39,54 +40,37 @@ public class ModelsEvaluationHandler extends ThreadedHttpRequestHandler {
Optional<String> version = path.segment(1);
Optional<String> modelName = path.segment(2);
- if ( ! apiName.isPresent() || ! apiName.get().equalsIgnoreCase(API_ROOT)) {
- return new ErrorResponse(404, "unknown API");
- }
- if ( ! version.isPresent() || ! version.get().equalsIgnoreCase(VERSION_V1)) {
- return new ErrorResponse(404, "unknown API version");
- }
- if ( ! modelName.isPresent()) {
- return listAllModels(request);
- }
- if ( ! modelsEvaluator.models().containsKey(modelName.get())) {
- // TODO: Replace by catching IllegalArgumentException and passing that error message
- return new ErrorResponse(404, "no model with name '" + modelName.get() + "' found");
- }
-
- Model model = modelsEvaluator.models().get(modelName.get());
-
- // The following logic follows from the spec, in that signature and
- // output are optional if the model only has a single function.
- // TODO: Try to avoid recreating that logic here
-
- if (path.segments() == 3) {
- if (model.functions().size() > 1) {
- return listModelDetails(request, modelName.get());
- }
- return listTypeDetails(request, modelName.get());
- }
-
- if (path.segments() == 4) {
- if ( ! path.segment(3).get().equalsIgnoreCase(EVALUATE)) {
- return listTypeDetails(request, modelName.get(), path.segment(3).get());
+ try {
+ if ( ! apiName.isPresent() || ! apiName.get().equalsIgnoreCase(API_ROOT)) {
+ throw new IllegalArgumentException("unknown API");
}
- if (model.functions().stream().anyMatch(f -> f.getName().equalsIgnoreCase(EVALUATE))) {
- return listTypeDetails(request, modelName.get(), path.segment(3).get()); // model has a function "eval"
+ if ( ! version.isPresent() || ! version.get().equalsIgnoreCase(VERSION_V1)) {
+ throw new IllegalArgumentException("unknown API version");
}
- if (model.functions().size() <= 1) {
- return evaluateModel(request, modelName.get());
+ if ( ! modelName.isPresent()) {
+ return listAllModels(request);
}
- // TODO: Replace by catching IllegalArgumentException and passing that error message
- return new ErrorResponse(404, "attempt to evaluate model without specifying function");
- }
+ Model model = modelsEvaluator.requireModel(modelName.get());
- if (path.segments() == 5) {
- if (path.segment(4).get().equalsIgnoreCase(EVALUATE)) {
- return evaluateModel(request, modelName.get(), path.segment(3).get());
+ Optional<Integer> evalSegment = path.lastIndexOf(EVALUATE);
+ String[] function = path.range(3, evalSegment);
+ if (evalSegment.isPresent()) {
+ return evaluateModel(request, model, function);
}
+ return listModelInformation(request, model, function);
+
+ } catch (IllegalArgumentException e) {
+ return new ErrorResponse(404, e.getMessage());
}
+ }
- return new ErrorResponse(404, "unrecognized request");
+ private HttpResponse evaluateModel(HttpRequest request, Model model, String[] function) {
+ FunctionEvaluator evaluator = model.evaluatorOf(function);
+ for (String bindingName : evaluator.context().names()) {
+ property(request, bindingName).ifPresent(s -> evaluator.bind(bindingName, Tensor.from(s)));
+ }
+ Tensor result = evaluator.evaluate();
+ return new Response(200, JsonFormat.encode(result));
}
private HttpResponse listAllModels(HttpRequest request) {
@@ -98,28 +82,33 @@ public class ModelsEvaluationHandler extends ThreadedHttpRequestHandler {
return new Response(200, com.yahoo.slime.JsonFormat.toJsonBytes(slime));
}
- private HttpResponse listModelDetails(HttpRequest request, String modelName) {
- Model model = modelsEvaluator.models().get(modelName);
+ private HttpResponse listModelInformation(HttpRequest request, Model model, String[] function) {
Slime slime = new Slime();
Cursor root = slime.setObject();
- for (ExpressionFunction func : model.functions()) {
- root.setString(func.getName(), baseUrl(request) + modelName + "/" + func.getName());
+ root.setString("model", model.name());
+ if (function.length == 0) {
+ listFunctions(request, model, root);
+ } else {
+ listFunctionDetails(request, model, function, root);
}
return new Response(200, com.yahoo.slime.JsonFormat.toJsonBytes(slime));
}
- private HttpResponse listTypeDetails(HttpRequest request, String modelName) {
- return listTypeDetails(request, modelsEvaluator.evaluatorOf(modelName));
- }
-
- private HttpResponse listTypeDetails(HttpRequest request, String modelName, String signatureAndOutput) {
- return listTypeDetails(request, modelsEvaluator.evaluatorOf(modelName, signatureAndOutput));
+ private void listFunctions(HttpRequest request, Model model, Cursor cursor) {
+ Cursor functions = cursor.setArray("functions");
+ for (ExpressionFunction func : model.functions()) {
+ Cursor function = functions.addObject();
+ listFunctionDetails(request, model, new String[] { func.getName() }, function);
+ }
}
- private HttpResponse listTypeDetails(HttpRequest request, FunctionEvaluator evaluator) {
- Slime slime = new Slime();
- Cursor root = slime.setObject();
- Cursor bindings = root.setArray("bindings");
+ private void listFunctionDetails(HttpRequest request, Model model, String[] function, Cursor cursor) {
+ String compactedFunction = String.join(".", function);
+ FunctionEvaluator evaluator = model.evaluatorOf(function);
+ cursor.setString("function", compactedFunction);
+ cursor.setString("info", baseUrl(request) + model.name() + "/" + compactedFunction);
+ cursor.setString("eval", baseUrl(request) + model.name() + "/" + compactedFunction + "/" + EVALUATE);
+ Cursor bindings = cursor.setArray("bindings");
for (String bindingName : evaluator.context().names()) {
// TODO: Use an API which exposes only the external binding names instead of this
if (bindingName.startsWith("constant(")) {
@@ -129,26 +118,9 @@ public class ModelsEvaluationHandler extends ThreadedHttpRequestHandler {
continue;
}
Cursor binding = bindings.addObject();
- binding.setString("name", bindingName);
+ binding.setString("binding", bindingName);
binding.setString("type", ""); // TODO: implement type information when available
}
- return new Response(200, com.yahoo.slime.JsonFormat.toJsonBytes(slime));
- }
-
- private HttpResponse evaluateModel(HttpRequest request, String modelName) {
- return evaluateModel(request, modelsEvaluator.evaluatorOf(modelName));
- }
-
- private HttpResponse evaluateModel(HttpRequest request, String modelName, String signatureAndOutput) {
- return evaluateModel(request, modelsEvaluator.evaluatorOf(modelName, signatureAndOutput));
- }
-
- private HttpResponse evaluateModel(HttpRequest request, FunctionEvaluator evaluator) {
- for (String bindingName : evaluator.context().names()) {
- property(request, bindingName).ifPresent(s -> evaluator.bind(bindingName, Tensor.from(s)));
- }
- Tensor result = evaluator.evaluate();
- return new Response(200, JsonFormat.encode(result));
}
private Optional<String> property(HttpRequest request, String name) {
@@ -178,8 +150,17 @@ public class ModelsEvaluationHandler extends ThreadedHttpRequestHandler {
return (index < 0 || index >= segments.length) ? Optional.empty() : Optional.of(segments[index]);
}
- int segments() {
- return segments.length;
+ Optional<Integer> lastIndexOf(String segment) {
+ for (int i = segments.length - 1; i >= 0; --i) {
+ if (segments[i].equalsIgnoreCase(segment)) {
+ return Optional.of(i);
+ }
+ }
+ return Optional.empty();
+ }
+
+ public String[] range(int start, Optional<Integer> end) {
+ return Arrays.copyOfRange(segments, start, end.isPresent() ? end.get() : segments.length);
}
private static String[] splitPath(HttpRequest request) {
diff --git a/model-evaluation/src/main/java/ai/vespa/models/handler/package-info.java b/model-evaluation/src/main/java/ai/vespa/models/handler/package-info.java
deleted file mode 100644
index 7978abf2632..00000000000
--- a/model-evaluation/src/main/java/ai/vespa/models/handler/package-info.java
+++ /dev/null
@@ -1,4 +0,0 @@
-@ExportPackage
-package ai.vespa.models.handler;
-
-import com.yahoo.osgi.annotation.ExportPackage; \ No newline at end of file
diff --git a/model-evaluation/src/test/java/ai/vespa/models/handler/ModelsEvaluationHandlerTest.java b/model-evaluation/src/test/java/ai/vespa/models/handler/ModelsEvaluationHandlerTest.java
index 5f045a2feb4..6726f117c05 100644
--- a/model-evaluation/src/test/java/ai/vespa/models/handler/ModelsEvaluationHandlerTest.java
+++ b/model-evaluation/src/test/java/ai/vespa/models/handler/ModelsEvaluationHandlerTest.java
@@ -80,14 +80,14 @@ public class ModelsEvaluationHandlerTest {
@Test
public void testMnistSoftmaxDetails() {
String url = "http://localhost:8080/model-evaluation/v1/mnist_softmax";
- String expected = "{\"bindings\":[{\"name\":\"Placeholder\",\"type\":\"\"}]}"; // only has a single function
+ String expected = "{\"model\":\"mnist_softmax\",\"functions\":[{\"function\":\"default.add\",\"info\":\"http://localhost:8080/model-evaluation/v1/mnist_softmax/default.add\",\"eval\":\"http://localhost:8080/model-evaluation/v1/mnist_softmax/default.add/eval\",\"bindings\":[{\"binding\":\"Placeholder\",\"type\":\"\"}]}]}";
assertResponse(url, 200, expected);
}
@Test
public void testMnistSoftmaxTypeDetails() {
String url = "http://localhost/model-evaluation/v1/mnist_softmax/default.add/";
- String expected = "{\"bindings\":[{\"name\":\"Placeholder\",\"type\":\"\"}]}";
+ String expected = "{\"model\":\"mnist_softmax\",\"function\":\"default.add\",\"info\":\"http://localhost/model-evaluation/v1/mnist_softmax/default.add\",\"eval\":\"http://localhost/model-evaluation/v1/mnist_softmax/default.add/eval\",\"bindings\":[{\"binding\":\"Placeholder\",\"type\":\"\"}]}";
assertResponse(url, 200, expected);
}
@@ -126,21 +126,21 @@ public class ModelsEvaluationHandlerTest {
@Test
public void testMnistSavedDetails() {
String url = "http://localhost:8080/model-evaluation/v1/mnist_saved";
- String expected = "{\"imported_ml_macro_mnist_saved_dnn_hidden1_add\":\"http://localhost:8080/model-evaluation/v1/mnist_saved/imported_ml_macro_mnist_saved_dnn_hidden1_add\",\"serving_default.y\":\"http://localhost:8080/model-evaluation/v1/mnist_saved/serving_default.y\"}";
+ String expected = "{\"model\":\"mnist_saved\",\"functions\":[{\"function\":\"imported_ml_macro_mnist_saved_dnn_hidden1_add\",\"info\":\"http://localhost:8080/model-evaluation/v1/mnist_saved/imported_ml_macro_mnist_saved_dnn_hidden1_add\",\"eval\":\"http://localhost:8080/model-evaluation/v1/mnist_saved/imported_ml_macro_mnist_saved_dnn_hidden1_add/eval\",\"bindings\":[{\"binding\":\"input\",\"type\":\"\"}]},{\"function\":\"serving_default.y\",\"info\":\"http://localhost:8080/model-evaluation/v1/mnist_saved/serving_default.y\",\"eval\":\"http://localhost:8080/model-evaluation/v1/mnist_saved/serving_default.y/eval\",\"bindings\":[{\"binding\":\"input\",\"type\":\"\"}]}]}";
assertResponse(url, 200, expected);
}
@Test
public void testMnistSavedTypeDetails() {
String url = "http://localhost/model-evaluation/v1/mnist_saved/serving_default.y/";
- String expected = "{\"bindings\":[{\"name\":\"input\",\"type\":\"\"}]}";
+ String expected = "{\"model\":\"mnist_saved\",\"function\":\"serving_default.y\",\"info\":\"http://localhost/model-evaluation/v1/mnist_saved/serving_default.y\",\"eval\":\"http://localhost/model-evaluation/v1/mnist_saved/serving_default.y/eval\",\"bindings\":[{\"binding\":\"input\",\"type\":\"\"}]}";
assertResponse(url, 200, expected);
}
@Test
public void testMnistSavedEvaluateDefaultFunctionShouldFail() {
String url = "http://localhost/model-evaluation/v1/mnist_saved/eval";
- String expected = "{\"error\":\"attempt to evaluate model without specifying function\"}";
+ String expected = "{\"error\":\"More than one function is available in model 'mnist_saved', but no name is given. Available functions: imported_ml_macro_mnist_saved_dnn_hidden1_add, serving_default.y\"}";
assertResponse(url, 404, expected);
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperations.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperations.java
index 90d71c067bc..4a19e5fe215 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperations.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperations.java
@@ -3,7 +3,7 @@ package com.yahoo.vespa.hosted.node.admin.docker;
import com.yahoo.vespa.hosted.dockerapi.Container;
import com.yahoo.vespa.hosted.dockerapi.ContainerName;
-import com.yahoo.vespa.hosted.dockerapi.Docker;
+import com.yahoo.vespa.hosted.dockerapi.ContainerStats;
import com.yahoo.vespa.hosted.dockerapi.DockerImage;
import com.yahoo.vespa.hosted.dockerapi.ProcessResult;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeSpec;
@@ -36,9 +36,14 @@ public interface DockerOperations {
void stopServicesOnNode(ContainerName containerName);
+ /**
+ * Try to suspend node. Suspending a node means the node should be taken offline,
+ * such that maintenance can be done of the node (upgrading, rebooting, etc),
+ * and such that we will start serving again as soon as possible afterwards.
+ */
void trySuspendNode(ContainerName containerName);
- Optional<Docker.ContainerStats> getContainerStats(ContainerName containerName);
+ Optional<ContainerStats> getContainerStats(ContainerName containerName);
/**
* Returns the list of containers managed by node-admin
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImpl.java
index a197eafe923..45c2e93c93e 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImpl.java
@@ -8,10 +8,12 @@ import com.yahoo.system.ProcessExecuter;
import com.yahoo.vespa.hosted.dockerapi.Container;
import com.yahoo.vespa.hosted.dockerapi.ContainerName;
import com.yahoo.vespa.hosted.dockerapi.ContainerResources;
+import com.yahoo.vespa.hosted.dockerapi.ContainerStats;
import com.yahoo.vespa.hosted.dockerapi.Docker;
import com.yahoo.vespa.hosted.dockerapi.DockerImage;
import com.yahoo.vespa.hosted.dockerapi.DockerNetworkCreator;
import com.yahoo.vespa.hosted.dockerapi.ProcessResult;
+import com.yahoo.vespa.hosted.dockerapi.exception.ContainerNotFoundException;
import com.yahoo.vespa.hosted.node.admin.component.Environment;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeSpec;
import com.yahoo.vespa.hosted.node.admin.nodeagent.ContainerData;
@@ -23,6 +25,7 @@ import java.net.Inet6Address;
import java.net.InetAddress;
import java.nio.file.Path;
import java.nio.file.Paths;
+import java.time.Duration;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
@@ -87,13 +90,12 @@ public class DockerOperationsImpl implements DockerOperations {
.withAddCapability("SYS_PTRACE") // Needed for gcore, pstack etc.
.withAddCapability("SYS_ADMIN"); // Needed for perf
- if (environment.getNodeType() == NodeType.confighost ||
- environment.getNodeType() == NodeType.proxyhost) {
+ if (environment.getNodeType() == NodeType.confighost || environment.getNodeType() == NodeType.proxyhost) {
command.withVolume("/var/lib/sia", "/var/lib/sia");
}
if (environment.getNodeType() == NodeType.proxyhost) {
- command.withVolume("/opt/yahoo/share/ssl/certs/", "/opt/yahoo/share/ssl/certs/");
+ command.withVolume("/opt/yahoo/share/ssl/certs", "/opt/yahoo/share/ssl/certs");
}
if (environment.getNodeType() == NodeType.host) {
@@ -142,8 +144,6 @@ public class DockerOperationsImpl implements DockerOperations {
logger.info("Creating new container with args: " + command);
command.create();
-
- docker.createContainer(command);
}
void addEtcHosts(ContainerData containerData,
@@ -217,26 +217,6 @@ public class DockerOperationsImpl implements DockerOperations {
}
/**
- * Try to suspend node. Suspending a node means the node should be taken offline,
- * such that maintenance can be done of the node (upgrading, rebooting, etc),
- * and such that we will start serving again as soon as possible afterwards.
- * <p>
- * Any failures are logged and ignored.
- */
- @Override
- public void trySuspendNode(ContainerName containerName) {
- try {
- // TODO: Change to waiting w/o timeout (need separate thread that we can stop).
- executeCommandInContainer(containerName, nodeProgram, "suspend");
- } catch (RuntimeException e) {
- PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName);
- // It's bad to continue as-if nothing happened, but on the other hand if we do not proceed to
- // remove container, we will not be able to upgrade to fix any problems in the suspend logic!
- logger.warning("Failed trying to suspend container " + containerName.asString(), e);
- }
- }
-
- /**
* For macvlan:
* <p>
* Due to a bug in docker (https://github.com/docker/libnetwork/issues/1443), we need to manually set
@@ -304,7 +284,6 @@ public class DockerOperationsImpl implements DockerOperations {
Arrays.toString(wrappedCommand), containerName.asString(), containerPid), e);
throw new RuntimeException(e);
}
-
}
@Override
@@ -323,7 +302,21 @@ public class DockerOperationsImpl implements DockerOperations {
}
@Override
- public Optional<Docker.ContainerStats> getContainerStats(ContainerName containerName) {
+ public void trySuspendNode(ContainerName containerName) {
+ try {
+ executeCommandInContainer(containerName, nodeProgram, "suspend");
+ } catch (ContainerNotFoundException e) {
+ throw e;
+ } catch (RuntimeException e) {
+ PrefixLogger logger = PrefixLogger.getNodeAgentLogger(DockerOperationsImpl.class, containerName);
+ // It's bad to continue as-if nothing happened, but on the other hand if we do not proceed to
+ // remove container, we will not be able to upgrade to fix any problems in the suspend logic!
+ logger.warning("Failed trying to suspend container " + containerName.asString(), e);
+ }
+ }
+
+ @Override
+ public Optional<ContainerStats> getContainerStats(ContainerName containerName) {
return docker.getContainerStats(containerName);
}
@@ -332,9 +325,11 @@ public class DockerOperationsImpl implements DockerOperations {
return docker.getAllContainersManagedBy(MANAGER_NAME);
}
+ // TODO: Remove after migrating to host-admin
@Override
public void deleteUnusedDockerImages() {
- docker.deleteUnusedDockerImages();
+ if (environment.isRunningOnHost()) return;
+ docker.deleteUnusedDockerImages(Collections.emptyList(), Duration.ofHours(1));
}
/**
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java
index cdfc8eef798..914836114ad 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java
@@ -189,10 +189,8 @@ public class StorageMaintainer {
try {
FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
Optional<String> config = filebeatConfigProvider.getConfig(node);
- if (!config.isPresent()) {
- logger.error("Was not able to generate a config for filebeat, ignoring filebeat file creation." + node.toString());
- return;
- }
+ if (!config.isPresent()) return;
+
Path filebeatPath = environment.pathInNodeAdminFromPathInNode(
containerName, Paths.get("/etc/filebeat/filebeat.yml"));
Files.write(filebeatPath, config.get().getBytes());
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
index 7c84150009e..ad38306547d 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
@@ -6,9 +6,10 @@ import com.yahoo.concurrent.ThreadFactoryFactory;
import com.yahoo.vespa.hosted.dockerapi.Container;
import com.yahoo.vespa.hosted.dockerapi.ContainerName;
import com.yahoo.vespa.hosted.dockerapi.ContainerResources;
-import com.yahoo.vespa.hosted.dockerapi.Docker;
-import com.yahoo.vespa.hosted.dockerapi.DockerException;
-import com.yahoo.vespa.hosted.dockerapi.DockerExecTimeoutException;
+import com.yahoo.vespa.hosted.dockerapi.ContainerStats;
+import com.yahoo.vespa.hosted.dockerapi.exception.ContainerNotFoundException;
+import com.yahoo.vespa.hosted.dockerapi.exception.DockerException;
+import com.yahoo.vespa.hosted.dockerapi.exception.DockerExecTimeoutException;
import com.yahoo.vespa.hosted.dockerapi.DockerImage;
import com.yahoo.vespa.hosted.dockerapi.ProcessResult;
import com.yahoo.vespa.hosted.dockerapi.metrics.DimensionMetrics;
@@ -209,6 +210,12 @@ public class NodeAgentImpl implements NodeAgent {
logger.info("Stopped");
}
+ /**
+ * Verifies that service is healthy, otherwise throws an exception. The default implementation does
+ * nothing, override if it's necessary to verify that a service is healthy before resuming.
+ */
+ protected void verifyHealth(NodeSpec node) { }
+
void runLocalResumeScriptIfNeeded(NodeSpec node) {
if (! resumeScriptRun) {
storageMaintainer.writeMetricsConfig(containerName, node);
@@ -262,7 +269,7 @@ public class NodeAgentImpl implements NodeAgent {
.flatMap(container -> removeContainerIfNeeded(node, container))
.map(container -> {
shouldRestartServices(node).ifPresent(restartReason -> {
- logger.info("Will restart services for container " + container + ": " + restartReason);
+ logger.info("Will restart services: " + restartReason);
restartServices(node, container);
});
return container;
@@ -283,7 +290,7 @@ public class NodeAgentImpl implements NodeAgent {
private void restartServices(NodeSpec node, Container existingContainer) {
if (existingContainer.state.isRunning() && node.getState() == Node.State.active) {
ContainerName containerName = existingContainer.name;
- logger.info("Restarting services for " + containerName);
+ logger.info("Restarting services");
// Since we are restarting the services we need to suspend the node.
orchestratorSuspendNode();
dockerOperations.restartVespaOnNode(containerName);
@@ -292,9 +299,14 @@ public class NodeAgentImpl implements NodeAgent {
@Override
public void stopServices() {
- logger.info("Stopping services for " + containerName);
- dockerOperations.trySuspendNode(containerName);
- dockerOperations.stopServicesOnNode(containerName);
+ logger.info("Stopping services");
+ if (containerState == ABSENT) return;
+ try {
+ dockerOperations.trySuspendNode(containerName);
+ dockerOperations.stopServicesOnNode(containerName);
+ } catch (ContainerNotFoundException e) {
+ containerState = ABSENT;
+ }
}
private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
@@ -324,7 +336,7 @@ public class NodeAgentImpl implements NodeAgent {
private Optional<Container> removeContainerIfNeeded(NodeSpec node, Container existingContainer) {
Optional<String> removeReason = shouldRemoveContainer(node, existingContainer);
if (removeReason.isPresent()) {
- logger.info("Will remove container " + existingContainer + ": " + removeReason.get());
+ logger.info("Will remove container: " + removeReason.get());
if (existingContainer.state.isRunning()) {
if (node.getState() == Node.State.active) {
@@ -378,7 +390,7 @@ public class NodeAgentImpl implements NodeAgent {
try {
monitor.wait(remainder);
} catch (InterruptedException e) {
- logger.error("Interrupted, but ignoring this: " + hostname);
+ logger.error("Interrupted while sleeping before tick, ignoring");
}
} else break;
}
@@ -403,9 +415,12 @@ public class NodeAgentImpl implements NodeAgent {
converged = true;
} catch (OrchestratorException e) {
logger.info(e.getMessage());
+ } catch (ContainerNotFoundException e) {
+ containerState = ABSENT;
+ logger.warning("Container unexpectedly gone, resetting containerState to " + containerState);
} catch (DockerException e) {
numberOfUnhandledException++;
- logger.error("Caught a DockerException, resetting containerState to " + containerState, e);
+ logger.error("Caught a DockerException", e);
} catch (Exception e) {
numberOfUnhandledException++;
logger.error("Unhandled exception, ignoring.", e);
@@ -468,6 +483,7 @@ public class NodeAgentImpl implements NodeAgent {
aclMaintainer.run();
}
+ verifyHealth(node);
runLocalResumeScriptIfNeeded(node);
athenzCredentialsMaintainer.converge();
@@ -554,7 +570,7 @@ public class NodeAgentImpl implements NodeAgent {
final NodeSpec node = lastNode;
if (node == null || containerState != UNKNOWN) return;
- Optional<Docker.ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
+ Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(containerName);
if (!containerStats.isPresent()) return;
Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
@@ -566,7 +582,7 @@ public class NodeAgentImpl implements NodeAgent {
dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
Dimensions dimensions = dimensionsBuilder.build();
- Docker.ContainerStats stats = containerStats.get();
+ ContainerStats stats = containerStats.get();
final String APP = MetricReceiverWrapper.APPLICATION_NODE;
final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
@@ -631,7 +647,7 @@ public class NodeAgentImpl implements NodeAgent {
String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
dockerOperations.executeCommandInContainerAsRoot(containerName, 5L, command);
} catch (DockerExecTimeoutException | JsonProcessingException e) {
- logger.warning("Unable to push metrics to container: " + containerName, e);
+ logger.warning("Failed to push metrics to container", e);
}
}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerMock.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerMock.java
index 4b4ef05593d..52e3987341c 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerMock.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerMock.java
@@ -4,11 +4,13 @@ package com.yahoo.vespa.hosted.node.admin.integrationTests;
import com.yahoo.vespa.hosted.dockerapi.Container;
import com.yahoo.vespa.hosted.dockerapi.ContainerName;
import com.yahoo.vespa.hosted.dockerapi.ContainerResources;
+import com.yahoo.vespa.hosted.dockerapi.ContainerStats;
import com.yahoo.vespa.hosted.dockerapi.Docker;
import com.yahoo.vespa.hosted.dockerapi.DockerImage;
import com.yahoo.vespa.hosted.dockerapi.ProcessResult;
import java.net.InetAddress;
+import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
@@ -69,13 +71,6 @@ public class DockerMock implements Docker {
}
@Override
- public void createContainer(CreateContainerCommand createContainerCommand) {
- synchronized (monitor) {
- callOrderVerifier.add("createContainer with " + createContainerCommand.toString());
- }
- }
-
- @Override
public void startContainer(ContainerName containerName) {
synchronized (monitor) {
callOrderVerifier.add("startContainer with " + containerName);
@@ -116,13 +111,8 @@ public class DockerMock implements Docker {
}
@Override
- public void deleteImage(DockerImage dockerImage) {
-
- }
-
- @Override
- public void deleteUnusedDockerImages() {
-
+ public boolean deleteUnusedDockerImages(List<DockerImage> excludes, Duration minImageAgeToDelete) {
+ return false;
}
@Override
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java
index ebed20326a3..e0031f9b9b3 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java
@@ -8,9 +8,8 @@ import com.yahoo.test.ManualClock;
import com.yahoo.vespa.hosted.dockerapi.Container;
import com.yahoo.vespa.hosted.dockerapi.ContainerName;
import com.yahoo.vespa.hosted.dockerapi.ContainerResources;
-import com.yahoo.vespa.hosted.dockerapi.ContainerStatsImpl;
-import com.yahoo.vespa.hosted.dockerapi.Docker;
-import com.yahoo.vespa.hosted.dockerapi.DockerException;
+import com.yahoo.vespa.hosted.dockerapi.ContainerStats;
+import com.yahoo.vespa.hosted.dockerapi.exception.DockerException;
import com.yahoo.vespa.hosted.dockerapi.DockerImage;
import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeSpec;
@@ -29,8 +28,8 @@ import com.yahoo.vespa.hosted.provision.Node;
import org.junit.Test;
import org.mockito.InOrder;
-import java.io.File;
import java.io.IOException;
+import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
@@ -78,7 +77,7 @@ public class NodeAgentImplTest {
private final StorageMaintainer storageMaintainer = mock(StorageMaintainer.class);
private final MetricReceiverWrapper metricReceiver = new MetricReceiverWrapper(MetricReceiver.nullImplementation);
private final AclMaintainer aclMaintainer = mock(AclMaintainer.class);
- private final Docker.ContainerStats emptyContainerStats = new ContainerStatsImpl(Collections.emptyMap(),
+ private final ContainerStats emptyContainerStats = new ContainerStats(Collections.emptyMap(),
Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
private final AthenzCredentialsMaintainer athenzCredentialsMaintainer = mock(AthenzCredentialsMaintainer.class);
@@ -598,7 +597,7 @@ public class NodeAgentImplTest {
public void testGetRelevantMetrics() throws Exception {
final ObjectMapper objectMapper = new ObjectMapper();
ClassLoader classLoader = getClass().getClassLoader();
- File statsFile = new File(classLoader.getResource("docker.stats.json").getFile());
+ URL statsFile = classLoader.getResource("docker.stats.json");
Map<String, Map<String, Object>> dockerStats = objectMapper.readValue(statsFile, Map.class);
Map<String, Object> networks = dockerStats.get("networks");
@@ -606,8 +605,8 @@ public class NodeAgentImplTest {
Map<String, Object> cpu_stats = dockerStats.get("cpu_stats");
Map<String, Object> memory_stats = dockerStats.get("memory_stats");
Map<String, Object> blkio_stats = dockerStats.get("blkio_stats");
- Docker.ContainerStats stats1 = new ContainerStatsImpl(networks, precpu_stats, memory_stats, blkio_stats);
- Docker.ContainerStats stats2 = new ContainerStatsImpl(networks, cpu_stats, memory_stats, blkio_stats);
+ ContainerStats stats1 = new ContainerStats(networks, precpu_stats, memory_stats, blkio_stats);
+ ContainerStats stats2 = new ContainerStats(networks, cpu_stats, memory_stats, blkio_stats);
NodeSpec.Owner owner = new NodeSpec.Owner("tester", "testapp", "testinstance");
NodeSpec.Membership membership = new NodeSpec.Membership("clustType", "clustId", "grp", 3, false);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InfrastructureProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InfrastructureProvisioner.java
index b6955195dcf..f61d4158253 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InfrastructureProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InfrastructureProvisioner.java
@@ -12,6 +12,7 @@ import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.service.monitor.application.ConfigServerApplication;
import com.yahoo.vespa.service.monitor.application.ConfigServerHostApplication;
+import com.yahoo.vespa.service.monitor.application.ControllerApplication;
import com.yahoo.vespa.service.monitor.application.HostedVespaApplication;
import com.yahoo.vespa.service.monitor.application.ProxyHostApplication;
@@ -35,7 +36,8 @@ public class InfrastructureProvisioner extends Maintainer {
private static final List<HostedVespaApplication> HOSTED_VESPA_APPLICATIONS = Arrays.asList(
ConfigServerApplication.CONFIG_SERVER_APPLICATION,
ConfigServerHostApplication.CONFIG_SERVER_HOST_APPLICATION,
- ProxyHostApplication.PROXY_HOST_APPLICATION);
+ ProxyHostApplication.PROXY_HOST_APPLICATION,
+ ControllerApplication.CONTROLLER_APPLICATION);
private final Provisioner provisioner;
private final InfrastructureVersions infrastructureVersions;
@@ -101,4 +103,5 @@ public class InfrastructureProvisioner extends Maintainer {
}
return targetVersion;
}
+
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InfrastructureVersions.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InfrastructureVersions.java
index 61783bb4483..31dd5d74404 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InfrastructureVersions.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InfrastructureVersions.java
@@ -28,8 +28,14 @@ public class InfrastructureVersions {
}
public void setTargetVersion(NodeType nodeType, Version newTargetVersion, boolean force) {
- if (nodeType != NodeType.config && nodeType != NodeType.confighost && nodeType != NodeType.proxyhost) {
- throw new IllegalArgumentException("Cannot set version for type " + nodeType);
+ switch (nodeType) {
+ case config:
+ case confighost:
+ case proxyhost:
+ case controller:
+ break;
+ default:
+ throw new IllegalArgumentException("Cannot set version for type " + nodeType);
}
if (newTargetVersion.isEmpty()) {
throw new IllegalArgumentException("Invalid target version: " + newTargetVersion.toFullString());
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
index 454ede61243..ded19a84f0d 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
@@ -70,7 +70,7 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
jobControl = new JobControl(nodeRepository.database());
infrastructureVersions = new InfrastructureVersions(nodeRepository.database());
- nodeFailer = new NodeFailer(deployer, hostLivenessTracker, serviceMonitor, nodeRepository, durationFromEnv("fail_grace").orElse(defaults.failGrace), clock, orchestrator, throttlePolicyFromEnv("throttle_policy").orElse(defaults.throttlePolicy), metric, jobControl, configserverConfig);
+ nodeFailer = new NodeFailer(deployer, hostLivenessTracker, serviceMonitor, nodeRepository, durationFromEnv("fail_grace").orElse(defaults.failGrace), clock, orchestrator, throttlePolicyFromEnv().orElse(defaults.throttlePolicy), metric, jobControl, configserverConfig);
periodicApplicationMaintainer = new PeriodicApplicationMaintainer(deployer, nodeRepository, defaults.redeployMaintainerInterval, durationFromEnv("periodic_redeploy_interval").orElse(defaults.periodicRedeployInterval), jobControl);
operatorChangeApplicationMaintainer = new OperatorChangeApplicationMaintainer(deployer, nodeRepository, clock, durationFromEnv("operator_change_redeploy_interval").orElse(defaults.operatorChangeRedeployInterval), jobControl);
reservationExpirer = new ReservationExpirer(nodeRepository, clock, durationFromEnv("reservation_expiry").orElse(defaults.reservationExpiry), jobControl);
@@ -117,8 +117,8 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
return Optional.ofNullable(System.getenv(envPrefix + envVariable)).map(Long::parseLong).map(Duration::ofSeconds);
}
- private static Optional<NodeFailer.ThrottlePolicy> throttlePolicyFromEnv(String envVariable) {
- String policyName = System.getenv(envPrefix + envVariable);
+ private static Optional<NodeFailer.ThrottlePolicy> throttlePolicyFromEnv() {
+ String policyName = System.getenv(envPrefix + "throttle_policy");
try {
return Optional.ofNullable(policyName).map(NodeFailer.ThrottlePolicy::valueOf);
} catch (IllegalArgumentException e) {
@@ -162,26 +162,24 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
operatorChangeRedeployInterval = Duration.ofMinutes(1);
failedExpirerInterval = Duration.ofMinutes(10);
provisionedExpiry = Duration.ofHours(4);
- reservationExpiry = Duration.ofMinutes(20); // Need to be long enough for deployment to be finished for all config model versions
rebootInterval = Duration.ofDays(30);
nodeRetirerInterval = Duration.ofMinutes(30);
metricsInterval = Duration.ofMinutes(1);
infrastructureProvisionInterval = Duration.ofMinutes(3);
throttlePolicy = NodeFailer.ThrottlePolicy.hosted;
- if (zone.environment().isTest())
- retiredExpiry = Duration.ofMinutes(1); // fast turnaround as test envs don't have persistent data
- else
- retiredExpiry = Duration.ofDays(4); // give up migrating data after 4 days
-
- if (zone.environment().equals(Environment.prod) && zone.system() == SystemName.main) {
+ if (zone.environment().equals(Environment.prod) && zone.system() != SystemName.cd) {
inactiveExpiry = Duration.ofHours(4); // enough time for the application owner to discover and redeploy
retiredInterval = Duration.ofMinutes(29);
dirtyExpiry = Duration.ofHours(2); // enough time to clean the node
+ retiredExpiry = Duration.ofDays(4); // give up migrating data after 4 days
+ reservationExpiry = Duration.ofMinutes(20); // Need to be long enough for deployment to be finished for all config model versions
} else {
inactiveExpiry = Duration.ofSeconds(2); // support interactive wipe start over
- retiredInterval = Duration.ofMinutes(5);
+ retiredInterval = Duration.ofMinutes(1);
dirtyExpiry = Duration.ofMinutes(30);
+ retiredExpiry = Duration.ofMinutes(20);
+ reservationExpiry = Duration.ofMinutes(10);
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java
index dbe6589dd7f..db3db139044 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java
@@ -320,12 +320,13 @@ public class NodeSerializer {
static NodeType nodeTypeFromString(String typeString) {
switch (typeString) {
- case "tenant" : return NodeType.tenant;
- case "host" : return NodeType.host;
- case "proxy" : return NodeType.proxy;
- case "proxyhost" : return NodeType.proxyhost;
- case "config" : return NodeType.config;
- case "confighost" : return NodeType.confighost;
+ case "tenant": return NodeType.tenant;
+ case "host": return NodeType.host;
+ case "proxy": return NodeType.proxy;
+ case "proxyhost": return NodeType.proxyhost;
+ case "config": return NodeType.config;
+ case "confighost": return NodeType.confighost;
+ case "controller": return NodeType.controller;
default : throw new IllegalArgumentException("Unknown node type '" + typeString + "'");
}
}
@@ -338,6 +339,7 @@ public class NodeSerializer {
case proxyhost: return "proxyhost";
case config: return "config";
case confighost: return "confighost";
+ case controller: return "controller";
}
throw new IllegalArgumentException("Serialized form of '" + type + "' not defined");
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/NodesResponse.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/NodesResponse.java
index 970871a4d05..3b7cf857a86 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/NodesResponse.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/NodesResponse.java
@@ -196,6 +196,7 @@ class NodesResponse extends HttpResponse {
case proxyhost: return "proxyhost";
case config: return "config";
case confighost: return "confighost";
+ case controller: return "controller";
default:
throw new RuntimeException("New type added to enum, not implemented in NodesResponse: " + type.name());
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InfrastructureProvisionerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InfrastructureProvisionerTest.java
index f0c4ad2ef2d..642e6adfc75 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InfrastructureProvisionerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/InfrastructureProvisionerTest.java
@@ -2,8 +2,8 @@
package com.yahoo.vespa.hosted.provision.maintenance;
import com.yahoo.component.Version;
-import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.ClusterMembership;
+import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.NodeType;
import com.yahoo.config.provision.Provisioner;
import com.yahoo.vespa.hosted.provision.Node;
@@ -13,11 +13,17 @@ import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.Allocation;
import com.yahoo.vespa.hosted.provision.node.Generation;
import com.yahoo.vespa.service.monitor.application.ConfigServerApplication;
+import com.yahoo.vespa.service.monitor.application.ControllerApplication;
+import com.yahoo.vespa.service.monitor.application.HostedVespaApplication;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
import java.time.Duration;
+import java.util.Arrays;
import java.util.Optional;
-import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
@@ -26,47 +32,63 @@ import static org.mockito.Mockito.when;
/**
* @author freva
*/
+@RunWith(Parameterized.class)
public class InfrastructureProvisionerTest {
- private final NodeRepositoryTester tester = new NodeRepositoryTester();
+ @Parameters(name = "application={0}")
+ public static Iterable<Object[]> parameters() {
+ return Arrays.asList(
+ new HostedVespaApplication[]{ConfigServerApplication.CONFIG_SERVER_APPLICATION},
+ new HostedVespaApplication[]{ControllerApplication.CONTROLLER_APPLICATION}
+ );
+ }
+ private final NodeRepositoryTester tester = new NodeRepositoryTester();
private final Provisioner provisioner = mock(Provisioner.class);
private final NodeRepository nodeRepository = tester.nodeRepository();
private final InfrastructureVersions infrastructureVersions = mock(InfrastructureVersions.class);
private final InfrastructureProvisioner infrastructureProvisioner = new InfrastructureProvisioner(
provisioner, nodeRepository, infrastructureVersions, Duration.ofDays(99), new JobControl(nodeRepository.database()));
+ private final HostedVespaApplication application;
+ private final NodeType nodeType;
+
+ public InfrastructureProvisionerTest(HostedVespaApplication application) {
+ this.application = application;
+ this.nodeType = application.getCapacity().type();
+ }
+
@Test
public void returns_version_if_usable_nodes_on_old_version() {
Version target = Version.fromString("6.123.456");
Version oldVersion = Version.fromString("6.122.333");
- when(infrastructureVersions.getTargetVersionFor(eq(NodeType.config))).thenReturn(Optional.of(target));
+ when(infrastructureVersions.getTargetVersionFor(eq(nodeType))).thenReturn(Optional.of(target));
addNode(1, Node.State.failed, Optional.of(oldVersion));
addNode(2, Node.State.dirty, Optional.empty());
addNode(3, Node.State.active, Optional.of(oldVersion));
- assertEquals(Optional.of(target), infrastructureProvisioner.getTargetVersion(NodeType.config));
+ assertEquals(Optional.of(target), infrastructureProvisioner.getTargetVersion(nodeType));
}
@Test
public void returns_version_if_has_usable_nodes_without_version() {
Version target = Version.fromString("6.123.456");
Version oldVersion = Version.fromString("6.122.333");
- when(infrastructureVersions.getTargetVersionFor(eq(NodeType.config))).thenReturn(Optional.of(target));
+ when(infrastructureVersions.getTargetVersionFor(eq(nodeType))).thenReturn(Optional.of(target));
addNode(1, Node.State.failed, Optional.of(oldVersion));
addNode(2, Node.State.ready, Optional.empty());
addNode(3, Node.State.active, Optional.of(target));
- assertEquals(Optional.of(target), infrastructureProvisioner.getTargetVersion(NodeType.config));
+ assertEquals(Optional.of(target), infrastructureProvisioner.getTargetVersion(nodeType));
}
@Test
public void returns_empty_if_usable_nodes_on_target_version() {
Version target = Version.fromString("6.123.456");
Version oldVersion = Version.fromString("6.122.333");
- when(infrastructureVersions.getTargetVersionFor(eq(NodeType.config))).thenReturn(Optional.of(target));
+ when(infrastructureVersions.getTargetVersionFor(eq(nodeType))).thenReturn(Optional.of(target));
addNode(1, Node.State.failed, Optional.of(oldVersion));
addNode(2, Node.State.parked, Optional.of(target));
@@ -74,32 +96,31 @@ public class InfrastructureProvisionerTest {
addNode(4, Node.State.inactive, Optional.of(target));
addNode(5, Node.State.dirty, Optional.empty());
- assertEquals(Optional.empty(), infrastructureProvisioner.getTargetVersion(NodeType.config));
+ assertEquals(Optional.empty(), infrastructureProvisioner.getTargetVersion(nodeType));
}
@Test
public void returns_empty_if_no_usable_nodes() {
- when(infrastructureVersions.getTargetVersionFor(eq(NodeType.config))).thenReturn(Optional.of(Version.fromString("6.123.456")));
+ when(infrastructureVersions.getTargetVersionFor(eq(nodeType))).thenReturn(Optional.of(Version.fromString("6.123.456")));
// No nodes in node repo
- assertEquals(Optional.empty(), infrastructureProvisioner.getTargetVersion(NodeType.config));
+ assertEquals(Optional.empty(), infrastructureProvisioner.getTargetVersion(nodeType));
// Add nodes in non-provisionable states
addNode(1, Node.State.dirty, Optional.empty());
addNode(2, Node.State.failed, Optional.empty());
- assertEquals(Optional.empty(), infrastructureProvisioner.getTargetVersion(NodeType.config));
+ assertEquals(Optional.empty(), infrastructureProvisioner.getTargetVersion(nodeType));
}
@Test
public void returns_empty_if_target_version_not_set() {
- when(infrastructureVersions.getTargetVersionFor(eq(NodeType.config))).thenReturn(Optional.empty());
- assertEquals(Optional.empty(), infrastructureProvisioner.getTargetVersion(NodeType.config));
+ when(infrastructureVersions.getTargetVersionFor(eq(nodeType))).thenReturn(Optional.empty());
+ assertEquals(Optional.empty(), infrastructureProvisioner.getTargetVersion(nodeType));
}
private Node addNode(int id, Node.State state, Optional<Version> wantedVespaVersion) {
- Node node = tester.addNode("id-" + id, "node-" + id, "default", NodeType.config);
+ Node node = tester.addNode("id-" + id, "node-" + id, "default", nodeType);
Optional<Node> nodeWithAllocation = wantedVespaVersion.map(version -> {
- ConfigServerApplication application = ConfigServerApplication.CONFIG_SERVER_APPLICATION;
ClusterSpec clusterSpec = ClusterSpec.from(application.getClusterType(), application.getClusterId(), ClusterSpec.Group.from(0), version, false);
ClusterMembership membership = ClusterMembership.from(clusterSpec, 1);
Allocation allocation = new Allocation(application.getApplicationId(), membership, new Generation(0, 0), false);
@@ -107,4 +128,5 @@ public class InfrastructureProvisionerTest {
});
return nodeRepository.database().writeTo(state, nodeWithAllocation.orElse(node), Agent.system, Optional.empty());
}
+
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/RestApiTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/RestApiTest.java
index a838d5c7b64..bc810d93f02 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/RestApiTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/v2/RestApiTest.java
@@ -508,7 +508,7 @@ public class RestApiTest {
// Initially, no versions are set
assertResponse(new Request("http://localhost:8080/nodes/v2/upgrade/"), "{\"versions\":{},\"osVersions\":{}}");
- // Set version for config and confighost
+ // Set version for config, confighost and controller
assertResponse(new Request("http://localhost:8080/nodes/v2/upgrade/config",
Utf8.toBytes("{\"version\": \"6.123.456\"}"),
Request.Method.PATCH),
@@ -517,10 +517,15 @@ public class RestApiTest {
Utf8.toBytes("{\"version\": \"6.123.456\"}"),
Request.Method.PATCH),
"{\"message\":\"Set version to 6.123.456 for nodes of type confighost\"}");
+ assertResponse(new Request("http://localhost:8080/nodes/v2/upgrade/controller",
+ Utf8.toBytes("{\"version\": \"6.123.456\"}"),
+ Request.Method.PATCH),
+ "{\"message\":\"Set version to 6.123.456 for nodes of type controller\"}");
+
// Verify versions are set
assertResponse(new Request("http://localhost:8080/nodes/v2/upgrade/"),
- "{\"versions\":{\"config\":\"6.123.456\",\"confighost\":\"6.123.456\"},\"osVersions\":{}}");
+ "{\"versions\":{\"config\":\"6.123.456\",\"confighost\":\"6.123.456\",\"controller\":\"6.123.456\"},\"osVersions\":{}}");
// Setting empty version fails
assertResponse(new Request("http://localhost:8080/nodes/v2/upgrade/confighost",
@@ -529,6 +534,13 @@ public class RestApiTest {
400,
"{\"error-code\":\"BAD_REQUEST\",\"message\":\"Invalid target version: 0.0.0\"}");
+ // Setting version for unsupported node type fails
+ assertResponse(new Request("http://localhost:8080/nodes/v2/upgrade/tenant",
+ Utf8.toBytes("{\"version\": \"6.123.456\"}"),
+ Request.Method.PATCH),
+ 400,
+ "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Cannot set version for type tenant\"}");
+
// Omitting version field fails
assertResponse(new Request("http://localhost:8080/nodes/v2/upgrade/confighost",
Utf8.toBytes("{}"),
@@ -552,7 +564,7 @@ public class RestApiTest {
// Verify version has been updated
assertResponse(new Request("http://localhost:8080/nodes/v2/upgrade/"),
- "{\"versions\":{\"config\":\"6.123.456\",\"confighost\":\"6.123.1\"},\"osVersions\":{}}");
+ "{\"versions\":{\"config\":\"6.123.456\",\"confighost\":\"6.123.1\",\"controller\":\"6.123.456\"},\"osVersions\":{}}");
// Upgrade OS for confighost and host
assertResponse(new Request("http://localhost:8080/nodes/v2/upgrade/confighost",
@@ -566,7 +578,7 @@ public class RestApiTest {
// OS versions are set
assertResponse(new Request("http://localhost:8080/nodes/v2/upgrade/"),
- "{\"versions\":{\"config\":\"6.123.456\",\"confighost\":\"6.123.1\"},\"osVersions\":{\"host\":\"7.5.2\",\"confighost\":\"7.5.2\"}}");
+ "{\"versions\":{\"config\":\"6.123.456\",\"confighost\":\"6.123.1\",\"controller\":\"6.123.456\"},\"osVersions\":{\"host\":\"7.5.2\",\"confighost\":\"7.5.2\"}}");
// Upgrade OS and Vespa together
assertResponse(new Request("http://localhost:8080/nodes/v2/upgrade/confighost",
diff --git a/parent/pom.xml b/parent/pom.xml
index e6ad2b18df9..96075974567 100644
--- a/parent/pom.xml
+++ b/parent/pom.xml
@@ -97,7 +97,7 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
- <version>3.7.0</version>
+ <version>3.8.0</version>
<configuration>
<source>1.8</source>
<target>1.8</target>
diff --git a/searchcore/src/tests/proton/documentdb/documentdb_test.cpp b/searchcore/src/tests/proton/documentdb/documentdb_test.cpp
index 8affe6e9cbf..2c9a2b300eb 100644
--- a/searchcore/src/tests/proton/documentdb/documentdb_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/documentdb_test.cpp
@@ -221,8 +221,8 @@ TEST_F("requireThatStateIsReported", Fixture)
" },\n"
" \"documents\": {\n"
" \"active\": 0,\n"
- " \"indexed\": 0,\n"
- " \"stored\": 0,\n"
+ " \"ready\": 0,\n"
+ " \"total\": 0,\n"
" \"removed\": 0\n"
" }\n"
"}\n",
diff --git a/searchcore/src/vespa/searchcore/config/proton.def b/searchcore/src/vespa/searchcore/config/proton.def
index ca1ea67d288..0ba42aad102 100644
--- a/searchcore/src/vespa/searchcore/config/proton.def
+++ b/searchcore/src/vespa/searchcore/config/proton.def
@@ -228,8 +228,8 @@ summary.cache.compression.type enum {NONE, LZ4, ZSTD} default=LZ4
## Control compression level of the summary while in cache.
## LZ4 has normal range 1..9 while ZSTD has range 1..19
-## 9 is a reasonable default for both
-summary.cache.compression.level int default=9
+## 6 is a default for lz4 to prioritize speed.
+summary.cache.compression.level int default=6
## Control if cache entry is updated or invalidated when changed.
summary.cache.update_strategy enum {INVALIDATE, UPDATE} default=INVALIDATE
diff --git a/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.cpp b/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.cpp
index 335ee63668b..c618614ea52 100644
--- a/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.cpp
+++ b/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.cpp
@@ -80,7 +80,8 @@ DocumentDBTaggedMetrics::SubDBMetrics::DocumentStoreMetrics::~DocumentStoreMetri
DocumentDBTaggedMetrics::AttributeMetrics::AttributeMetrics(MetricSet *parent)
: MetricSet("attribute", "", "Attribute vector metrics for this document db", parent),
- resourceUsage(this)
+ resourceUsage(this),
+ totalMemoryUsage(this)
{
}
@@ -222,6 +223,17 @@ DocumentDBTaggedMetrics::SessionCacheMetrics::SessionCacheMetrics(metrics::Metri
DocumentDBTaggedMetrics::SessionCacheMetrics::~SessionCacheMetrics() = default;
+DocumentDBTaggedMetrics::DocumentsMetrics::DocumentsMetrics(metrics::MetricSet *parent)
+ : metrics::MetricSet("documents", "", "Metrics for various document counts in this document db", parent),
+ active("active", "", "The number of active / searchable documents in this document db", this),
+ ready("ready", "", "The number of ready documents in this document db", this),
+ total("total", "", "The total number of documents in this document db (ready + not-ready)", this),
+ removed("removed", "", "The number of removed documents in this document db", this)
+{
+}
+
+DocumentDBTaggedMetrics::DocumentsMetrics::~DocumentsMetrics() = default;
+
DocumentDBTaggedMetrics::DocumentDBTaggedMetrics(const vespalib::string &docTypeName)
: MetricSet("documentdb", {{"documenttype", docTypeName}}, "Document DB metrics", nullptr),
job(this),
@@ -232,7 +244,10 @@ DocumentDBTaggedMetrics::DocumentDBTaggedMetrics(const vespalib::string &docType
removed("removed", this),
threadingService("threading_service", this),
matching(this),
- sessionCache(this)
+ sessionCache(this),
+ documents(this),
+ totalMemoryUsage(this),
+ totalDiskUsage("disk_usage", "", "The total disk usage (in bytes) for this document db", this)
{
}
diff --git a/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.h b/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.h
index 516e6f223bc..94a69be2c7f 100644
--- a/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.h
+++ b/searchcore/src/vespa/searchcore/proton/metrics/documentdb_tagged_metrics.h
@@ -93,6 +93,7 @@ struct DocumentDBTaggedMetrics : metrics::MetricSet
};
ResourceUsageMetrics resourceUsage;
+ MemoryUsageMetrics totalMemoryUsage;
AttributeMetrics(metrics::MetricSet *parent);
~AttributeMetrics();
@@ -168,6 +169,16 @@ struct DocumentDBTaggedMetrics : metrics::MetricSet
~SessionCacheMetrics();
};
+ struct DocumentsMetrics : metrics::MetricSet {
+ metrics::LongValueMetric active;
+ metrics::LongValueMetric ready;
+ metrics::LongValueMetric total;
+ metrics::LongValueMetric removed;
+
+ DocumentsMetrics(metrics::MetricSet *parent);
+ ~DocumentsMetrics();
+ };
+
JobMetrics job;
AttributeMetrics attribute;
IndexMetrics index;
@@ -177,6 +188,9 @@ struct DocumentDBTaggedMetrics : metrics::MetricSet
ExecutorThreadingServiceMetrics threadingService;
MatchingMetrics matching;
SessionCacheMetrics sessionCache;
+ DocumentsMetrics documents;
+ MemoryUsageMetrics totalMemoryUsage;
+ metrics::LongValueMetric totalDiskUsage;
DocumentDBTaggedMetrics(const vespalib::string &docTypeName);
~DocumentDBTaggedMetrics();
diff --git a/searchcore/src/vespa/searchcore/proton/server/CMakeLists.txt b/searchcore/src/vespa/searchcore/proton/server/CMakeLists.txt
index 2df34312b52..d47e87e9e03 100644
--- a/searchcore/src/vespa/searchcore/proton/server/CMakeLists.txt
+++ b/searchcore/src/vespa/searchcore/proton/server/CMakeLists.txt
@@ -28,6 +28,7 @@ vespa_add_library(searchcore_server STATIC
documentbucketmover.cpp
documentdb.cpp
documentdb_commit_job.cpp
+ documentdb_metrics_updater.cpp
documentdbconfig.cpp
documentdbconfigscout.cpp
documentdbconfigmanager.cpp
diff --git a/searchcore/src/vespa/searchcore/proton/server/document_db_explorer.cpp b/searchcore/src/vespa/searchcore/proton/server/document_db_explorer.cpp
index 32d7eb7e0f2..0c3772cab5b 100644
--- a/searchcore/src/vespa/searchcore/proton/server/document_db_explorer.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/document_db_explorer.cpp
@@ -32,14 +32,11 @@ DocumentDBExplorer::get_state(const Inserter &inserter, bool full) const
StateReporterUtils::convertToSlime(*_docDb->reportStatus(), ObjectInserter(object, "status"));
}
{
- // TODO(geirst): Avoid const cast by adding const interface to
- // IDocumentMetaStoreContext as seen from IDocumentSubDB.
- DocumentMetaStoreReadGuards dmss
- (const_cast<DocumentSubDBCollection &>(_docDb->getDocumentSubDBs()));
+ DocumentMetaStoreReadGuards dmss(_docDb->getDocumentSubDBs());
Cursor &documents = object.setObject("documents");
documents.setLong("active", dmss.numActiveDocs());
- documents.setLong("indexed", dmss.numIndexedDocs());
- documents.setLong("stored", dmss.numStoredDocs());
+ documents.setLong("ready", dmss.numReadyDocs());
+ documents.setLong("total", dmss.numTotalDocs());
documents.setLong("removed", dmss.numRemovedDocs());
}
}
diff --git a/searchcore/src/vespa/searchcore/proton/server/document_meta_store_read_guards.cpp b/searchcore/src/vespa/searchcore/proton/server/document_meta_store_read_guards.cpp
index 2335cd01ec8..952910970ae 100644
--- a/searchcore/src/vespa/searchcore/proton/server/document_meta_store_read_guards.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/document_meta_store_read_guards.cpp
@@ -6,7 +6,7 @@
namespace proton {
-DocumentMetaStoreReadGuards::DocumentMetaStoreReadGuards(DocumentSubDBCollection &subDBs)
+DocumentMetaStoreReadGuards::DocumentMetaStoreReadGuards(const DocumentSubDBCollection &subDBs)
: readydms(subDBs.getReadySubDB()->getDocumentMetaStoreContext().getReadGuard()),
notreadydms(subDBs.getNotReadySubDB()->getDocumentMetaStoreContext().getReadGuard()),
remdms(subDBs.getRemSubDB()->getDocumentMetaStoreContext().getReadGuard())
diff --git a/searchcore/src/vespa/searchcore/proton/server/document_meta_store_read_guards.h b/searchcore/src/vespa/searchcore/proton/server/document_meta_store_read_guards.h
index 8dc39d1415d..7baa5576b3b 100644
--- a/searchcore/src/vespa/searchcore/proton/server/document_meta_store_read_guards.h
+++ b/searchcore/src/vespa/searchcore/proton/server/document_meta_store_read_guards.h
@@ -18,17 +18,17 @@ struct DocumentMetaStoreReadGuards
IDocumentMetaStoreContext::IReadGuard::UP notreadydms;
IDocumentMetaStoreContext::IReadGuard::UP remdms;
- DocumentMetaStoreReadGuards(DocumentSubDBCollection &subDBs);
+ DocumentMetaStoreReadGuards(const DocumentSubDBCollection &subDBs);
~DocumentMetaStoreReadGuards();
uint32_t numActiveDocs() const {
return readydms ? readydms->get().getNumActiveLids() : 0;
}
- uint32_t numIndexedDocs() const {
+ uint32_t numReadyDocs() const {
return readydms ? readydms->get().getNumUsedLids() : 0;
}
- uint32_t numStoredDocs() const {
- return numIndexedDocs() + (notreadydms ? notreadydms->get().getNumUsedLids() : 0);
+ uint32_t numTotalDocs() const {
+ return numReadyDocs() + (notreadydms ? notreadydms->get().getNumUsedLids() : 0);
}
uint32_t numRemovedDocs() const {
return remdms ? remdms->get().getNumUsedLids() : 0;
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp b/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp
index 18d1a760dd6..94ff5b43a3a 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp
@@ -136,8 +136,13 @@ DocumentDB::DocumentDB(const vespalib::string &baseDir,
_visibility(_feedHandler, _writeService, _feedView),
_lidSpaceCompactionHandlers(),
_jobTrackers(),
- _lastDocStoreCacheStats(),
- _calc()
+ _calc(),
+ _metricsUpdater(_subDBs,
+ _writeService,
+ _jobTrackers,
+ *_sessionManager,
+ _writeFilter,
+ _state)
{
assert(configSnapshot);
@@ -1010,326 +1015,13 @@ DocumentDB::notifyAllBucketsChanged()
_clusterStateHandler, "notready");
}
-namespace {
-
-void
-updateIndexMetrics(DocumentDBMetricsCollection &metrics, const search::SearchableStats &stats)
-{
- DocumentDBTaggedMetrics::IndexMetrics &indexMetrics = metrics.getTaggedMetrics().index;
- indexMetrics.diskUsage.set(stats.sizeOnDisk());
- indexMetrics.memoryUsage.update(stats.memoryUsage());
- indexMetrics.docsInMemory.set(stats.docsInMemory());
-
- LegacyDocumentDBMetrics::IndexMetrics &legacyIndexMetrics = metrics.getLegacyMetrics().index;
- legacyIndexMetrics.memoryUsage.set(stats.memoryUsage().allocatedBytes());
- legacyIndexMetrics.docsInMemory.set(stats.docsInMemory());
- legacyIndexMetrics.diskUsage.set(stats.sizeOnDisk());
-}
-
-struct TempAttributeMetric
-{
- MemoryUsage _memoryUsage;
- uint64_t _bitVectors;
-
- TempAttributeMetric()
- : _memoryUsage(),
- _bitVectors(0)
- {}
-};
-
-struct TempAttributeMetrics
-{
- typedef std::map<vespalib::string, TempAttributeMetric> AttrMap;
- TempAttributeMetric _total;
- AttrMap _attrs;
-};
-
-bool
-isReadySubDB(const IDocumentSubDB *subDb, const DocumentSubDBCollection &subDbs)
-{
- return subDb == subDbs.getReadySubDB();
-}
-
-bool
-isNotReadySubDB(const IDocumentSubDB *subDb, const DocumentSubDBCollection &subDbs)
-{
- return subDb == subDbs.getNotReadySubDB();
-}
-
-void
-fillTempAttributeMetrics(TempAttributeMetrics &metrics, const vespalib::string &attrName,
- const MemoryUsage &memoryUsage, uint32_t bitVectors)
-{
- metrics._total._memoryUsage.merge(memoryUsage);
- metrics._total._bitVectors += bitVectors;
- TempAttributeMetric &m = metrics._attrs[attrName];
- m._memoryUsage.merge(memoryUsage);
- m._bitVectors += bitVectors;
-}
-
-void
-fillTempAttributeMetrics(TempAttributeMetrics &totalMetrics,
- TempAttributeMetrics &readyMetrics,
- TempAttributeMetrics &notReadyMetrics,
- const DocumentSubDBCollection &subDbs)
-{
- for (const auto subDb : subDbs) {
- proton::IAttributeManager::SP attrMgr(subDb->getAttributeManager());
- if (attrMgr) {
- TempAttributeMetrics *subMetrics =
- (isReadySubDB(subDb, subDbs) ? &readyMetrics :
- (isNotReadySubDB(subDb, subDbs) ? &notReadyMetrics : nullptr));
- std::vector<search::AttributeGuard> list;
- attrMgr->getAttributeListAll(list);
- for (const auto &attr : list) {
- const search::attribute::Status &status = attr->getStatus();
- MemoryUsage memoryUsage(status.getAllocated(), status.getUsed(), status.getDead(), status.getOnHold());
- uint32_t bitVectors = status.getBitVectors();
- fillTempAttributeMetrics(totalMetrics, attr->getName(), memoryUsage, bitVectors);
- if (subMetrics != nullptr) {
- fillTempAttributeMetrics(*subMetrics, attr->getName(), memoryUsage, bitVectors);
- }
- }
- }
- }
-}
-
-void
-updateLegacyAttributeMetrics(LegacyAttributeMetrics &metrics, const TempAttributeMetrics &tmpMetrics)
-{
- for (const auto &attr : tmpMetrics._attrs) {
- LegacyAttributeMetrics::List::Entry *entry = metrics.list.get(attr.first);
- if (entry) {
- entry->memoryUsage.set(attr.second._memoryUsage.allocatedBytes());
- entry->bitVectors.set(attr.second._bitVectors);
- } else {
- LOG(debug, "Could not update metrics for attribute: '%s'", attr.first.c_str());
- }
- }
- metrics.memoryUsage.set(tmpMetrics._total._memoryUsage.allocatedBytes());
- metrics.bitVectors.set(tmpMetrics._total._bitVectors);
-}
-
-void
-updateAttributeMetrics(AttributeMetrics &metrics, const TempAttributeMetrics &tmpMetrics)
-{
- for (const auto &attr : tmpMetrics._attrs) {
- auto entry = metrics.get(attr.first);
- if (entry) {
- entry->memoryUsage.update(attr.second._memoryUsage);
- }
- }
-}
-
-void
-updateAttributeMetrics(DocumentDBMetricsCollection &metrics, const DocumentSubDBCollection &subDbs)
-{
- TempAttributeMetrics totalMetrics;
- TempAttributeMetrics readyMetrics;
- TempAttributeMetrics notReadyMetrics;
- fillTempAttributeMetrics(totalMetrics, readyMetrics, notReadyMetrics, subDbs);
-
- updateLegacyAttributeMetrics(metrics.getLegacyMetrics().attributes, totalMetrics);
- updateLegacyAttributeMetrics(metrics.getLegacyMetrics().ready.attributes, readyMetrics);
- updateLegacyAttributeMetrics(metrics.getLegacyMetrics().notReady.attributes, notReadyMetrics);
-
- updateAttributeMetrics(metrics.getTaggedMetrics().ready.attributes, readyMetrics);
- updateAttributeMetrics(metrics.getTaggedMetrics().notReady.attributes, notReadyMetrics);
-}
-
-namespace {
-
-void
-updateLegacyRankProfileMetrics(LegacyDocumentDBMetrics::MatchingMetrics &matchingMetrics,
- const vespalib::string &rankProfileName,
- const MatchingStats &stats)
-{
- auto itr = matchingMetrics.rank_profiles.find(rankProfileName);
- assert(itr != matchingMetrics.rank_profiles.end());
- itr->second->update(stats);
-}
-
-}
-
-void
-updateMatchingMetrics(DocumentDBMetricsCollection &metrics, const IDocumentSubDB &ready)
-{
- MatchingStats totalStats;
- for (const auto &rankProfile : metrics.getTaggedMetrics().matching.rank_profiles) {
- MatchingStats matchingStats = ready.getMatcherStats(rankProfile.first);
- rankProfile.second->update(matchingStats);
- updateLegacyRankProfileMetrics(metrics.getLegacyMetrics().matching, rankProfile.first, matchingStats);
-
- totalStats.add(matchingStats);
- }
- metrics.getTaggedMetrics().matching.update(totalStats);
- metrics.getLegacyMetrics().matching.update(totalStats);
-}
-
-void
-updateSessionCacheMetrics(DocumentDBMetricsCollection &metrics, proton::matching::SessionManager &sessionManager)
-{
- auto searchStats = sessionManager.getSearchStats();
- metrics.getTaggedMetrics().sessionCache.search.update(searchStats);
-
- auto groupingStats = sessionManager.getGroupingStats();
- metrics.getTaggedMetrics().sessionCache.grouping.update(groupingStats);
- metrics.getLegacyMetrics().sessionManager.update(groupingStats);
-}
-
-void
-updateDocumentStoreCacheHitRate(const CacheStats &current, const CacheStats &last,
- metrics::LongAverageMetric &cacheHitRate)
-{
- if (current.lookups() < last.lookups() || current.hits < last.hits) {
- LOG(warning, "Not adding document store cache hit rate metrics as values calculated "
- "are corrupt. current.lookups=%" PRIu64 ", last.lookups=%" PRIu64 ", current.hits=%" PRIu64 ", last.hits=%" PRIu64 ".",
- current.lookups(), last.lookups(), current.hits, last.hits);
- } else {
- if ((current.lookups() - last.lookups()) > 0xffffffffull
- || (current.hits - last.hits) > 0xffffffffull)
- {
- LOG(warning, "Document store cache hit rate metrics to add are suspiciously high."
- " lookups diff=%" PRIu64 ", hits diff=%" PRIu64 ".",
- current.lookups() - last.lookups(), current.hits - last.hits);
- }
- cacheHitRate.addTotalValueWithCount(current.hits - last.hits, current.lookups() - last.lookups());
- }
-}
-
-void
-updateCountMetric(uint64_t currVal, uint64_t lastVal, metrics::LongCountMetric &metric)
-{
- uint64_t delta = (currVal >= lastVal) ? (currVal - lastVal) : 0;
- metric.inc(delta);
-}
-
-void
-updateDocstoreMetrics(LegacyDocumentDBMetrics::DocstoreMetrics &metrics,
- const DocumentSubDBCollection &sub_dbs,
- CacheStats &lastCacheStats)
-{
- size_t memoryUsage = 0;
- CacheStats cache_stats;
- for (const auto subDb : sub_dbs) {
- const ISummaryManager::SP &summaryMgr = subDb->getSummaryManager();
- if (summaryMgr) {
- cache_stats += summaryMgr->getBackingStore().getCacheStats();
- memoryUsage += summaryMgr->getBackingStore().memoryUsed();
- }
- }
- metrics.memoryUsage.set(memoryUsage);
- updateCountMetric(cache_stats.lookups(), lastCacheStats.lookups(), metrics.cacheLookups);
- updateDocumentStoreCacheHitRate(cache_stats, lastCacheStats, metrics.cacheHitRate);
- metrics.cacheElements.set(cache_stats.elements);
- metrics.cacheMemoryUsed.set(cache_stats.memory_used);
- lastCacheStats = cache_stats;
-}
-
-void
-updateDocumentStoreMetrics(DocumentDBTaggedMetrics::SubDBMetrics::DocumentStoreMetrics &metrics,
- IDocumentSubDB *subDb,
- CacheStats &lastCacheStats)
-{
- const ISummaryManager::SP &summaryMgr = subDb->getSummaryManager();
- search::IDocumentStore &backingStore = summaryMgr->getBackingStore();
- search::DataStoreStorageStats storageStats(backingStore.getStorageStats());
- metrics.diskUsage.set(storageStats.diskUsage());
- metrics.diskBloat.set(storageStats.diskBloat());
- metrics.maxBucketSpread.set(storageStats.maxBucketSpread());
- metrics.memoryUsage.update(backingStore.getMemoryUsage());
-
- search::CacheStats cacheStats = backingStore.getCacheStats();
- metrics.cache.memoryUsage.set(cacheStats.memory_used);
- metrics.cache.elements.set(cacheStats.elements);
- updateDocumentStoreCacheHitRate(cacheStats, lastCacheStats, metrics.cache.hitRate);
- updateCountMetric(cacheStats.lookups(), lastCacheStats.lookups(), metrics.cache.lookups);
- updateCountMetric(cacheStats.invalidations, lastCacheStats.invalidations, metrics.cache.invalidations);
- lastCacheStats = cacheStats;
-}
-
-template <typename MetricSetType>
-void
-updateLidSpaceMetrics(MetricSetType &metrics, const search::IDocumentMetaStore &metaStore)
-{
- LidUsageStats stats = metaStore.getLidUsageStats();
- metrics.lidLimit.set(stats.getLidLimit());
- metrics.usedLids.set(stats.getUsedLids());
- metrics.lowestFreeLid.set(stats.getLowestFreeLid());
- metrics.highestUsedLid.set(stats.getHighestUsedLid());
- metrics.lidBloatFactor.set(stats.getLidBloatFactor());
- metrics.lidFragmentationFactor.set(stats.getLidFragmentationFactor());
-}
-
-} // namespace
-
void
DocumentDB::updateMetrics(DocumentDBMetricsCollection &metrics)
{
if (_state.getState() < DDBState::State::REPLAY_TRANSACTION_LOG) {
return;
}
-
- ExecutorThreadingServiceStats threadingServiceStats = _writeService.getStats();
- updateLegacyMetrics(metrics.getLegacyMetrics(), threadingServiceStats);
- updateIndexMetrics(metrics, _subDBs.getReadySubDB()->getSearchableStats());
- updateAttributeMetrics(metrics, _subDBs);
- updateMatchingMetrics(metrics, *_subDBs.getReadySubDB());
- updateSessionCacheMetrics(metrics, *_sessionManager);
- updateMetrics(metrics.getTaggedMetrics(), threadingServiceStats);
-}
-
-void
-DocumentDB::updateLegacyMetrics(LegacyDocumentDBMetrics &metrics, const ExecutorThreadingServiceStats &threadingServiceStats)
-{
- metrics.executor.update(threadingServiceStats.getMasterExecutorStats());
- metrics.summaryExecutor.update(threadingServiceStats.getSummaryExecutorStats());
- metrics.indexExecutor.update(threadingServiceStats.getIndexExecutorStats());
- updateDocstoreMetrics(metrics.docstore, _subDBs, _lastDocStoreCacheStats.total);
- metrics.numDocs.set(getNumDocs());
-
- DocumentMetaStoreReadGuards dmss(_subDBs);
-
- metrics.numActiveDocs.set(dmss.numActiveDocs());
- metrics.numIndexedDocs.set(dmss.numIndexedDocs());
- metrics.numStoredDocs.set(dmss.numStoredDocs());
- metrics.numRemovedDocs.set(dmss.numRemovedDocs());
-
- updateLidSpaceMetrics(metrics.ready.docMetaStore, dmss.readydms->get());
- updateLidSpaceMetrics(metrics.notReady.docMetaStore, dmss.notreadydms->get());
- updateLidSpaceMetrics(metrics.removed.docMetaStore, dmss.remdms->get());
-
- metrics.numBadConfigs.set(_state.getDelayedConfig() ? 1u : 0u);
-}
-
-void
-DocumentDB::
-updateMetrics(DocumentDBTaggedMetrics::AttributeMetrics &metrics)
-{
- AttributeUsageFilter &writeFilter(_writeFilter);
- AttributeUsageStats attributeUsageStats = writeFilter.getAttributeUsageStats();
- bool feedBlocked = !writeFilter.acceptWriteOperation();
- double enumStoreUsed = attributeUsageStats.enumStoreUsage().getUsage().usage();
- double multiValueUsed = attributeUsageStats.multiValueUsage().getUsage().usage();
- metrics.resourceUsage.enumStore.set(enumStoreUsed);
- metrics.resourceUsage.multiValue.set(multiValueUsed);
- metrics.resourceUsage.feedingBlocked.set(feedBlocked ? 1 : 0);
-}
-
-void
-DocumentDB::updateMetrics(DocumentDBTaggedMetrics &metrics, const ExecutorThreadingServiceStats &threadingServiceStats)
-{
- metrics.threadingService.update(threadingServiceStats);
- _jobTrackers.updateMetrics(metrics.job);
-
- updateMetrics(metrics.attribute);
- updateDocumentStoreMetrics(metrics.ready.documentStore, _subDBs.getReadySubDB(), _lastDocStoreCacheStats.readySubDb);
- updateDocumentStoreMetrics(metrics.removed.documentStore, _subDBs.getRemSubDB(), _lastDocStoreCacheStats.removedSubDb);
- updateDocumentStoreMetrics(metrics.notReady.documentStore, _subDBs.getNotReadySubDB(), _lastDocStoreCacheStats.notReadySubDb);
- DocumentMetaStoreReadGuards dmss(_subDBs);
- updateLidSpaceMetrics(metrics.ready.lidSpace, dmss.readydms->get());
- updateLidSpaceMetrics(metrics.notReady.lidSpace, dmss.notreadydms->get());
- updateLidSpaceMetrics(metrics.removed.lidSpace, dmss.remdms->get());
+ _metricsUpdater.updateMetrics(metrics);
}
void
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdb.h b/searchcore/src/vespa/searchcore/proton/server/documentdb.h
index 996b365cb48..fb23b113f3b 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentdb.h
+++ b/searchcore/src/vespa/searchcore/proton/server/documentdb.h
@@ -6,11 +6,12 @@
#include "configstore.h"
#include "ddbstate.h"
#include "disk_mem_usage_forwarder.h"
+#include "documentdb_metrics_updater.h"
+#include "document_db_config_owner.h"
#include "documentdbconfig.h"
#include "documentsubdbcollection.h"
#include "executorthreadingservice.h"
#include "feedhandler.h"
-#include "document_db_config_owner.h"
#include "i_document_subdb_owner.h"
#include "i_feed_handler_owner.h"
#include "i_lid_space_compaction_handler.h"
@@ -135,10 +136,8 @@ private:
VisibilityHandler _visibility;
ILidSpaceCompactionHandler::Vector _lidSpaceCompactionHandlers;
DocumentDBJobTrackers _jobTrackers;
-
- // Last updated document store cache statistics. Necessary due to metrics implementation is upside down.
- DocumentStoreCacheStats _lastDocStoreCacheStats;
IBucketStateCalculator::SP _calc;
+ DocumentDBMetricsUpdater _metricsUpdater;
void registerReference();
void setActiveConfig(const DocumentDBConfig::SP &config, SerialNum serialNum, int64_t generation);
@@ -206,10 +205,6 @@ private:
virtual void notifyClusterStateChanged(const IBucketStateCalculator::SP &newCalc) override;
void notifyAllBucketsChanged();
- void updateLegacyMetrics(LegacyDocumentDBMetrics &metrics, const ExecutorThreadingServiceStats &threadingServiceStats);
- void updateMetrics(DocumentDBTaggedMetrics &metrics, const ExecutorThreadingServiceStats &threadingServiceStats);
- void updateMetrics(DocumentDBTaggedMetrics::AttributeMetrics &metrics);
-
/*
* Tear down references to this document db (e.g. listeners for
* gid to lid changes) from other document dbs.
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdb_metrics_updater.cpp b/searchcore/src/vespa/searchcore/proton/server/documentdb_metrics_updater.cpp
new file mode 100644
index 00000000000..71baabd9d6f
--- /dev/null
+++ b/searchcore/src/vespa/searchcore/proton/server/documentdb_metrics_updater.cpp
@@ -0,0 +1,415 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "ddbstate.h"
+#include "document_meta_store_read_guards.h"
+#include "documentdb_metrics_updater.h"
+#include "documentsubdbcollection.h"
+#include "executorthreadingservice.h"
+#include "idocumentsubdb.h"
+#include <vespa/searchcommon/attribute/status.h>
+#include <vespa/searchcore/proton/attribute/attribute_usage_filter.h>
+#include <vespa/searchcore/proton/attribute/i_attribute_manager.h>
+#include <vespa/searchcore/proton/docsummary/isummarymanager.h>
+#include <vespa/searchcore/proton/matching/matching_stats.h>
+#include <vespa/searchcore/proton/matching/matching_stats.h>
+#include <vespa/searchcore/proton/metrics/documentdb_job_trackers.h>
+#include <vespa/searchcore/proton/metrics/documentdb_metrics_collection.h>
+#include <vespa/searchcore/proton/metrics/executor_threading_service_stats.h>
+#include <vespa/searchlib/attribute/attributevector.h>
+#include <vespa/searchlib/docstore/cachestats.h>
+#include <vespa/searchlib/util/memoryusage.h>
+#include <vespa/searchlib/util/searchable_stats.h>
+
+#include <vespa/log/log.h>
+LOG_SETUP(".proton.server.documentdb_metrics_updater");
+
+using search::LidUsageStats;
+using search::CacheStats;
+using search::MemoryUsage;
+
+namespace proton {
+
+using matching::MatchingStats;
+
+DocumentDBMetricsUpdater::DocumentDBMetricsUpdater(const DocumentSubDBCollection &subDBs,
+ ExecutorThreadingService &writeService,
+ DocumentDBJobTrackers &jobTrackers,
+ matching::SessionManager &sessionManager,
+ const AttributeUsageFilter &writeFilter,
+ const DDBState &state)
+ : _subDBs(subDBs),
+ _writeService(writeService),
+ _jobTrackers(jobTrackers),
+ _sessionManager(sessionManager),
+ _writeFilter(writeFilter),
+ _state(state)
+{
+}
+
+DocumentDBMetricsUpdater::~DocumentDBMetricsUpdater() = default;
+
+namespace {
+
+struct TotalStats {
+ search::MemoryUsage memoryUsage;
+ uint64_t diskUsage;
+ TotalStats() : memoryUsage(), diskUsage() {}
+};
+
+void
+updateMemoryUsageMetrics(MemoryUsageMetrics &metrics, const MemoryUsage &memoryUsage, TotalStats &totalStats)
+{
+ metrics.update(memoryUsage);
+ totalStats.memoryUsage.merge(memoryUsage);
+}
+
+void
+updateDiskUsageMetric(metrics::LongValueMetric &metric, uint64_t diskUsage, TotalStats &totalStats)
+{
+ metric.set(diskUsage);
+ totalStats.diskUsage += diskUsage;
+}
+
+void
+updateIndexMetrics(DocumentDBMetricsCollection &metrics, const search::SearchableStats &stats, TotalStats &totalStats)
+{
+ DocumentDBTaggedMetrics::IndexMetrics &indexMetrics = metrics.getTaggedMetrics().index;
+ updateDiskUsageMetric(indexMetrics.diskUsage, stats.sizeOnDisk(), totalStats);
+ updateMemoryUsageMetrics(indexMetrics.memoryUsage, stats.memoryUsage(), totalStats);
+ indexMetrics.docsInMemory.set(stats.docsInMemory());
+
+ LegacyDocumentDBMetrics::IndexMetrics &legacyIndexMetrics = metrics.getLegacyMetrics().index;
+ legacyIndexMetrics.memoryUsage.set(stats.memoryUsage().allocatedBytes());
+ legacyIndexMetrics.docsInMemory.set(stats.docsInMemory());
+ legacyIndexMetrics.diskUsage.set(stats.sizeOnDisk());
+}
+
+struct TempAttributeMetric
+{
+ MemoryUsage memoryUsage;
+ uint64_t bitVectors;
+
+ TempAttributeMetric()
+ : memoryUsage(),
+ bitVectors(0)
+ {}
+};
+
+struct TempAttributeMetrics
+{
+ typedef std::map<vespalib::string, TempAttributeMetric> AttrMap;
+ TempAttributeMetric total;
+ AttrMap attrs;
+};
+
+bool
+isReadySubDB(const IDocumentSubDB *subDb, const DocumentSubDBCollection &subDbs)
+{
+ return subDb == subDbs.getReadySubDB();
+}
+
+bool
+isNotReadySubDB(const IDocumentSubDB *subDb, const DocumentSubDBCollection &subDbs)
+{
+ return subDb == subDbs.getNotReadySubDB();
+}
+
+void
+fillTempAttributeMetrics(TempAttributeMetrics &metrics, const vespalib::string &attrName,
+ const MemoryUsage &memoryUsage, uint32_t bitVectors)
+{
+ metrics.total.memoryUsage.merge(memoryUsage);
+ metrics.total.bitVectors += bitVectors;
+ TempAttributeMetric &m = metrics.attrs[attrName];
+ m.memoryUsage.merge(memoryUsage);
+ m.bitVectors += bitVectors;
+}
+
+void
+fillTempAttributeMetrics(TempAttributeMetrics &totalMetrics,
+ TempAttributeMetrics &readyMetrics,
+ TempAttributeMetrics &notReadyMetrics,
+ const DocumentSubDBCollection &subDbs)
+{
+ for (const auto subDb : subDbs) {
+ proton::IAttributeManager::SP attrMgr(subDb->getAttributeManager());
+ if (attrMgr) {
+ TempAttributeMetrics *subMetrics =
+ (isReadySubDB(subDb, subDbs) ? &readyMetrics :
+ (isNotReadySubDB(subDb, subDbs) ? &notReadyMetrics : nullptr));
+ std::vector<search::AttributeGuard> list;
+ attrMgr->getAttributeListAll(list);
+ for (const auto &attr : list) {
+ const search::attribute::Status &status = attr->getStatus();
+ MemoryUsage memoryUsage(status.getAllocated(), status.getUsed(), status.getDead(), status.getOnHold());
+ uint32_t bitVectors = status.getBitVectors();
+ fillTempAttributeMetrics(totalMetrics, attr->getName(), memoryUsage, bitVectors);
+ if (subMetrics != nullptr) {
+ fillTempAttributeMetrics(*subMetrics, attr->getName(), memoryUsage, bitVectors);
+ }
+ }
+ }
+ }
+}
+
+void
+updateLegacyAttributeMetrics(LegacyAttributeMetrics &metrics, const TempAttributeMetrics &tmpMetrics)
+{
+ for (const auto &attr : tmpMetrics.attrs) {
+ LegacyAttributeMetrics::List::Entry *entry = metrics.list.get(attr.first);
+ if (entry) {
+ entry->memoryUsage.set(attr.second.memoryUsage.allocatedBytes());
+ entry->bitVectors.set(attr.second.bitVectors);
+ } else {
+ LOG(debug, "Could not update metrics for attribute: '%s'", attr.first.c_str());
+ }
+ }
+ metrics.memoryUsage.set(tmpMetrics.total.memoryUsage.allocatedBytes());
+ metrics.bitVectors.set(tmpMetrics.total.bitVectors);
+}
+
+void
+updateAttributeMetrics(AttributeMetrics &metrics, const TempAttributeMetrics &tmpMetrics)
+{
+ for (const auto &attr : tmpMetrics.attrs) {
+ auto entry = metrics.get(attr.first);
+ if (entry) {
+ entry->memoryUsage.update(attr.second.memoryUsage);
+ }
+ }
+}
+
+void
+updateAttributeMetrics(DocumentDBMetricsCollection &metrics, const DocumentSubDBCollection &subDbs, TotalStats &totalStats)
+{
+ TempAttributeMetrics totalMetrics;
+ TempAttributeMetrics readyMetrics;
+ TempAttributeMetrics notReadyMetrics;
+ fillTempAttributeMetrics(totalMetrics, readyMetrics, notReadyMetrics, subDbs);
+
+ updateLegacyAttributeMetrics(metrics.getLegacyMetrics().attributes, totalMetrics);
+ updateLegacyAttributeMetrics(metrics.getLegacyMetrics().ready.attributes, readyMetrics);
+ updateLegacyAttributeMetrics(metrics.getLegacyMetrics().notReady.attributes, notReadyMetrics);
+
+ updateAttributeMetrics(metrics.getTaggedMetrics().ready.attributes, readyMetrics);
+ updateAttributeMetrics(metrics.getTaggedMetrics().notReady.attributes, notReadyMetrics);
+ updateMemoryUsageMetrics(metrics.getTaggedMetrics().attribute.totalMemoryUsage, totalMetrics.total.memoryUsage, totalStats);
+}
+
+void
+updateLegacyRankProfileMetrics(LegacyDocumentDBMetrics::MatchingMetrics &matchingMetrics,
+ const vespalib::string &rankProfileName,
+ const MatchingStats &stats)
+{
+ auto itr = matchingMetrics.rank_profiles.find(rankProfileName);
+ assert(itr != matchingMetrics.rank_profiles.end());
+ itr->second->update(stats);
+}
+
+void
+updateMatchingMetrics(DocumentDBMetricsCollection &metrics, const IDocumentSubDB &ready)
+{
+ MatchingStats totalStats;
+ for (const auto &rankProfile : metrics.getTaggedMetrics().matching.rank_profiles) {
+ MatchingStats matchingStats = ready.getMatcherStats(rankProfile.first);
+ rankProfile.second->update(matchingStats);
+ updateLegacyRankProfileMetrics(metrics.getLegacyMetrics().matching, rankProfile.first, matchingStats);
+
+ totalStats.add(matchingStats);
+ }
+ metrics.getTaggedMetrics().matching.update(totalStats);
+ metrics.getLegacyMetrics().matching.update(totalStats);
+}
+
+void
+updateSessionCacheMetrics(DocumentDBMetricsCollection &metrics, proton::matching::SessionManager &sessionManager)
+{
+ auto searchStats = sessionManager.getSearchStats();
+ metrics.getTaggedMetrics().sessionCache.search.update(searchStats);
+
+ auto groupingStats = sessionManager.getGroupingStats();
+ metrics.getTaggedMetrics().sessionCache.grouping.update(groupingStats);
+ metrics.getLegacyMetrics().sessionManager.update(groupingStats);
+}
+
+void
+updateDocumentsMetrics(DocumentDBMetricsCollection &metrics, const DocumentSubDBCollection &subDbs)
+{
+ DocumentMetaStoreReadGuards dms(subDbs);
+ uint32_t active = dms.numActiveDocs();
+ uint32_t ready = dms.numReadyDocs();
+ uint32_t total = dms.numTotalDocs();
+ uint32_t removed = dms.numRemovedDocs();
+
+ auto &docsMetrics = metrics.getTaggedMetrics().documents;
+ docsMetrics.active.set(active);
+ docsMetrics.ready.set(ready);
+ docsMetrics.total.set(total);
+ docsMetrics.removed.set(removed);
+
+ auto &legacyMetrics = metrics.getLegacyMetrics();
+ legacyMetrics.numDocs.set(ready);
+ legacyMetrics.numActiveDocs.set(active);
+ legacyMetrics.numIndexedDocs.set(ready);
+ legacyMetrics.numStoredDocs.set(total);
+ legacyMetrics.numRemovedDocs.set(removed);
+}
+
+void
+updateDocumentStoreCacheHitRate(const CacheStats &current, const CacheStats &last,
+ metrics::LongAverageMetric &cacheHitRate)
+{
+ if (current.lookups() < last.lookups() || current.hits < last.hits) {
+ LOG(warning, "Not adding document store cache hit rate metrics as values calculated "
+ "are corrupt. current.lookups=%" PRIu64 ", last.lookups=%" PRIu64 ", current.hits=%" PRIu64 ", last.hits=%" PRIu64 ".",
+ current.lookups(), last.lookups(), current.hits, last.hits);
+ } else {
+ if ((current.lookups() - last.lookups()) > 0xffffffffull
+ || (current.hits - last.hits) > 0xffffffffull)
+ {
+ LOG(warning, "Document store cache hit rate metrics to add are suspiciously high."
+ " lookups diff=%" PRIu64 ", hits diff=%" PRIu64 ".",
+ current.lookups() - last.lookups(), current.hits - last.hits);
+ }
+ cacheHitRate.addTotalValueWithCount(current.hits - last.hits, current.lookups() - last.lookups());
+ }
+}
+
+void
+updateCountMetric(uint64_t currVal, uint64_t lastVal, metrics::LongCountMetric &metric)
+{
+ uint64_t delta = (currVal >= lastVal) ? (currVal - lastVal) : 0;
+ metric.inc(delta);
+}
+
+void
+updateLegacyDocstoreMetrics(LegacyDocumentDBMetrics::DocstoreMetrics &metrics,
+ const DocumentSubDBCollection &sub_dbs,
+ CacheStats &lastCacheStats)
+{
+ size_t memoryUsage = 0;
+ CacheStats cache_stats;
+ for (const auto subDb : sub_dbs) {
+ const ISummaryManager::SP &summaryMgr = subDb->getSummaryManager();
+ if (summaryMgr) {
+ cache_stats += summaryMgr->getBackingStore().getCacheStats();
+ memoryUsage += summaryMgr->getBackingStore().memoryUsed();
+ }
+ }
+ metrics.memoryUsage.set(memoryUsage);
+ updateCountMetric(cache_stats.lookups(), lastCacheStats.lookups(), metrics.cacheLookups);
+ updateDocumentStoreCacheHitRate(cache_stats, lastCacheStats, metrics.cacheHitRate);
+ metrics.cacheElements.set(cache_stats.elements);
+ metrics.cacheMemoryUsed.set(cache_stats.memory_used);
+ lastCacheStats = cache_stats;
+}
+
+void
+updateDocumentStoreMetrics(DocumentDBTaggedMetrics::SubDBMetrics::DocumentStoreMetrics &metrics,
+ const IDocumentSubDB *subDb,
+ CacheStats &lastCacheStats,
+ TotalStats &totalStats)
+{
+ const ISummaryManager::SP &summaryMgr = subDb->getSummaryManager();
+ search::IDocumentStore &backingStore = summaryMgr->getBackingStore();
+ search::DataStoreStorageStats storageStats(backingStore.getStorageStats());
+ updateDiskUsageMetric(metrics.diskUsage, storageStats.diskUsage(), totalStats);
+ metrics.diskBloat.set(storageStats.diskBloat());
+ metrics.maxBucketSpread.set(storageStats.maxBucketSpread());
+ updateMemoryUsageMetrics(metrics.memoryUsage, backingStore.getMemoryUsage(), totalStats);
+
+ search::CacheStats cacheStats = backingStore.getCacheStats();
+ totalStats.memoryUsage.incAllocatedBytes(cacheStats.memory_used);
+ metrics.cache.memoryUsage.set(cacheStats.memory_used);
+ metrics.cache.elements.set(cacheStats.elements);
+ updateDocumentStoreCacheHitRate(cacheStats, lastCacheStats, metrics.cache.hitRate);
+ updateCountMetric(cacheStats.lookups(), lastCacheStats.lookups(), metrics.cache.lookups);
+ updateCountMetric(cacheStats.invalidations, lastCacheStats.invalidations, metrics.cache.invalidations);
+ lastCacheStats = cacheStats;
+}
+
+void
+updateDocumentStoreMetrics(DocumentDBTaggedMetrics &metrics, const DocumentSubDBCollection &subDBs,
+ DocumentDBMetricsUpdater::DocumentStoreCacheStats &lastDocStoreCacheStats, TotalStats &totalStats)
+{
+ updateDocumentStoreMetrics(metrics.ready.documentStore, subDBs.getReadySubDB(), lastDocStoreCacheStats.readySubDb, totalStats);
+ updateDocumentStoreMetrics(metrics.removed.documentStore, subDBs.getRemSubDB(), lastDocStoreCacheStats.removedSubDb, totalStats);
+ updateDocumentStoreMetrics(metrics.notReady.documentStore, subDBs.getNotReadySubDB(), lastDocStoreCacheStats.notReadySubDb, totalStats);
+}
+
+template <typename MetricSetType>
+void
+updateLidSpaceMetrics(MetricSetType &metrics, const search::IDocumentMetaStore &metaStore)
+{
+ LidUsageStats stats = metaStore.getLidUsageStats();
+ metrics.lidLimit.set(stats.getLidLimit());
+ metrics.usedLids.set(stats.getUsedLids());
+ metrics.lowestFreeLid.set(stats.getLowestFreeLid());
+ metrics.highestUsedLid.set(stats.getHighestUsedLid());
+ metrics.lidBloatFactor.set(stats.getLidBloatFactor());
+ metrics.lidFragmentationFactor.set(stats.getLidFragmentationFactor());
+}
+
+}
+
+void
+DocumentDBMetricsUpdater::updateMetrics(DocumentDBMetricsCollection &metrics)
+{
+ TotalStats totalStats;
+ ExecutorThreadingServiceStats threadingServiceStats = _writeService.getStats();
+ updateLegacyMetrics(metrics.getLegacyMetrics(), threadingServiceStats);
+ updateIndexMetrics(metrics, _subDBs.getReadySubDB()->getSearchableStats(), totalStats);
+ updateAttributeMetrics(metrics, _subDBs, totalStats);
+ updateMatchingMetrics(metrics, *_subDBs.getReadySubDB());
+ updateSessionCacheMetrics(metrics, _sessionManager);
+ updateDocumentsMetrics(metrics, _subDBs);
+ updateDocumentStoreMetrics(metrics.getTaggedMetrics(), _subDBs, _lastDocStoreCacheStats, totalStats);
+ updateMiscMetrics(metrics.getTaggedMetrics(), threadingServiceStats);
+
+ metrics.getTaggedMetrics().totalMemoryUsage.update(totalStats.memoryUsage);
+ metrics.getTaggedMetrics().totalDiskUsage.set(totalStats.diskUsage);
+}
+
+void
+DocumentDBMetricsUpdater::updateLegacyMetrics(LegacyDocumentDBMetrics &metrics, const ExecutorThreadingServiceStats &threadingServiceStats)
+{
+ metrics.executor.update(threadingServiceStats.getMasterExecutorStats());
+ metrics.summaryExecutor.update(threadingServiceStats.getSummaryExecutorStats());
+ metrics.indexExecutor.update(threadingServiceStats.getIndexExecutorStats());
+ updateLegacyDocstoreMetrics(metrics.docstore, _subDBs, _lastDocStoreCacheStats.total);
+
+ DocumentMetaStoreReadGuards dmss(_subDBs);
+ updateLidSpaceMetrics(metrics.ready.docMetaStore, dmss.readydms->get());
+ updateLidSpaceMetrics(metrics.notReady.docMetaStore, dmss.notreadydms->get());
+ updateLidSpaceMetrics(metrics.removed.docMetaStore, dmss.remdms->get());
+
+ metrics.numBadConfigs.set(_state.getDelayedConfig() ? 1u : 0u);
+}
+
+void
+DocumentDBMetricsUpdater::updateAttributeResourceUsageMetrics(DocumentDBTaggedMetrics::AttributeMetrics &metrics)
+{
+ AttributeUsageStats attributeUsageStats = _writeFilter.getAttributeUsageStats();
+ bool feedBlocked = !_writeFilter.acceptWriteOperation();
+ double enumStoreUsed = attributeUsageStats.enumStoreUsage().getUsage().usage();
+ double multiValueUsed = attributeUsageStats.multiValueUsage().getUsage().usage();
+ metrics.resourceUsage.enumStore.set(enumStoreUsed);
+ metrics.resourceUsage.multiValue.set(multiValueUsed);
+ metrics.resourceUsage.feedingBlocked.set(feedBlocked ? 1 : 0);
+}
+
+void
+DocumentDBMetricsUpdater::updateMiscMetrics(DocumentDBTaggedMetrics &metrics, const ExecutorThreadingServiceStats &threadingServiceStats)
+{
+ metrics.threadingService.update(threadingServiceStats);
+ _jobTrackers.updateMetrics(metrics.job);
+
+ updateAttributeResourceUsageMetrics(metrics.attribute);
+
+ DocumentMetaStoreReadGuards dmss(_subDBs);
+ updateLidSpaceMetrics(metrics.ready.lidSpace, dmss.readydms->get());
+ updateLidSpaceMetrics(metrics.notReady.lidSpace, dmss.notreadydms->get());
+ updateLidSpaceMetrics(metrics.removed.lidSpace, dmss.remdms->get());
+}
+
+}
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdb_metrics_updater.h b/searchcore/src/vespa/searchcore/proton/server/documentdb_metrics_updater.h
new file mode 100644
index 00000000000..e0042207060
--- /dev/null
+++ b/searchcore/src/vespa/searchcore/proton/server/documentdb_metrics_updater.h
@@ -0,0 +1,61 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <vespa/searchcore/proton/metrics/documentdb_tagged_metrics.h>
+#include <vespa/searchlib/docstore/cachestats.h>
+
+namespace proton {
+
+namespace matching { class SessionManager; }
+
+class AttributeUsageFilter;
+class DDBState;
+class DocumentDBJobTrackers;
+class DocumentDBMetricsCollection;
+class DocumentSubDBCollection;
+class ExecutorThreadingService;
+class ExecutorThreadingServiceStats;
+class LegacyDocumentDBMetrics;
+
+/**
+ * Class used to update metrics for a document db.
+ */
+class DocumentDBMetricsUpdater {
+public:
+
+ struct DocumentStoreCacheStats {
+ search::CacheStats total;
+ search::CacheStats readySubDb;
+ search::CacheStats notReadySubDb;
+ search::CacheStats removedSubDb;
+ DocumentStoreCacheStats() : total(), readySubDb(), notReadySubDb(), removedSubDb() {}
+ };
+
+private:
+ const DocumentSubDBCollection &_subDBs;
+ ExecutorThreadingService &_writeService;
+ DocumentDBJobTrackers &_jobTrackers;
+ matching::SessionManager &_sessionManager;
+ const AttributeUsageFilter &_writeFilter;
+ const DDBState &_state;
+    // Last updated document store cache statistics. Kept here because the metrics framework reports deltas, not totals.
+ DocumentStoreCacheStats _lastDocStoreCacheStats;
+
+ void updateLegacyMetrics(LegacyDocumentDBMetrics &metrics, const ExecutorThreadingServiceStats &threadingServiceStats);
+ void updateMiscMetrics(DocumentDBTaggedMetrics &metrics, const ExecutorThreadingServiceStats &threadingServiceStats);
+ void updateAttributeResourceUsageMetrics(DocumentDBTaggedMetrics::AttributeMetrics &metrics);
+
+public:
+ DocumentDBMetricsUpdater(const DocumentSubDBCollection &subDBs,
+ ExecutorThreadingService &writeService,
+ DocumentDBJobTrackers &jobTrackers,
+ matching::SessionManager &sessionManager,
+ const AttributeUsageFilter &writeFilter,
+ const DDBState &state);
+ ~DocumentDBMetricsUpdater();
+
+ void updateMetrics(DocumentDBMetricsCollection &metrics);
+
+};
+
+}
diff --git a/searchcore/src/vespa/searchcore/proton/server/idocumentsubdb.h b/searchcore/src/vespa/searchcore/proton/server/idocumentsubdb.h
index d41c2088518..736edcf3c5e 100644
--- a/searchcore/src/vespa/searchcore/proton/server/idocumentsubdb.h
+++ b/searchcore/src/vespa/searchcore/proton/server/idocumentsubdb.h
@@ -84,6 +84,7 @@ public:
virtual const std::shared_ptr<ISummaryAdapter> &getSummaryAdapter() const = 0;
virtual const std::shared_ptr<IIndexWriter> &getIndexWriter() const = 0;
virtual IDocumentMetaStoreContext &getDocumentMetaStoreContext() = 0;
+ virtual const IDocumentMetaStoreContext &getDocumentMetaStoreContext() const = 0;
virtual IFlushTargetList getFlushTargets() = 0;
virtual size_t getNumDocs() const = 0;
virtual size_t getNumActiveDocs() const = 0;
diff --git a/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h b/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h
index 4a21537f134..a54525377a9 100644
--- a/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h
+++ b/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h
@@ -228,6 +228,7 @@ public:
const ISummaryAdapter::SP & getSummaryAdapter() const override { return _summaryAdapter; }
const std::shared_ptr<IIndexWriter> & getIndexWriter() const override;
IDocumentMetaStoreContext & getDocumentMetaStoreContext() override { return *_metaStoreCtx; }
+ const IDocumentMetaStoreContext &getDocumentMetaStoreContext() const override { return *_metaStoreCtx; }
size_t getNumDocs() const override;
size_t getNumActiveDocs() const override;
bool hasDocument(const document::DocumentId &id) override;
diff --git a/searchcore/src/vespa/searchcore/proton/test/dummy_document_sub_db.h b/searchcore/src/vespa/searchcore/proton/test/dummy_document_sub_db.h
index 66ae296566f..c2595a85fb7 100644
--- a/searchcore/src/vespa/searchcore/proton/test/dummy_document_sub_db.h
+++ b/searchcore/src/vespa/searchcore/proton/test/dummy_document_sub_db.h
@@ -67,6 +67,7 @@ struct DummyDocumentSubDb : public IDocumentSubDB
const ISummaryAdapter::SP &getSummaryAdapter() const override { return _summaryAdapter; }
const IIndexWriter::SP &getIndexWriter() const override { return _indexWriter; }
IDocumentMetaStoreContext &getDocumentMetaStoreContext() override { return _metaStoreCtx; }
+ const IDocumentMetaStoreContext &getDocumentMetaStoreContext() const override { return _metaStoreCtx; }
IFlushTargetList getFlushTargets() override { return IFlushTargetList(); }
size_t getNumDocs() const override { return 0; }
size_t getNumActiveDocs() const override { return 0; }
diff --git a/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/integration/ml/BatchNormImportTestCase.java b/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/integration/ml/BatchNormImportTestCase.java
index 62bbc9ae81f..593e7b54c10 100644
--- a/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/integration/ml/BatchNormImportTestCase.java
+++ b/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/integration/ml/BatchNormImportTestCase.java
@@ -25,7 +25,7 @@ public class BatchNormImportTestCase {
assertNotNull(output);
assertEquals("dnn/batch_normalization_3/batchnorm/add_1", output.getBody().getName());
model.assertEqualResult("X", output.getBody().getName());
- assertEquals("{x=tensor(d0[],d1[784])}", output.arguments().toString());
+ assertEquals("{x=tensor(d0[],d1[784])}", output.argumentTypes().toString());
}
}
diff --git a/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/integration/ml/DropoutImportTestCase.java b/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/integration/ml/DropoutImportTestCase.java
index 2a894adc92c..59712c0152f 100644
--- a/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/integration/ml/DropoutImportTestCase.java
+++ b/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/integration/ml/DropoutImportTestCase.java
@@ -36,7 +36,7 @@ public class DropoutImportTestCase {
assertEquals("join(join(imported_ml_function_test_outputs_BiasAdd, reduce(constant(test_outputs_Const), sum, d1), f(a,b)(a * b)), imported_ml_function_test_outputs_BiasAdd, f(a,b)(max(a,b)))",
output.getBody().getRoot().toString());
model.assertEqualResult("X", output.getBody().getName());
- assertEquals("{x=tensor(d0[],d1[784])}", output.getBody().toString());
+ assertEquals("{x=tensor(d0[],d1[784])}", output.argumentTypes().toString());
}
}
diff --git a/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/integration/ml/OnnxMnistSoftmaxImportTestCase.java b/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/integration/ml/OnnxMnistSoftmaxImportTestCase.java
index bcdfde67dc0..b6e83404ab1 100644
--- a/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/integration/ml/OnnxMnistSoftmaxImportTestCase.java
+++ b/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/integration/ml/OnnxMnistSoftmaxImportTestCase.java
@@ -49,7 +49,7 @@ public class OnnxMnistSoftmaxImportTestCase {
output.getBody().getRoot().toString());
assertEquals(TensorType.fromSpec("tensor(d0[],d1[784])"),
model.inputs().get(model.defaultSignature().inputs().get("Placeholder")));
- assertEquals("{Placeholder=tensor(d0[],d1[784])}", output.getBody().toString());
+ assertEquals("{Placeholder=tensor(d0[],d1[784])}", output.argumentTypes().toString());
}
@Test
diff --git a/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/integration/ml/TensorFlowMnistSoftmaxImportTestCase.java b/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/integration/ml/TensorFlowMnistSoftmaxImportTestCase.java
index b14a4a5b430..0a48ecfce21 100644
--- a/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/integration/ml/TensorFlowMnistSoftmaxImportTestCase.java
+++ b/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/integration/ml/TensorFlowMnistSoftmaxImportTestCase.java
@@ -62,7 +62,7 @@ public class TensorFlowMnistSoftmaxImportTestCase {
assertEquals("add", output.getBody().getName());
assertEquals("join(reduce(join(rename(Placeholder, (d0, d1), (d0, d2)), constant(test_Variable_read), f(a,b)(a * b)), sum, d2), constant(test_Variable_1_read), f(a,b)(a + b))",
output.getBody().getRoot().toString());
- assertEquals("{x=tensor(d0[],d1[784])}", output.getBody().toString());
+ assertEquals("{x=tensor(d0[],d1[784])}", output.argumentTypes().toString());
// Test execution
model.assertEqualResult("Placeholder", "MatMul");
diff --git a/searchlib/src/tests/docstore/document_store/document_store_test.cpp b/searchlib/src/tests/docstore/document_store/document_store_test.cpp
index 0ef04d0e722..649fb675dca 100644
--- a/searchlib/src/tests/docstore/document_store/document_store_test.cpp
+++ b/searchlib/src/tests/docstore/document_store/document_store_test.cpp
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/vespalib/testkit/test_kit.h>
#include <vespa/searchlib/docstore/logdocumentstore.h>
+#include <vespa/searchlib/docstore/value.h>
#include <vespa/searchlib/docstore/cachestats.h>
#include <vespa/document/repo/documenttyperepo.h>
#include <vespa/document/fieldvalue/document.h>
@@ -81,4 +82,70 @@ TEST("require that LogDocumentStore::Config equality operator detects inequality
EXPECT_FALSE(C(DC(), LC().setMaxBucketSpread(7)) == C());
}
+using search::docstore::Value;
+vespalib::stringref S1("this is a string long enough to be compressed and is just used for sanity checking of compression"
+ "Adding some repeatble sequences like aaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbb to ensure compression"
+ "xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz xyz");
+
+Value createValue(vespalib::stringref s, const CompressionConfig & cfg) {
+ Value v(7);
+ vespalib::DataBuffer input;
+ input.writeBytes(s.data(), s.size());
+ v.set(std::move(input), s.size(), cfg);
+ return v;
+}
+void verifyValue(vespalib::stringref s, const Value & v) {
+ Value::Result result = v.decompressed();
+ ASSERT_TRUE(result.second);
+ EXPECT_EQUAL(s.size(), v.getUncompressedSize());
+ EXPECT_EQUAL(7u, v.getSyncToken());
+ EXPECT_EQUAL(0, memcmp(s.data(), result.first.getData(), result.first.getDataLen()));
+}
+
+TEST("require that Value and cache entries have expected size") {
+ using pair = std::pair<DocumentIdT, Value>;
+ using Node = vespalib::hash_node<pair>;
+ EXPECT_EQUAL(64ul, sizeof(Value));
+ EXPECT_EQUAL(72ul, sizeof(pair));
+ EXPECT_EQUAL(80ul, sizeof(Node));
+}
+
+TEST("require that Value can store uncompressed data") {
+ Value v = createValue(S1, CompressionConfig::NONE);
+ verifyValue(S1, v);
+}
+
+TEST("require that Value can be moved") {
+ Value v = createValue(S1, CompressionConfig::NONE);
+ Value m = std::move(v);
+ verifyValue(S1, m);
+}
+
+TEST("require that Value can be copied") {
+ Value v = createValue(S1, CompressionConfig::NONE);
+ Value copy(v);
+ verifyValue(S1, v);
+ verifyValue(S1, copy);
+}
+
+TEST("require that Value can store lz4 compressed data") {
+ Value v = createValue(S1, CompressionConfig::LZ4);
+ EXPECT_EQUAL(CompressionConfig::LZ4, v.getCompression());
+ EXPECT_EQUAL(164u, v.size());
+ verifyValue(S1, v);
+}
+
+TEST("require that Value can store zstd compressed data") {
+ Value v = createValue(S1, CompressionConfig::ZSTD);
+ EXPECT_EQUAL(CompressionConfig::ZSTD, v.getCompression());
+ EXPECT_EQUAL(128u, v.size());
+ verifyValue(S1, v);
+}
+
+TEST("require that Value can detect if output not equal to input") {
+ Value v = createValue(S1, CompressionConfig::NONE);
+ static_cast<uint8_t *>(v.get())[8] ^= 0xff;
+ EXPECT_FALSE(v.decompressed().second);
+}
+
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp b/searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp
index 34046f551d9..eb49a556bdc 100644
--- a/searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp
+++ b/searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp
@@ -622,7 +622,7 @@ TEST("test that the integrated visit cache works.") {
for (size_t i(1); i <= 100; i++) {
vcs.verifyRead(i);
}
- constexpr size_t BASE_SZ = 21374;
+ constexpr size_t BASE_SZ = 22174;
TEST_DO(verifyCacheStats(ds.getCacheStats(), 0, 100, 100, BASE_SZ));
for (size_t i(1); i <= 100; i++) {
vcs.verifyRead(i);
@@ -642,22 +642,22 @@ TEST("test that the integrated visit cache works.") {
vcs.verifyVisit({7,9,17,19,67,88}, true);
TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 102, 99, BASE_SZ+130));
vcs.verifyVisit({7,9,17,19,67,88,89}, true);
- TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 103, 99, BASE_SZ+201));
+ TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 103, 99, BASE_SZ+180));
vcs.rewrite(17);
- TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 103, 97, BASE_SZ-657));
+ TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 103, 97, BASE_SZ-680));
vcs.verifyVisit({7,9,17,19,67,88,89}, true);
- TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 104, 98, BASE_SZ-3));
+ TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 104, 98, BASE_SZ-20));
vcs.remove(17);
- TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 104, 97, BASE_SZ-657));
+ TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 104, 97, BASE_SZ-680));
vcs.verifyVisit({7,9,17,19,67,88,89}, {7,9,19,67,88,89}, true);
- TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 105, 98, BASE_SZ-64));
+ TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 105, 98, BASE_SZ-90));
vcs.verifyVisit({41, 42}, true);
- TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 106, 99, BASE_SZ+238));
+ TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 106, 99, BASE_SZ+210));
vcs.verifyVisit({43, 44}, true);
- TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 107, 100, BASE_SZ+540));
+ TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 107, 100, BASE_SZ+520));
vcs.verifyVisit({41, 42, 43, 44}, true);
- TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 108, 99, BASE_SZ+362));
+ TEST_DO(verifyCacheStats(ds.getCacheStats(), 101, 108, 99, BASE_SZ+340));
}
TEST("testWriteRead") {
diff --git a/searchlib/src/vespa/searchlib/docstore/CMakeLists.txt b/searchlib/src/vespa/searchlib/docstore/CMakeLists.txt
index 735b836cf17..2b82d9e5af7 100644
--- a/searchlib/src/vespa/searchlib/docstore/CMakeLists.txt
+++ b/searchlib/src/vespa/searchlib/docstore/CMakeLists.txt
@@ -18,6 +18,7 @@ vespa_add_library(searchlib_docstore OBJECT
randreaders.cpp
storebybucket.cpp
summaryexceptions.cpp
+ value.cpp
visitcache.cpp
writeablefilechunk.cpp
DEPENDS
diff --git a/searchlib/src/vespa/searchlib/docstore/documentstore.cpp b/searchlib/src/vespa/searchlib/docstore/documentstore.cpp
index 95b6c3b1584..003f448ab07 100644
--- a/searchlib/src/vespa/searchlib/docstore/documentstore.cpp
+++ b/searchlib/src/vespa/searchlib/docstore/documentstore.cpp
@@ -4,15 +4,18 @@
#include "documentstore.h"
#include "visitcache.h"
#include "ibucketizer.h"
+#include "value.h"
#include <vespa/document/fieldvalue/document.h>
#include <vespa/vespalib/stllike/cache.hpp>
#include <vespa/vespalib/data/databuffer.h>
#include <vespa/vespalib/util/compressor.h>
+#include <vespa/log/log.h>
+
+LOG_SETUP(".searchlib.docstore.documentstore");
+
using document::DocumentTypeRepo;
using vespalib::compression::CompressionConfig;
-using vespalib::compression::compress;
-using vespalib::compression::decompress;
namespace search {
@@ -39,82 +42,18 @@ DocumentVisitorAdapter::visit(uint32_t lid, vespalib::ConstBufferRef buf) {
}
}
+document::Document::UP
+deserializeDocument(const vespalib::DataBuffer & uncompressed, const DocumentTypeRepo &repo) {
+ vespalib::nbostream is(uncompressed.getData(), uncompressed.getDataLen());
+ return std::make_unique<document::Document>(repo, is);
+}
+
}
using vespalib::nbostream;
namespace docstore {
-class Value {
-public:
- using Alloc = vespalib::alloc::Alloc;
- typedef std::unique_ptr<Value> UP;
-
- Value()
- : _syncToken(0),
- _compressedSize(0),
- _uncompressedSize(0),
- _compression(CompressionConfig::NONE)
- {}
-
- Value(uint64_t syncToken)
- : _syncToken(syncToken),
- _compressedSize(0),
- _uncompressedSize(0),
- _compression(CompressionConfig::NONE)
- {}
-
- Value(Value &&rhs) = default;
- Value &operator=(Value &&rhs) = default;
-
- Value(const Value &rhs)
- : _syncToken(rhs._syncToken),
- _compressedSize(rhs._compressedSize),
- _uncompressedSize(rhs._uncompressedSize),
- _compression(rhs._compression),
- _buf(Alloc::alloc(rhs.size()))
- {
- memcpy(get(), rhs.get(), size());
- }
-
- void setCompression(CompressionConfig::Type comp, size_t uncompressedSize) {
- _compression = comp;
- _uncompressedSize = uncompressedSize;
- }
- uint64_t getSyncToken() const { return _syncToken; }
-
- CompressionConfig::Type getCompression() const { return _compression; }
-
- size_t getUncompressedSize() const { return _uncompressedSize; }
-
- /**
- * Compress buffer into temporary buffer and copy temporary buffer to
- * value along with compression config.
- */
- void set(vespalib::DataBuffer &&buf, ssize_t len, const CompressionConfig &compression);
- // Keep buffer uncompressed
- void set(vespalib::DataBuffer &&buf, ssize_t len);
-
- /**
- * Decompress value into temporary buffer and deserialize document from
- * the temporary buffer.
- */
- document::Document::UP deserializeDocument(const DocumentTypeRepo &repo) const;
- vespalib::DataBuffer decompressed() const;
-
- size_t size() const { return _compressedSize; }
- bool empty() const { return size() == 0; }
- operator const void *() const { return _buf.get(); }
- const void *get() const { return _buf.get(); }
- void *get() { return _buf.get(); }
-private:
- uint64_t _syncToken;
- size_t _compressedSize;
- size_t _uncompressedSize;
- CompressionConfig::Type _compression;
- Alloc _buf;
-};
-
class BackingStore {
public:
BackingStore(IDataStore &store, const CompressionConfig &compression) :
@@ -133,45 +72,6 @@ private:
CompressionConfig _compression;
};
-
-void
-Value::set(vespalib::DataBuffer &&buf, ssize_t len) {
- set(std::move(buf), len, CompressionConfig());
-}
-
-void
-Value::set(vespalib::DataBuffer &&buf, ssize_t len, const CompressionConfig &compression) {
- //Underlying buffer must be identical to allow swap.
- vespalib::DataBuffer compressed(buf.getData(), 0u);
- CompressionConfig::Type type = compress(compression, vespalib::ConstBufferRef(buf.getData(), len), compressed, true);
- _compressedSize = compressed.getDataLen();
- if (buf.getData() == compressed.getData()) {
- // Uncompressed so we can just steal the underlying buffer.
- buf.stealBuffer().swap(_buf);
- } else {
- compressed.stealBuffer().swap(_buf);
- }
- assert(((type == CompressionConfig::NONE) &&
- (len == ssize_t(_compressedSize))) ||
- ((type != CompressionConfig::NONE) &&
- (len > ssize_t(_compressedSize))));
- setCompression(type, len);
-}
-
-vespalib::DataBuffer
-Value::decompressed() const {
- vespalib::DataBuffer uncompressed(_buf.get(), (size_t) 0);
- decompress(getCompression(), getUncompressedSize(), vespalib::ConstBufferRef(*this, size()), uncompressed, true);
- return uncompressed;
-}
-
-document::Document::UP
-Value::deserializeDocument(const DocumentTypeRepo &repo) const {
- vespalib::DataBuffer uncompressed(decompressed());
- vespalib::nbostream is(uncompressed.getData(), uncompressed.getDataLen());
- return std::make_unique<document::Document>(repo, is);
-}
-
void
BackingStore::visit(const IDocumentStore::LidVector &lids, const DocumentTypeRepo &repo,
IDocumentVisitor &visitor) const {
@@ -194,8 +94,9 @@ BackingStore::read(DocumentIdT key, Value &value) const {
void
BackingStore::write(DocumentIdT lid, const Value & value)
{
- vespalib::DataBuffer buf = value.decompressed();
- _backingStore.write(value.getSyncToken(), lid, buf.getData(), buf.getDataLen());
+ Value::Result buf = value.decompressed();
+ assert(buf.second);
+ _backingStore.write(value.getSyncToken(), lid, buf.first.getData(), buf.first.getDataLen());
}
void
@@ -203,8 +104,6 @@ BackingStore::reconfigure(const CompressionConfig &compression) {
_compression = compression;
}
-}
-
using CacheParams = vespalib::CacheParam<
vespalib::LruParam<DocumentIdT, docstore::Value>,
docstore::BackingStore,
@@ -216,12 +115,14 @@ public:
Cache(BackingStore & b, size_t maxBytes) : vespalib::cache<CacheParams>(b, maxBytes) { }
};
+}
+
using VisitCache = docstore::VisitCache;
using docstore::Value;
bool
DocumentStore::Config::operator == (const Config &rhs) const {
- return (_maxCacheBytes == rhs._maxCacheBytes) &&
+ return (_maxCacheBytes == rhs._maxCacheBytes) &&
(_allowVisitCaching == rhs._allowVisitCaching) &&
(_initialCacheEntries == rhs._initialCacheEntries) &&
(_updateStrategy == rhs._updateStrategy) &&
@@ -233,9 +134,9 @@ DocumentStore::DocumentStore(const Config & config, IDataStore & store)
: IDocumentStore(),
_config(config),
_backingStore(store),
- _store(new docstore::BackingStore(_backingStore, config.getCompression())),
- _cache(new Cache(*_store, config.getMaxCacheBytes())),
- _visitCache(new VisitCache(store, config.getMaxCacheBytes(), config.getCompression())),
+ _store(std::make_unique<docstore::BackingStore>(_backingStore, config.getCompression())),
+ _cache(std::make_unique<docstore::Cache>(*_store, config.getMaxCacheBytes())),
+ _visitCache(std::make_unique<docstore::VisitCache>(store, config.getMaxCacheBytes(), config.getCompression())),
_uncached_lookups(0)
{
_cache->reserveElements(config.getInitialCacheEntries());
@@ -271,21 +172,32 @@ DocumentStore::visit(const LidVector & lids, const DocumentTypeRepo &repo, IDocu
}
}
-document::Document::UP
+std::unique_ptr<document::Document>
DocumentStore::read(DocumentIdT lid, const DocumentTypeRepo &repo) const
{
- document::Document::UP retval;
Value value;
if (useCache()) {
value = _cache->read(lid);
- } else {
- _uncached_lookups.fetch_add(1);
- _store->read(lid, value);
+ if (value.empty()) {
+ return std::unique_ptr<document::Document>();
+ }
+ Value::Result result = value.decompressed();
+ if ( result.second ) {
+ return deserializeDocument(result.first, repo);
+ } else {
+ LOG(warning, "Summary cache for lid %u is corrupt. Invalidating and reading directly from backing store", lid);
+ _cache->invalidate(lid);
+ }
}
+
+ _uncached_lookups.fetch_add(1);
+ _store->read(lid, value);
if ( ! value.empty() ) {
- retval = value.deserializeDocument(repo);
+ Value::Result result = value.decompressed();
+ assert(result.second);
+ return deserializeDocument(result.first, repo);
}
- return retval;
+ return std::unique_ptr<document::Document>();
}
void
@@ -380,15 +292,12 @@ class DocumentStore::WrapVisitor : public IDataStoreVisitor
public:
void visit(uint32_t lid, const void *buffer, size_t sz) override;
- WrapVisitor(Visitor &visitor,
- const DocumentTypeRepo &repo,
- const CompressionConfig &compresion,
- IDocumentStore &ds,
- uint64_t syncToken);
+ WrapVisitor(Visitor &visitor, const DocumentTypeRepo &repo, const CompressionConfig &compresion,
+ IDocumentStore &ds, uint64_t syncToken);
- inline void rewrite(uint32_t lid, const document::Document &doc);
- inline void rewrite(uint32_t lid);
- inline void visitRemove(uint32_t lid);
+ void rewrite(uint32_t lid, const document::Document &doc);
+ void rewrite(uint32_t lid);
+ void visitRemove(uint32_t lid);
};
@@ -406,34 +315,26 @@ public:
}
};
-
template <>
void
DocumentStore::WrapVisitor<IDocumentStoreReadVisitor>::
-rewrite(uint32_t lid, const document::Document &doc)
+rewrite(uint32_t , const document::Document &)
{
- (void) lid;
- (void) doc;
}
template <>
void
-DocumentStore::WrapVisitor<IDocumentStoreReadVisitor>::
-rewrite(uint32_t lid)
+DocumentStore::WrapVisitor<IDocumentStoreReadVisitor>::rewrite(uint32_t )
{
- (void) lid;
}
-
template <>
void
-DocumentStore::WrapVisitor<IDocumentStoreReadVisitor>::
-visitRemove(uint32_t lid)
+DocumentStore::WrapVisitor<IDocumentStoreReadVisitor>::visitRemove(uint32_t lid)
{
_visitor.visit(lid);
}
-
template <>
void
DocumentStore::WrapVisitor<IDocumentStoreRewriteVisitor>::
@@ -444,33 +345,21 @@ rewrite(uint32_t lid, const document::Document &doc)
template <>
void
-DocumentStore::WrapVisitor<IDocumentStoreRewriteVisitor>::
-rewrite(uint32_t lid)
+DocumentStore::WrapVisitor<IDocumentStoreRewriteVisitor>::rewrite(uint32_t lid)
{
_ds.remove(_syncToken, lid);
}
-
template <>
void
-DocumentStore::WrapVisitor<IDocumentStoreRewriteVisitor>::
-visitRemove(uint32_t lid)
+DocumentStore::WrapVisitor<IDocumentStoreRewriteVisitor>::visitRemove(uint32_t )
{
- (void) lid;
}
-
-
template <class Visitor>
void
-DocumentStore::WrapVisitor<Visitor>::visit(uint32_t lid,
- const void *buffer,
- size_t sz)
+DocumentStore::WrapVisitor<Visitor>::visit(uint32_t lid, const void *buffer, size_t sz)
{
- (void) lid;
- (void) buffer;
- (void) sz;
-
Value value;
vespalib::DataBuffer buf(4096);
buf.clear();
@@ -480,7 +369,7 @@ DocumentStore::WrapVisitor<Visitor>::visit(uint32_t lid,
value.set(std::move(buf), len);
}
if (! value.empty()) {
- std::shared_ptr<document::Document> doc(value.deserializeDocument(_repo));
+ std::shared_ptr<document::Document> doc(deserializeDocument(value.decompressed().first, _repo));
_visitor.visit(lid, doc);
rewrite(lid, *doc);
} else {
@@ -489,14 +378,10 @@ DocumentStore::WrapVisitor<Visitor>::visit(uint32_t lid,
}
}
-
template <class Visitor>
DocumentStore::WrapVisitor<Visitor>::
-WrapVisitor(Visitor &visitor,
- const DocumentTypeRepo &repo,
- const CompressionConfig &compression,
- IDocumentStore &ds,
- uint64_t syncToken)
+WrapVisitor(Visitor &visitor, const DocumentTypeRepo &repo, const CompressionConfig &compression,
+ IDocumentStore &ds, uint64_t syncToken)
: _visitor(visitor),
_repo(repo),
_compression(compression),
@@ -505,7 +390,6 @@ WrapVisitor(Visitor &visitor,
{
}
-
void
DocumentStore::accept(IDocumentStoreReadVisitor &visitor, IDocumentStoreVisitorProgress &visitorProgress,
const DocumentTypeRepo &repo)
@@ -516,7 +400,6 @@ DocumentStore::accept(IDocumentStoreReadVisitor &visitor, IDocumentStoreVisitorP
_backingStore.accept(wrap, wrapVisitorProgress, false);
}
-
void
DocumentStore::accept(IDocumentStoreRewriteVisitor &visitor, IDocumentStoreVisitorProgress &visitorProgress,
const DocumentTypeRepo &repo)
@@ -527,7 +410,6 @@ DocumentStore::accept(IDocumentStoreRewriteVisitor &visitor, IDocumentStoreVisit
_backingStore.accept(wrap, wrapVisitorProgress, true);
}
-
double
DocumentStore::getVisitCost() const
{
@@ -584,5 +466,4 @@ DocumentStore::shrinkLidSpace()
_backingStore.shrinkLidSpace();
}
-} // namespace search
-
+}
diff --git a/searchlib/src/vespa/searchlib/docstore/documentstore.h b/searchlib/src/vespa/searchlib/docstore/documentstore.h
index 7bc3ab1c21d..baeed106531 100644
--- a/searchlib/src/vespa/searchlib/docstore/documentstore.h
+++ b/searchlib/src/vespa/searchlib/docstore/documentstore.h
@@ -5,17 +5,13 @@
#include "idocumentstore.h"
#include <vespa/vespalib/util/compressionconfig.h>
-
-namespace search {
-
-namespace docstore {
+namespace search::docstore {
class VisitCache;
class BackingStore;
class Cache;
}
-using docstore::VisitCache;
-using docstore::BackingStore;
-using docstore::Cache;
+
+namespace search {
/**
* Simple document store that contains serialized Document instances.
@@ -67,7 +63,7 @@ public:
* @param baseDir The path to a directory where "simpledocstore.dat" will exist.
**/
DocumentStore(const Config & config, IDataStore & store);
- ~DocumentStore();
+ ~DocumentStore() override;
DocumentUP read(DocumentIdT lid, const document::DocumentTypeRepo &repo) const override;
void visit(const LidVector & lids, const document::DocumentTypeRepo &repo, IDocumentVisitor & visitor) const override;
@@ -111,12 +107,12 @@ private:
template <class> class WrapVisitor;
class WrapVisitorProgress;
- Config _config;
- IDataStore & _backingStore;
- std::unique_ptr<BackingStore> _store;
- std::shared_ptr<Cache> _cache;
- std::shared_ptr<VisitCache> _visitCache;
- mutable std::atomic<uint64_t> _uncached_lookups;
+ Config _config;
+ IDataStore & _backingStore;
+ std::unique_ptr<docstore::BackingStore> _store;
+ std::unique_ptr<docstore::Cache> _cache;
+ std::unique_ptr<docstore::VisitCache> _visitCache;
+ mutable std::atomic<uint64_t> _uncached_lookups;
};
} // namespace search
diff --git a/searchlib/src/vespa/searchlib/docstore/value.cpp b/searchlib/src/vespa/searchlib/docstore/value.cpp
new file mode 100644
index 00000000000..8750413e3bc
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/docstore/value.cpp
@@ -0,0 +1,75 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "value.h"
+#include <vespa/vespalib/data/databuffer.h>
+#include <vespa/vespalib/util/compressor.h>
+#include <vespa/vespalib/xxhash/xxhash.h>
+
+using vespalib::compression::compress;
+using vespalib::compression::decompress;
+
+namespace search::docstore {
+
+Value::Value()
+ : _syncToken(0),
+ _compressedSize(0),
+ _uncompressedSize(0),
+ _uncompressedCrc(0),
+ _compression(CompressionConfig::NONE)
+{}
+
+Value::Value(uint64_t syncToken)
+ : _syncToken(syncToken),
+ _compressedSize(0),
+ _uncompressedSize(0),
+ _uncompressedCrc(0),
+ _compression(CompressionConfig::NONE)
+{}
+
+Value::Value(const Value &rhs)
+ : _syncToken(rhs._syncToken),
+ _compressedSize(rhs._compressedSize),
+ _uncompressedSize(rhs._uncompressedSize),
+ _uncompressedCrc(rhs._uncompressedCrc),
+ _compression(rhs._compression),
+ _buf(Alloc::alloc(rhs.size()))
+{
+ memcpy(get(), rhs.get(), size());
+}
+
+void
+Value::set(vespalib::DataBuffer &&buf, ssize_t len) {
+ set(std::move(buf), len, CompressionConfig());
+}
+
+void
+Value::set(vespalib::DataBuffer &&buf, ssize_t len, const CompressionConfig &compression) {
+ //Underlying buffer must be identical to allow swap.
+ vespalib::DataBuffer compressed(buf.getData(), 0u);
+ vespalib::ConstBufferRef input(buf.getData(), len);
+ CompressionConfig::Type type = compress(compression, input, compressed, true);
+ _compressedSize = compressed.getDataLen();
+ if (buf.getData() == compressed.getData()) {
+ // Uncompressed so we can just steal the underlying buffer.
+ buf.stealBuffer().swap(_buf);
+ } else {
+ compressed.stealBuffer().swap(_buf);
+ }
+ assert(((type == CompressionConfig::NONE) &&
+ (len == ssize_t(_compressedSize))) ||
+ ((type != CompressionConfig::NONE) &&
+ (len > ssize_t(_compressedSize))));
+ _compression = type;
+ _uncompressedSize = len;
+ _uncompressedCrc = XXH64(input.c_str(), input.size(), 0);
+}
+
+Value::Result
+Value::decompressed() const {
+ vespalib::DataBuffer uncompressed(_buf.get(), (size_t) 0);
+ decompress(getCompression(), getUncompressedSize(), vespalib::ConstBufferRef(*this, size()), uncompressed, true);
+ uint64_t crc = XXH64(uncompressed.getData(), uncompressed.getDataLen(), 0);
+ return std::make_pair<vespalib::DataBuffer, bool>(std::move(uncompressed), crc == _uncompressedCrc);
+}
+
+}
diff --git a/searchlib/src/vespa/searchlib/docstore/value.h b/searchlib/src/vespa/searchlib/docstore/value.h
new file mode 100644
index 00000000000..426bcaf0e31
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/docstore/value.h
@@ -0,0 +1,57 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/vespalib/util/compressionconfig.h>
+#include <vespa/vespalib/data/databuffer.h>
+
+namespace search::docstore {
+
+/**
+ * This class is used to represent a serialized and optionally compressed blob.
+ * Has efficient move/copy operators for use in a cache/stl containers.
+ * Also has crc checks of uncompressed data.
+ */
+class Value {
+public:
+ using Alloc = vespalib::alloc::Alloc;
+ using CompressionConfig = vespalib::compression::CompressionConfig;
+ using Result = std::pair<vespalib::DataBuffer, bool>;
+
+ Value();
+ Value(uint64_t syncToken);
+
+ Value(Value &&rhs) = default;
+ Value &operator=(Value &&rhs) = default;
+
+ Value(const Value &rhs);
+
+ uint64_t getSyncToken() const { return _syncToken; }
+ CompressionConfig::Type getCompression() const { return _compression; }
+ size_t getUncompressedSize() const { return _uncompressedSize; }
+
+ /**
+ * Compress buffer into temporary buffer and copy temporary buffer to
+ * value along with compression config.
+ */
+ void set(vespalib::DataBuffer &&buf, ssize_t len, const CompressionConfig &compression);
+ // Keep buffer uncompressed
+ void set(vespalib::DataBuffer &&buf, ssize_t len);
+
+ Result decompressed() const;
+
+ size_t size() const { return _compressedSize; }
+ bool empty() const { return size() == 0; }
+ operator const void *() const { return _buf.get(); }
+ const void *get() const { return _buf.get(); }
+ void *get() { return _buf.get(); }
+private:
+ uint64_t _syncToken;
+ size_t _compressedSize;
+ size_t _uncompressedSize;
+ uint64_t _uncompressedCrc;
+ CompressionConfig::Type _compression;
+ Alloc _buf;
+};
+
+}
diff --git a/searchlib/src/vespa/searchlib/memoryindex/featurestore.cpp b/searchlib/src/vespa/searchlib/memoryindex/featurestore.cpp
index 583fadae7a6..be8c71de7a9 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/featurestore.cpp
+++ b/searchlib/src/vespa/searchlib/memoryindex/featurestore.cpp
@@ -11,8 +11,7 @@ constexpr size_t MIN_CLUSTERS = 1024u;
using index::SchemaUtil;
uint64_t
-FeatureStore::writeFeatures(uint32_t packedIndex,
- const DocIdAndFeatures &features)
+FeatureStore::writeFeatures(uint32_t packedIndex, const DocIdAndFeatures &features)
{
_f._fieldsParams = &_fieldsParams[packedIndex];
uint64_t oldOffset = _f.getWriteOffset();
@@ -90,8 +89,7 @@ FeatureStore::FeatureStore(const Schema &schema)
_fieldsParams.resize(_schema.getNumIndexFields());
SchemaUtil::IndexIterator it(_schema);
for(; it.isValid(); ++it) {
- _fieldsParams[it.getIndex()].
- setSchemaParams(_schema, it.getIndex());
+ _fieldsParams[it.getIndex()].setSchemaParams(_schema, it.getIndex());
}
_store.addType(&_type);
_store.initActiveBuffers();
@@ -105,8 +103,7 @@ FeatureStore::~FeatureStore()
std::pair<datastore::EntryRef, uint64_t>
-FeatureStore::addFeatures(uint32_t packedIndex,
- const DocIdAndFeatures &features)
+FeatureStore::addFeatures(uint32_t packedIndex, const DocIdAndFeatures &features)
{
uint64_t oldOffset = writeFeatures(packedIndex, features);
uint64_t newOffset = _f.getWriteOffset();
@@ -117,8 +114,7 @@ FeatureStore::addFeatures(uint32_t packedIndex,
void
-FeatureStore::getFeatures(uint32_t packedIndex, datastore::EntryRef ref,
- DocIdAndFeatures &features)
+FeatureStore::getFeatures(uint32_t packedIndex, datastore::EntryRef ref, DocIdAndFeatures &features)
{
setupForField(packedIndex, _d);
setupForReadFeatures(ref, _d);
@@ -141,8 +137,7 @@ FeatureStore::bitSize(uint32_t packedIndex, datastore::EntryRef ref)
datastore::EntryRef
-FeatureStore::moveFeatures(uint32_t packedIndex,
- datastore::EntryRef ref)
+FeatureStore::moveFeatures(uint32_t packedIndex, datastore::EntryRef ref)
{
uint64_t bitLen = bitSize(packedIndex, ref);
return moveFeatures(ref, bitLen);
diff --git a/searchlib/src/vespa/searchlib/memoryindex/featurestore.h b/searchlib/src/vespa/searchlib/memoryindex/featurestore.h
index ecf21892f78..4ffdf2bc4e7 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/featurestore.h
+++ b/searchlib/src/vespa/searchlib/memoryindex/featurestore.h
@@ -7,9 +7,7 @@
#include <vespa/searchlib/bitcompression/compression.h>
#include <vespa/searchlib/bitcompression/posocccompression.h>
-namespace search {
-
-namespace memoryindex {
+namespace search::memoryindex {
class FeatureStore
{
@@ -54,8 +52,7 @@ private:
* @param features the features to be encoded
* @return the encode offset before writing
*/
- uint64_t
- writeFeatures(uint32_t packedIndex, const DocIdAndFeatures &features);
+ uint64_t writeFeatures(uint32_t packedIndex, const DocIdAndFeatures &features);
/**
* Adds the features from the given buffer to the data store.
@@ -64,8 +61,7 @@ private:
* @param byteLen the byte length of the buffer
* @return the entry ref for the added features
*/
- datastore::EntryRef
- addFeatures(const uint8_t * src, uint64_t byteLen);
+ datastore::EntryRef addFeatures(const uint8_t * src, uint64_t byteLen);
/**
* Adds the features currently in the underlying encode context to the data store.
@@ -74,8 +70,7 @@ private:
* @param endOffset the end offset into the encode context
* @return the entry ref and bit length of the features
*/
- std::pair<datastore::EntryRef, uint64_t>
- addFeatures(uint64_t beginOffset, uint64_t endOffset);
+ std::pair<datastore::EntryRef, uint64_t> addFeatures(uint64_t beginOffset, uint64_t endOffset);
/**
* Moves features to new location, as part of compaction.
@@ -107,9 +102,7 @@ public:
* @return pair with reference to stored features and
* size of encoded features in bits
*/
- std::pair<datastore::EntryRef, uint64_t>
- addFeatures(uint32_t packedIndex,
- const DocIdAndFeatures &features);
+ std::pair<datastore::EntryRef, uint64_t> addFeatures(uint32_t packedIndex, const DocIdAndFeatures &features);
/**
@@ -120,10 +113,7 @@ public:
* @param ref Reference to stored features
* @param features The features to be decoded
*/
- void
- getFeatures(uint32_t packedIndex,
- datastore::EntryRef ref,
- DocIdAndFeatures &features);
+ void getFeatures(uint32_t packedIndex, datastore::EntryRef ref, DocIdAndFeatures &features);
/**
@@ -189,9 +179,7 @@ public:
* @param ref Referennce to stored features
* @return byte address of stored features
*/
- const uint8_t *
- getBits(datastore::EntryRef ref) const
- {
+ const uint8_t *getBits(datastore::EntryRef ref) const {
RefType iRef(ref);
return _store.getBufferEntry<uint8_t>(iRef.bufferId(), iRef.offset());
}
@@ -203,72 +191,22 @@ public:
* @param ref Old reference to stored features
* @return New reference to stored features
*/
- datastore::EntryRef
- moveFeatures(uint32_t packedIndex,
- datastore::EntryRef ref);
+ datastore::EntryRef moveFeatures(uint32_t packedIndex, datastore::EntryRef ref);
/**
* Return a const view of the fields params used by this feature store.
*
* @return const view of fields params.
*/
- const std::vector<PosOccFieldsParams> &
- getFieldsParams() const
- {
- return _fieldsParams;
- }
-
- // Inherit doc from DataStoreBase
- void
- trimHoldLists(generation_t usedGen)
- {
- _store.trimHoldLists(usedGen);
- }
-
- // Inherit doc from DataStoreBase
- void
- transferHoldLists(generation_t generation)
- {
- _store.transferHoldLists(generation);
- }
-
- void
- clearHoldLists()
- {
- _store.clearHoldLists();
- }
-
- // Inherit doc from DataStoreBase
- std::vector<uint32_t>
- startCompact()
- {
- return _store.startCompact(_typeId);
- }
-
- // Inherit doc from DataStoreBase
- void
- finishCompact(const std::vector<uint32_t> & toHold)
- {
- _store.finishCompact(toHold);
- }
-
- // Inherit doc from DataStoreBase
- MemoryUsage
- getMemoryUsage() const
- {
- return _store.getMemoryUsage();
- }
-
- // Inherit doc from DataStoreBase
- datastore::DataStoreBase::MemStats
- getMemStats() const
- {
- return _store.getMemStats();
- }
+ const std::vector<PosOccFieldsParams> &getFieldsParams() const { return _fieldsParams; }
+
+ void trimHoldLists(generation_t usedGen) { _store.trimHoldLists(usedGen); }
+ void transferHoldLists(generation_t generation) { _store.transferHoldLists(generation); }
+ void clearHoldLists() { _store.clearHoldLists();}
+ std::vector<uint32_t> startCompact() { return _store.startCompact(_typeId); }
+ void finishCompact(const std::vector<uint32_t> & toHold) { _store.finishCompact(toHold); }
+ MemoryUsage getMemoryUsage() const { return _store.getMemoryUsage(); }
+ datastore::DataStoreBase::MemStats getMemStats() const { return _store.getMemStats(); }
};
-
-} // namespace search::memoryindex
-} // namespace search
-
-
+}
diff --git a/searchlib/src/vespa/searchlib/memoryindex/iordereddocumentinserter.h b/searchlib/src/vespa/searchlib/memoryindex/iordereddocumentinserter.h
index f36086bd49f..9edd1eb4d3b 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/iordereddocumentinserter.h
+++ b/searchlib/src/vespa/searchlib/memoryindex/iordereddocumentinserter.h
@@ -5,11 +5,9 @@
#include <vespa/vespalib/stllike/string.h>
#include <cstdint>
-namespace search {
+namespace search::index { class DocIdAndFeatures; }
-namespace index { class DocIdAndFeatures; }
-
-namespace memoryindex {
+namespace search::memoryindex {
/**
* Interface class for ordered document inserter.
@@ -49,4 +47,3 @@ public:
};
}
-}
diff --git a/searchlib/src/vespa/searchlib/memoryindex/ordereddocumentinserter.cpp b/searchlib/src/vespa/searchlib/memoryindex/ordereddocumentinserter.cpp
index b1e365c7fd7..1f15bcf1c75 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/ordereddocumentinserter.cpp
+++ b/searchlib/src/vespa/searchlib/memoryindex/ordereddocumentinserter.cpp
@@ -94,6 +94,7 @@ OrderedDocumentInserter::setNextWord(const vespalib::stringref word)
}
if (!_dItr.valid() || cmp(key, _dItr.getKey())) {
datastore::EntryRef wordRef = _fieldIndex.addWord(_word);
+
WordKey insertKey(wordRef);
DictionaryTree &dTree(_fieldIndex.getDictionaryTree());
dTree.insert(_dItr, insertKey, datastore::EntryRef().ref());
diff --git a/searchlib/src/vespa/searchlib/memoryindex/ordereddocumentinserter.h b/searchlib/src/vespa/searchlib/memoryindex/ordereddocumentinserter.h
index 941695b2707..9645c3890e2 100644
--- a/searchlib/src/vespa/searchlib/memoryindex/ordereddocumentinserter.h
+++ b/searchlib/src/vespa/searchlib/memoryindex/ordereddocumentinserter.h
@@ -6,11 +6,7 @@
#include "memoryfieldindex.h"
#include <limits>
-namespace search
-{
-
-namespace memoryindex
-{
+namespace search::memoryindex {
class IDocumentInsertListener;
@@ -52,11 +48,10 @@ class OrderedDocumentInserter : public IOrderedDocumentInserter
public:
OrderedDocumentInserter(MemoryFieldIndex &fieldIndex);
- virtual ~OrderedDocumentInserter();
- virtual void setNextWord(const vespalib::stringref word) override;
- virtual void add(uint32_t docId,
- const index::DocIdAndFeatures &features) override;
- virtual void remove(uint32_t docId) override;
+ ~OrderedDocumentInserter() override;
+ void setNextWord(const vespalib::stringref word) override;
+ void add(uint32_t docId, const index::DocIdAndFeatures &features) override;
+ void remove(uint32_t docId) override;
/*
* Flush pending changes to postinglist for (_word). Also flush
@@ -64,17 +59,15 @@ public:
*
* _dItr is located at correct position.
*/
- virtual void flush() override;
+ void flush() override;
/*
* Rewind iterator, to start new pass.
*/
- virtual void rewind() override;
+ void rewind() override;
// Used by unit test
datastore::EntryRef getWordRef() const;
};
}
-
-}
diff --git a/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/application/ControllerApplication.java b/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/application/ControllerApplication.java
new file mode 100644
index 00000000000..c1bc303e792
--- /dev/null
+++ b/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/application/ControllerApplication.java
@@ -0,0 +1,18 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.service.monitor.application;
+
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.NodeType;
+
+/**
+ * @author mpolden
+ */
+public class ControllerApplication extends HostedVespaApplication {
+
+ public static final ControllerApplication CONTROLLER_APPLICATION = new ControllerApplication();
+
+ private ControllerApplication() {
+ super("controller", NodeType.controller, ClusterSpec.Type.container, ClusterSpec.Id.from("controller"));
+ }
+
+}
diff --git a/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/application/HostedVespaApplication.java b/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/application/HostedVespaApplication.java
index d1111bdd5d7..23fafa701d9 100644
--- a/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/application/HostedVespaApplication.java
+++ b/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/application/HostedVespaApplication.java
@@ -60,4 +60,10 @@ public abstract class HostedVespaApplication {
.applicationName(applicationName)
.build();
}
+
+ @Override
+ public String toString() {
+ return applicationId.toString();
+ }
+
}
diff --git a/vespalib/CMakeLists.txt b/vespalib/CMakeLists.txt
index fb3b08b325f..b32c875cb26 100644
--- a/vespalib/CMakeLists.txt
+++ b/vespalib/CMakeLists.txt
@@ -57,6 +57,7 @@ vespa_define_module(
src/tests/net/send_fd
src/tests/net/socket
src/tests/net/socket_spec
+ src/tests/net/tls/direct_buffer_bio
src/tests/net/tls/openssl_impl
src/tests/net/tls/transport_options
src/tests/objects/nbostream
diff --git a/vespalib/src/tests/net/tls/direct_buffer_bio/CMakeLists.txt b/vespalib/src/tests/net/tls/direct_buffer_bio/CMakeLists.txt
new file mode 100644
index 00000000000..cbce4d3651f
--- /dev/null
+++ b/vespalib/src/tests/net/tls/direct_buffer_bio/CMakeLists.txt
@@ -0,0 +1,10 @@
+# Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespalib_net_tls_direct_buffer_bio_test_app TEST
+ SOURCES
+ direct_buffer_bio_test.cpp
+ DEPENDS
+ vespalib
+)
+vespa_add_test(NAME vespalib_net_tls_direct_buffer_bio_test_app
+ COMMAND vespalib_net_tls_direct_buffer_bio_test_app)
+
diff --git a/vespalib/src/tests/net/tls/direct_buffer_bio/direct_buffer_bio_test.cpp b/vespalib/src/tests/net/tls/direct_buffer_bio/direct_buffer_bio_test.cpp
new file mode 100644
index 00000000000..134df9b17eb
--- /dev/null
+++ b/vespalib/src/tests/net/tls/direct_buffer_bio/direct_buffer_bio_test.cpp
@@ -0,0 +1,138 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/vespalib/net/tls/impl/direct_buffer_bio.h>
+#include <vespa/vespalib/stllike/string.h>
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <cassert>
+
+using namespace vespalib;
+using namespace vespalib::net::tls::impl;
+
+struct Fixture {
+ BioPtr mutable_bio;
+ BioPtr const_bio;
+ vespalib::string tmp_buf;
+
+ Fixture()
+ : mutable_bio(new_mutable_direct_buffer_bio()),
+ const_bio(new_const_direct_buffer_bio()),
+ tmp_buf('X', 64)
+ {
+ ASSERT_TRUE(mutable_bio && const_bio);
+ }
+};
+
+TEST_F("BIOs without associated buffers return zero pending", Fixture) {
+ EXPECT_EQUAL(0, BIO_pending(f.mutable_bio.get()));
+ EXPECT_EQUAL(0, BIO_pending(f.const_bio.get()));
+}
+
+TEST_F("Const BIO has initial pending equal to size of associated buffer", Fixture) {
+ vespalib::string to_read = "I sure love me some data";
+ ConstBufferViewGuard g(*f.const_bio, &to_read[0], to_read.size());
+ EXPECT_EQUAL(static_cast<int>(to_read.size()), BIO_pending(f.const_bio.get()));
+}
+
+TEST_F("Mutable BIO has initial pending of 0 with associated buffer (pending == written bytes)", Fixture) {
+ MutableBufferViewGuard g(*f.mutable_bio, &f.tmp_buf[0], f.tmp_buf.size());
+ EXPECT_EQUAL(0, BIO_pending(f.mutable_bio.get()));
+}
+
+TEST_F("Mutable BIO_write writes to associated buffer", Fixture) {
+ MutableBufferViewGuard g(*f.mutable_bio, &f.tmp_buf[0], f.tmp_buf.size());
+ vespalib::string to_write = "hello world!";
+ int ret = ::BIO_write(f.mutable_bio.get(), to_write.data(), static_cast<int>(to_write.size()));
+ EXPECT_EQUAL(static_cast<int>(to_write.size()), ret);
+ EXPECT_EQUAL(to_write, vespalib::stringref(f.tmp_buf.data(), to_write.size()));
+ EXPECT_EQUAL(static_cast<int>(to_write.size()), BIO_pending(f.mutable_bio.get()));
+}
+
+TEST_F("Mutable BIO_write moves write cursor per invocation", Fixture) {
+ MutableBufferViewGuard g(*f.mutable_bio, &f.tmp_buf[0], f.tmp_buf.size());
+ vespalib::string to_write = "hello world!";
+
+ int ret = ::BIO_write(f.mutable_bio.get(), to_write.data(), 3); // 'hel'
+ ASSERT_EQUAL(3, ret);
+ EXPECT_EQUAL(3, BIO_pending(f.mutable_bio.get()));
+ ret = ::BIO_write(f.mutable_bio.get(), to_write.data() + 3, 5); // 'lo wo'
+ ASSERT_EQUAL(5, ret);
+ EXPECT_EQUAL(8, BIO_pending(f.mutable_bio.get()));
+ ret = ::BIO_write(f.mutable_bio.get(), to_write.data() + 8, 4); // 'rld!'
+ ASSERT_EQUAL(4, ret);
+ EXPECT_EQUAL(12, BIO_pending(f.mutable_bio.get()));
+
+ EXPECT_EQUAL(to_write, vespalib::stringref(f.tmp_buf.data(), to_write.size()));
+}
+
+TEST_F("Const BIO_read reads from associated buffer", Fixture) {
+ vespalib::string to_read = "look at this fancy data!";
+ ConstBufferViewGuard g(*f.const_bio, &to_read[0], to_read.size());
+
+ int ret = ::BIO_read(f.const_bio.get(), &f.tmp_buf[0], static_cast<int>(f.tmp_buf.size()));
+ EXPECT_EQUAL(static_cast<int>(to_read.size()), ret);
+ EXPECT_EQUAL(ret, static_cast<int>(to_read.size()));
+
+ EXPECT_EQUAL(to_read, vespalib::stringref(f.tmp_buf.data(), to_read.size()));
+}
+
+TEST_F("Const BIO_read moves read cursor per invocation", Fixture) {
+ vespalib::string to_read = "look at this fancy data!";
+ ConstBufferViewGuard g(*f.const_bio, &to_read[0], to_read.size());
+
+ EXPECT_EQUAL(24, BIO_pending(f.const_bio.get()));
+ int ret = ::BIO_read(f.const_bio.get(), &f.tmp_buf[0], 8); // 'look at '
+ ASSERT_EQUAL(8, ret);
+ EXPECT_EQUAL(16, BIO_pending(f.const_bio.get()));
+ ret = ::BIO_read(f.const_bio.get(), &f.tmp_buf[8], 10); // 'this fancy'
+ ASSERT_EQUAL(10, ret);
+ EXPECT_EQUAL(6, BIO_pending(f.const_bio.get()));
+ ret = ::BIO_read(f.const_bio.get(), &f.tmp_buf[18], 20); // ' data!' (with extra destination space available)
+ ASSERT_EQUAL(6, ret);
+ EXPECT_EQUAL(0, BIO_pending(f.const_bio.get()));
+
+ EXPECT_EQUAL(to_read, vespalib::stringref(f.tmp_buf.data(), to_read.size()));
+}
+
+TEST_F("Const BIO read EOF returns -1 by default and sets BIO retry flag", Fixture) {
+ ConstBufferViewGuard g(*f.const_bio, "", 0);
+ int ret = ::BIO_read(f.const_bio.get(), &f.tmp_buf[0], static_cast<int>(f.tmp_buf.size()));
+ EXPECT_EQUAL(-1, ret);
+ EXPECT_NOT_EQUAL(0, BIO_should_retry(f.const_bio.get()));
+}
+
+TEST_F("Can invoke BIO_(set|get)_close", Fixture) {
+ (void)BIO_set_close(f.mutable_bio.get(), 0);
+ EXPECT_EQUAL(0, BIO_get_close(f.mutable_bio.get()));
+ (void)BIO_set_close(f.mutable_bio.get(), 1);
+ EXPECT_EQUAL(1, BIO_get_close(f.mutable_bio.get()));
+}
+
+TEST_F("BIO_write on const BIO returns failure", Fixture) {
+ vespalib::string data = "safe and cozy data :3";
+ vespalib::string to_read = data;
+ ConstBufferViewGuard g(*f.const_bio, &to_read[0], to_read.size());
+
+ int ret = ::BIO_write(f.const_bio.get(), "unsafe", 6);
+ EXPECT_EQUAL(-1, ret);
+ EXPECT_EQUAL(0, BIO_should_retry(f.mutable_bio.get()));
+ EXPECT_EQUAL(data, to_read);
+}
+
+TEST_F("BIO_read on mutable BIO returns failure", Fixture) {
+ MutableBufferViewGuard g(*f.mutable_bio, &f.tmp_buf[0], f.tmp_buf.size());
+
+ vespalib::string dummy_buf;
+ dummy_buf.reserve(128);
+ int ret = ::BIO_read(f.mutable_bio.get(), &dummy_buf[0], static_cast<int>(dummy_buf.size()));
+ EXPECT_EQUAL(-1, ret);
+ EXPECT_EQUAL(0, BIO_should_retry(f.mutable_bio.get()));
+}
+
+TEST_F("Can do read on zero-length nullptr const buffer", Fixture) {
+ ConstBufferViewGuard g(*f.const_bio, nullptr, 0);
+ int ret = ::BIO_read(f.const_bio.get(), &f.tmp_buf[0], static_cast<int>(f.tmp_buf.size()));
+ EXPECT_EQUAL(-1, ret);
+ EXPECT_NOT_EQUAL(0, BIO_should_retry(f.const_bio.get()));
+}
+
+TEST_MAIN() { TEST_RUN_ALL(); }
+
diff --git a/vespalib/src/vespa/vespalib/net/crypto_engine.cpp b/vespalib/src/vespa/vespalib/net/crypto_engine.cpp
index 254a9b213ba..3dd800dadf0 100644
--- a/vespalib/src/vespa/vespalib/net/crypto_engine.cpp
+++ b/vespalib/src/vespa/vespalib/net/crypto_engine.cpp
@@ -12,6 +12,9 @@
#include <vespa/vespalib/data/smart_buffer.h>
#include <assert.h>
+#include <vespa/log/log.h>
+LOG_SETUP(".vespalib.net.crypto_engine");
+
namespace vespalib {
namespace {
@@ -172,6 +175,8 @@ CryptoEngine::SP create_default_crypto_engine() {
if (cfg_file.empty()) {
return std::make_shared<NullCryptoEngine>();
}
+
+ LOG(debug, "Using TLS crypto engine with config file '%s'", cfg_file.c_str());
auto tls_opts = net::tls::read_options_from_json_file(cfg_file);
return std::make_shared<TlsCryptoEngine>(*tls_opts);
}
diff --git a/vespalib/src/vespa/vespalib/net/tls/impl/CMakeLists.txt b/vespalib/src/vespa/vespalib/net/tls/impl/CMakeLists.txt
index a5a8e8d3eb9..6129fde3c6c 100644
--- a/vespalib/src/vespa/vespalib/net/tls/impl/CMakeLists.txt
+++ b/vespalib/src/vespa/vespalib/net/tls/impl/CMakeLists.txt
@@ -1,6 +1,7 @@
# Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_library(vespalib_vespalib_net_tls_impl OBJECT
SOURCES
+ direct_buffer_bio.cpp
openssl_tls_context_impl.cpp
openssl_crypto_codec_impl.cpp
DEPENDS
diff --git a/vespalib/src/vespa/vespalib/net/tls/impl/direct_buffer_bio.cpp b/vespalib/src/vespa/vespalib/net/tls/impl/direct_buffer_bio.cpp
new file mode 100644
index 00000000000..614722a9769
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/net/tls/impl/direct_buffer_bio.cpp
@@ -0,0 +1,418 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include "direct_buffer_bio.h"
+#include <vespa/vespalib/net/tls/crypto_exception.h>
+#include <utility>
+#include <cassert>
+
+#include <vespa/vespalib/util/backtrace.h>
+
+#include <vespa/log/log.h>
+LOG_SETUP(".vespalib.net.tls.impl.direct_buffer_bio");
+
+/*
+ * The official OpenSSL docs are basically devoid of information on how to write
+ * your own BIOs, so most of the information used to implement our custom BIOs
+ * is gleaned from other implementations and by reading the OpenSSL source code.
+ *
+ * Primary references used for implementation:
+ * - https://github.com/openssl/openssl/blob/master/crypto/bio/bss_mem.c
+ * - https://github.com/indutny/uv_ssl_t/blob/master/src/bio.c
+ */
+
+namespace vespalib::net::tls::impl {
+
+namespace {
+
+int buffer_bio_init(::BIO* bio);
+int buffer_bio_destroy(::BIO* bio);
+int mutable_buffer_bio_write(::BIO* bio, const char* src_buf, int len);
+int const_buffer_bio_write(::BIO* bio, const char* src_buf, int len);
+int mutable_buffer_bio_read(::BIO* bio, char* dest_buf, int len);
+int const_buffer_bio_read(::BIO* bio, char* dest_buf, int len);
+long mutable_buffer_bio_ctrl(::BIO* bio, int cmd, long num, void* ptr);
+long const_buffer_bio_ctrl(::BIO* bio, int cmd, long num, void* ptr);
+
+// How to wrangle BIOs and their methods is completely changed after OpenSSL 1.1
+// For older versions, we must directly create a struct with callback fields set
+// and can access the BIO fields directly. In 1.1 and beyond everything is hidden
+// by indirection functions (these are _not_ available in prior versions).
+#if (OPENSSL_VERSION_NUMBER < 0x10100000L)
+
+#if !defined(BIO_TYPE_START)
+# define BIO_TYPE_START 128 // Constant hoisted from OpenSSL >= 1.1.0
+#endif
+
+const ::BIO_METHOD mutable_buf_method_instance = {
+ (BIO_TYPE_START + 1) | BIO_TYPE_SOURCE_SINK, // BIO_TYPE_SOURCE_SINK sets high bits, not low bits, so no clobbering
+ "mutable direct buffer access BIO",
+ mutable_buffer_bio_write, // write func
+ mutable_buffer_bio_read, // read func
+ nullptr, // puts func
+ nullptr, // gets func
+ mutable_buffer_bio_ctrl, // ctrl func
+ buffer_bio_init, // init func
+ buffer_bio_destroy, // destroy func
+ nullptr, // callback ctrl func
+};
+
+const ::BIO_METHOD const_buf_method_instance = {
+ (BIO_TYPE_START + 2) | BIO_TYPE_SOURCE_SINK,
+ "const direct buffer access BIO",
+ const_buffer_bio_write, // write func
+ const_buffer_bio_read, // read func
+ nullptr, // puts func
+ nullptr, // gets func
+ const_buffer_bio_ctrl, // ctrl func
+ buffer_bio_init, // init func
+ buffer_bio_destroy, // destroy func
+ nullptr, // callback ctrl func
+};
+
+struct BioMethodWrapper {
+ const ::BIO_METHOD* method; // Global instance
+ int type_index;
+};
+
+BioMethodWrapper mutable_buf_method() {
+ return {&mutable_buf_method_instance, mutable_buf_method_instance.type};
+}
+
+BioMethodWrapper const_buf_method() {
+ return {&const_buf_method_instance, const_buf_method_instance.type};
+}
+
+void set_bio_data(::BIO& bio, void* ptr) {
+ bio.ptr = ptr;
+}
+
+void* get_bio_data(::BIO& bio) {
+ return bio.ptr;
+}
+
+void set_bio_shutdown(::BIO& bio, int shutdown) {
+ bio.shutdown = shutdown;
+}
+
+int get_bio_shutdown(::BIO& bio) {
+ return bio.shutdown;
+}
+
+void set_bio_init(::BIO& bio, int init) {
+ bio.init = init;
+}
+
+#else // OpenSSL 1.1 and beyond
+
+struct BioMethodDeleter {
+ void operator()(::BIO_METHOD* meth) const noexcept {
+ ::BIO_meth_free(meth);
+ }
+};
+using BioMethodPtr = std::unique_ptr<::BIO_METHOD, BioMethodDeleter>;
+
+struct BioMethodWrapper {
+ BioMethodPtr method;
+ int type_index;
+};
+
+struct BioMethodParams {
+ const char* bio_name;
+ int (*bio_write)(::BIO*, const char*, int);
+ int (*bio_read)(::BIO*, char*, int);
+ long (*bio_ctrl)(::BIO*, int, long, void*);
+};
+
+BioMethodWrapper create_bio_method(const BioMethodParams& params) {
+ int type_index = ::BIO_get_new_index() | BIO_TYPE_SOURCE_SINK;
+ if (type_index == -1) {
+ throw CryptoException("BIO_get_new_index");
+ }
+ BioMethodPtr bm(::BIO_meth_new(type_index, params.bio_name));
+ if (!::BIO_meth_set_create(bm.get(), buffer_bio_init) ||
+ !::BIO_meth_set_destroy(bm.get(), buffer_bio_destroy) ||
+ !::BIO_meth_set_write(bm.get(), params.bio_write) ||
+ !::BIO_meth_set_read(bm.get(), params.bio_read) ||
+ !::BIO_meth_set_ctrl(bm.get(), params.bio_ctrl)) {
+ throw CryptoException("Failed to set BIO_METHOD callback");
+ }
+ return {std::move(bm), type_index};
+}
+
+BioMethodWrapper create_mutable_bio_method() {
+ return create_bio_method({"mutable direct buffer access BIO", mutable_buffer_bio_write,
+ mutable_buffer_bio_read, mutable_buffer_bio_ctrl});
+}
+
+BioMethodWrapper create_const_bio_method() {
+ return create_bio_method({"const direct buffer access BIO", const_buffer_bio_write,
+ const_buffer_bio_read, const_buffer_bio_ctrl});
+}
+
+const BioMethodWrapper& mutable_buf_method() {
+ static BioMethodWrapper wrapper = create_mutable_bio_method();
+ return wrapper;
+}
+
+const BioMethodWrapper& const_buf_method() {
+ static BioMethodWrapper wrapper = create_const_bio_method();
+ return wrapper;
+}
+
+void set_bio_data(::BIO& bio, void* ptr) {
+ ::BIO_set_data(&bio, ptr);
+}
+
+void set_bio_shutdown(::BIO& bio, int shutdown) {
+ ::BIO_set_shutdown(&bio, shutdown);
+}
+
+int get_bio_shutdown(::BIO& bio) {
+ return ::BIO_get_shutdown(&bio);
+}
+
+void set_bio_init(::BIO& bio, int init) {
+ ::BIO_set_init(&bio, init);
+}
+
+void* get_bio_data(::BIO& bio) {
+ return ::BIO_get_data(&bio);
+}
+
+#endif
+
+BioPtr new_direct_buffer_bio(const ::BIO_METHOD& method) {
+#if (OPENSSL_VERSION_NUMBER < 0x10100000L)
+ auto* bio = ::BIO_new(const_cast<::BIO_METHOD*>(&method)); // ugh, older OpenSSL const-ness is a disaster.
+#else
+ auto* bio = ::BIO_new(&method);
+#endif
+ if (!bio) {
+ return BioPtr();
+ }
+ set_bio_data(*bio, nullptr); // Just to make sure this isn't set yet.
+ return BioPtr(bio);
+}
+
+} // anon ns
+
+BioPtr new_mutable_direct_buffer_bio() {
+ return new_direct_buffer_bio(*mutable_buf_method().method);
+}
+
+BioPtr new_const_direct_buffer_bio() {
+ return new_direct_buffer_bio(*const_buf_method().method);
+}
+
+namespace {
+
+int buffer_bio_init(::BIO* bio) {
+ // "shutdown" here means "should BIO close underlying resource?". Since
+ // our BIOs don't ever allocate anything we just use this value as something
+ // that can be set by BIO_set_close() and read by BIO_get_close().
+ set_bio_shutdown(*bio, 1);
+ set_bio_init(*bio, 1);
+ return 1;
+}
+
+int buffer_bio_destroy(::BIO* bio) {
+ set_bio_data(*bio, nullptr); // We don't own anything.
+ return 1;
+}
+
+int mutable_buffer_bio_write(::BIO* bio, const char* src_buf, int len) {
+ LOG_ASSERT(len >= 0);
+
+ BIO_clear_retry_flags(bio);
+ if (!get_bio_data(*bio)) {
+ // TODO replace with assertion once we _know_ it should never happen in practice..!
+ LOG(error, "Got buffer write of length %d to a non-bound mutable BIO!", len);
+ LOG(error, "%s", getStackTrace(0).c_str());
+ return -1;
+ }
+
+ const auto sz_len = static_cast<size_t>(len);
+ if (sz_len == 0) {
+ return 0;
+ }
+ auto* dest_buf = static_cast<MutableBufferView*>(get_bio_data(*bio));
+ // len is an int, so sz_len is <= INT_MAX, while pos/size are size_t; no
+ // overflow on 64-bit since buffer sizes given by the caller fit in an int.
+ if (dest_buf->pos + sz_len > dest_buf->size) {
+ return -1;
+ }
+ // Source and destination buffers should never overlap.
+ memcpy(dest_buf->buffer + dest_buf->pos, src_buf, sz_len);
+ dest_buf->pos += sz_len;
+
+ return len;
+}
+
+int const_buffer_bio_write(::BIO* bio, const char* src_buf, int len) {
+ (void) bio;
+ (void) src_buf;
+ // Const buffers are read only!
+ LOG(error, "BIO_write() of length %d called on read-only BIO", len);
+ return -1;
+}
+
+int mutable_buffer_bio_read(::BIO* bio, char* dest_buf, int len) {
+ (void) bio;
+ (void) dest_buf;
+ // Mutable buffers are write only!
+ LOG(error, "BIO_read() of length %d called on write-only BIO", len);
+ return -1;
+}
+
+int const_buffer_bio_read(::BIO* bio, char* dest_buf, int len) {
+ LOG_ASSERT(len >= 0);
+
+ BIO_clear_retry_flags(bio);
+ if (!get_bio_data(*bio)) {
+ // TODO replace with assertion once we _know_ it should never happen in practice..!
+ LOG(error, "Got buffer read of length %d to a non-bound const BIO!", len);
+ LOG(error, "%s", getStackTrace(0).c_str());
+ return -1;
+ }
+
+ const auto sz_len = static_cast<size_t>(len);
+ auto* src_buf = static_cast<ConstBufferView*>(get_bio_data(*bio));
+ const auto readable = std::min(sz_len, src_buf->size - src_buf->pos);
+ if (readable != 0) {
+ // Source and destination buffers should never overlap.
+ memcpy(dest_buf, src_buf->buffer + src_buf->pos, readable);
+ src_buf->pos += readable;
+ return static_cast<int>(readable);
+ }
+ // Since a BIO might point to different buffers between SSL_* invocations,
+ // we want OpenSSL to retry later. _Not_ setting this or not returning -1 will
+ // cause OpenSSL to return SSL_ERROR_SYSCALL. Ask me how I know.
+ BIO_set_retry_read(bio);
+ return -1;
+}
+
+template <typename BufferType>
+long do_buffer_bio_ctrl(::BIO* bio, int cmd, long num, void* ptr) {
+ const auto* buf_view = static_cast<const BufferType*>(get_bio_data(*bio));
+ long ret = 1;
+
+ switch (cmd) {
+ case BIO_CTRL_EOF: // Is the buffer exhausted?
+ if (buf_view != nullptr) {
+ ret = static_cast<int>(buf_view->pos == buf_view->size);
+ }
+ break;
+ case BIO_CTRL_INFO: // How much data remains in buffer?
+ ret = (buf_view != nullptr) ? buf_view->pending() : 0;
+ if (ptr) {
+ *static_cast<void**>(ptr) = nullptr; // Semantics: who knows? But everyone's doing it!
+ }
+ break;
+ case BIO_CTRL_GET_CLOSE: // Is the BIO in auto close mode?
+ ret = get_bio_shutdown(*bio);
+ break;
+ case BIO_CTRL_SET_CLOSE: // Should the BIO be in auto close mode? Spoiler alert: we don't really care.
+ set_bio_shutdown(*bio, static_cast<int>(num));
+ break;
+ case BIO_CTRL_WPENDING:
+ ret = 0;
+ break;
+ case BIO_CTRL_PENDING:
+ ret = (buf_view != nullptr) ? buf_view->pending() : 0;
+ break;
+ case BIO_CTRL_DUP:
+ case BIO_CTRL_FLUSH:
+ ret = 1; // Same as memory OpenSSL BIO ctrl func.
+ break;
+ case BIO_CTRL_RESET:
+ case BIO_C_SET_BUF_MEM:
+ case BIO_C_GET_BUF_MEM_PTR:
+ case BIO_C_SET_BUF_MEM_EOF_RETURN:
+ LOG_ASSERT(!"Unsupported BIO control function called");
+ case BIO_CTRL_PUSH:
+ case BIO_CTRL_POP:
+ default:
+ ret = 0; // Not supported (but be gentle, since it's actually invoked)
+ break;
+ }
+ return ret;
+}
+
+long mutable_buffer_bio_ctrl(::BIO* bio, int cmd, long num, void* ptr) {
+ return do_buffer_bio_ctrl<MutableBufferView>(bio, cmd, num, ptr);
+}
+
+long const_buffer_bio_ctrl(::BIO* bio, int cmd, long num, void* ptr) {
+ return do_buffer_bio_ctrl<ConstBufferView>(bio, cmd, num, ptr);
+}
+
+MutableBufferView mutable_buffer_view_of(char* buffer, size_t sz) {
+ return {buffer, sz, 0, 0};
+}
+
+ConstBufferView const_buffer_view_of(const char* buffer, size_t sz) {
+ return {buffer, sz, 0};
+}
+
+[[maybe_unused]] bool is_const_bio(::BIO& bio) noexcept {
+ return (::BIO_method_type(&bio) == const_buf_method().type_index);
+}
+
+[[maybe_unused]] bool is_mutable_bio(::BIO& bio) noexcept {
+ return (::BIO_method_type(&bio) == mutable_buf_method().type_index);
+}
+
+// There is a cute little bug in BIO_meth_new() present in v1.1.0h which
+// causes the provided BIO method type to not be actually written into the
+// target BIO_METHOD instance. This means that any assertions that check the
+// BIO's method type on this version are doomed to fail.
+// See https://github.com/openssl/openssl/pull/5812
+#if ((OPENSSL_VERSION_NUMBER & 0xfffffff0L) != 0x10100080L)
+# define WHEN_NO_OPENSSL_BIO_TYPE_BUG(expr) expr
+#else
+# define WHEN_NO_OPENSSL_BIO_TYPE_BUG(expr)
+#endif
+
+void set_bio_mutable_buffer_view(::BIO& bio, MutableBufferView* view) {
+ WHEN_NO_OPENSSL_BIO_TYPE_BUG(LOG_ASSERT(is_mutable_bio(bio)));
+ set_bio_data(bio, view);
+}
+
+void set_bio_const_buffer_view(::BIO& bio, ConstBufferView* view) {
+ WHEN_NO_OPENSSL_BIO_TYPE_BUG(LOG_ASSERT(is_const_bio(bio)));
+ set_bio_data(bio, view);
+}
+
+// Precondition: bio must have been created by a call to either
+// new_mutable_direct_buffer_bio() or new_const_direct_buffer_bio()
+void unset_bio_buffer_view(::BIO& bio) {
+ WHEN_NO_OPENSSL_BIO_TYPE_BUG(LOG_ASSERT(is_mutable_bio(bio) || is_const_bio(bio)));
+ set_bio_data(bio, nullptr);
+}
+
+} // anon ns
+
+ConstBufferViewGuard::ConstBufferViewGuard(::BIO& bio, const char* buffer, size_t sz) noexcept
+ : _bio(bio),
+ _view(const_buffer_view_of(buffer, sz))
+{
+ WHEN_NO_OPENSSL_BIO_TYPE_BUG(LOG_ASSERT(is_const_bio(bio)));
+ set_bio_const_buffer_view(bio, &_view);
+}
+
+ConstBufferViewGuard::~ConstBufferViewGuard() {
+ unset_bio_buffer_view(_bio);
+}
+
+MutableBufferViewGuard::MutableBufferViewGuard(::BIO& bio, char* buffer, size_t sz) noexcept
+ : _bio(bio),
+ _view(mutable_buffer_view_of(buffer, sz))
+{
+ WHEN_NO_OPENSSL_BIO_TYPE_BUG(LOG_ASSERT(is_mutable_bio(bio)));
+ set_bio_mutable_buffer_view(bio, &_view);
+}
+
+MutableBufferViewGuard::~MutableBufferViewGuard() {
+ unset_bio_buffer_view(_bio);
+}
+
+}
diff --git a/vespalib/src/vespa/vespalib/net/tls/impl/direct_buffer_bio.h b/vespalib/src/vespa/vespalib/net/tls/impl/direct_buffer_bio.h
new file mode 100644
index 00000000000..581d43d6f29
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/net/tls/impl/direct_buffer_bio.h
@@ -0,0 +1,90 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include "openssl_typedefs.h"
+#include <openssl/bio.h>
+
+/*
+ * Custom BIO implementations which offer direct write/read only buffer
+ * access to underlying memory buffers. This removes the need to allocate
+ * separate memory BIOs into/from which data is redundantly copied.
+ *
+ * These BIOs are merely views into buffers that the user must set appropriately
+ * before invoking OpenSSL functions that invoke them. The ability to set buffers
+ * is only available via scoped guards that cannot be copied or moved.
+ *
+ * Since no buffer allocation is ever done by these BIOs, it is the responsibility
+ * of the caller to provide sufficiently large buffers that OpenSSL operations can
+ * make progress.
+ *
+ * The BIOs ensure that OpenSSL cannot write to read-only buffers and vice versa.
+ */
+
+namespace vespalib::net::tls::impl {
+
+BioPtr new_mutable_direct_buffer_bio();
+BioPtr new_const_direct_buffer_bio();
+
+struct MutableBufferView {
+ // Could use a pointer pair instead (or just modify the ptr), but being explicit is good for readability.
+ char* buffer;
+ size_t size;
+ size_t pos;
+ size_t rpos;
+
+ // Pending means "how much is written"
+ size_t pending() const noexcept {
+ return pos;
+ }
+};
+
+struct ConstBufferView {
+ const char* buffer;
+ size_t size;
+ size_t pos;
+
+ // Pending means "how much is left to read"
+ size_t pending() const noexcept {
+ return size - pos;
+ }
+};
+
+class ConstBufferViewGuard {
+ ::BIO& _bio;
+ ConstBufferView _view;
+public:
+ // Important: the buffer view pointer and the buffer it points to MUST remain
+ // valid until this guard is destroyed! Exception to the latter is
+ // if the data buffer length is 0 AND the data buffer pointer is nullptr.
+ // Precondition: bio must have been created by a call to new_const_direct_buffer_bio()
+ ConstBufferViewGuard(::BIO& bio, const char* buffer, size_t sz) noexcept;
+ ~ConstBufferViewGuard();
+
+ // The current active buffer view has a reference into our own struct, so
+ // we cannot allow that pointer to be invalidated by copies or moves.
+ ConstBufferViewGuard(const ConstBufferViewGuard&) = delete;
+ ConstBufferViewGuard& operator=(const ConstBufferViewGuard&) = delete;
+ ConstBufferViewGuard(ConstBufferViewGuard&&) = delete;
+ ConstBufferViewGuard& operator=(ConstBufferViewGuard&&) = delete;
+};
+
+class MutableBufferViewGuard {
+ ::BIO& _bio;
+ MutableBufferView _view;
+public:
+ // Important: the buffer view pointer and the buffer it points to MUST remain
+ // valid until this guard is destroyed! Exception to the latter is
+ // if the data buffer length is 0 AND the data buffer pointer is nullptr.
+ // Precondition: bio must have been created by a call to new_mutable_direct_buffer_bio()
+ MutableBufferViewGuard(::BIO& bio, char* buffer, size_t sz) noexcept;
+ ~MutableBufferViewGuard();
+
+ // The current active buffer view has a reference into our own struct, so
+ // we cannot allow that pointer to be invalidated by copies or moves.
+ MutableBufferViewGuard(const MutableBufferViewGuard&) = delete;
+ MutableBufferViewGuard& operator=(const MutableBufferViewGuard&) = delete;
+ MutableBufferViewGuard(MutableBufferViewGuard&&) = delete;
+ MutableBufferViewGuard& operator=(MutableBufferViewGuard&&) = delete;
+};
+
+}
diff --git a/vespalib/src/vespa/vespalib/net/tls/impl/openssl_crypto_codec_impl.cpp b/vespalib/src/vespa/vespalib/net/tls/impl/openssl_crypto_codec_impl.cpp
index a563a43baac..d1cddcfaf8c 100644
--- a/vespalib/src/vespa/vespalib/net/tls/impl/openssl_crypto_codec_impl.cpp
+++ b/vespalib/src/vespa/vespalib/net/tls/impl/openssl_crypto_codec_impl.cpp
@@ -1,6 +1,7 @@
// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "openssl_crypto_codec_impl.h"
#include "openssl_tls_context_impl.h"
+#include "direct_buffer_bio.h"
#include <vespa/vespalib/net/tls/crypto_codec.h>
#include <vespa/vespalib/net/tls/crypto_exception.h>
#include <mutex>
@@ -80,10 +81,6 @@ HandshakeResult handshake_consumed_bytes_and_needs_more_peer_data(size_t consume
return {consumed, 0, HandshakeResult::State::NeedsMorePeerData};
}
-HandshakeResult handshake_produced_bytes_and_needs_more_peer_data(size_t produced) noexcept {
- return {0, produced, HandshakeResult::State::NeedsMorePeerData};
-}
-
HandshakeResult handshake_consumed_bytes_and_is_complete(size_t consumed) noexcept {
return {consumed, 0, HandshakeResult::State::Done};
}
@@ -124,12 +121,19 @@ DecodeResult decoded_bytes(size_t consumed, size_t produced, DecodeResult::State
return {consumed, produced, state};
}
-BioPtr new_tls_frame_memory_bio() {
- BioPtr bio(::BIO_new(BIO_s_mem()));
+BioPtr new_tls_frame_mutable_memory_bio() {
+ BioPtr bio(new_mutable_direct_buffer_bio());
if (!bio) {
- throw CryptoException("IO_new(BIO_s_mem()) failed; out of memory?");
+ throw CryptoException("new_mutable_direct_buffer_bio() failed; out of memory?");
+ }
+ return bio;
+}
+
+BioPtr new_tls_frame_const_memory_bio() {
+ BioPtr bio(new_const_direct_buffer_bio());
+ if (!bio) {
+ throw CryptoException("new_const_direct_buffer_bio() failed; out of memory?");
}
- BIO_set_write_buf_size(bio.get(), 0); // 0 ==> default max frame size
return bio;
}
@@ -148,10 +152,6 @@ OpenSslCryptoCodecImpl::OpenSslCryptoCodecImpl(::SSL_CTX& ctx, Mode mode)
* a bit more straight forward to implement a full duplex API with two
* separate BIOs, but there is little available documentation as to the
* 'hows' and 'whys' around this.
- * There are claims from core OpenSSL devs[0] that BIO pairs are more efficient,
- * so we may reconsider the current approach (or just use the "OpenSSL controls
- * the file descriptor" yolo approach for simplicity, assuming they do optimal
- * stuff internally).
*
* Our BIOs are used as follows:
*
@@ -167,14 +167,16 @@ OpenSslCryptoCodecImpl::OpenSslCryptoCodecImpl(::SSL_CTX& ctx, Mode mode)
* encode() : SSL_write(plaintext) --(_output_bio ciphertext)--> BIO_read --> [peer]
* decode() : SSL_read(plaintext) <--(_input_bio ciphertext)-- BIO_write <-- [peer]
*
- * To avoid blowing the sizes of BIOs out of the water, we do our best to encode and decode
- * on a per-TLS frame granularity (16K) maximum.
*/
- BioPtr tmp_input_bio = new_tls_frame_memory_bio();
- BioPtr tmp_output_bio = new_tls_frame_memory_bio();
- // Connect BIOs used internally by OpenSSL. This transfers ownership. No return value to check.
- // TODO replace with explicit SSL_set0_rbio/SSL_set0_wbio on OpenSSL >= v1.1
+ BioPtr tmp_input_bio = new_tls_frame_const_memory_bio();
+ BioPtr tmp_output_bio = new_tls_frame_mutable_memory_bio();
+ // Connect BIOs used internally by OpenSSL. This transfers ownership. No return values to check.
+#if (OPENSSL_VERSION_NUMBER >= 0x10100000L)
+ ::SSL_set0_rbio(_ssl.get(), tmp_input_bio.get());
+ ::SSL_set0_wbio(_ssl.get(), tmp_output_bio.get());
+#else
::SSL_set_bio(_ssl.get(), tmp_input_bio.get(), tmp_output_bio.get());
+#endif
_input_bio = tmp_input_bio.release();
_output_bio = tmp_output_bio.release();
if (_mode == Mode::Client) {
@@ -186,22 +188,6 @@ OpenSslCryptoCodecImpl::OpenSslCryptoCodecImpl(::SSL_CTX& ctx, Mode mode)
// TODO remove spammy logging once code is stable
-// Produces bytes previously written to _output_bio by SSL_do_handshake or SSL_write
-int OpenSslCryptoCodecImpl::drain_outgoing_network_bytes_if_any(
- char *to_peer, size_t to_peer_buf_size) noexcept {
- int out_pending = BIO_pending(_output_bio);
- if (out_pending > 0) {
- int copied = ::BIO_read(_output_bio, to_peer, static_cast<int>(to_peer_buf_size));
- // TODO BIO_should_retry here? Semantics are unclear, especially for memory BIOs.
- LOG(spam, "BIO_read copied out %d bytes of ciphertext from _output_bio", copied);
- if (copied < 0) {
- LOG(error, "Memory BIO_read() failed with BIO_pending() > 0");
- }
- return copied;
- }
- return out_pending;
-}
-
HandshakeResult OpenSslCryptoCodecImpl::handshake(const char* from_peer, size_t from_peer_buf_size,
char* to_peer, size_t to_peer_buf_size) noexcept {
LOG_ASSERT(verify_buf(from_peer, from_peer_buf_size) && verify_buf(to_peer, to_peer_buf_size));
@@ -209,75 +195,53 @@ HandshakeResult OpenSslCryptoCodecImpl::handshake(const char* from_peer, size_t
if (SSL_is_init_finished(_ssl.get())) {
return handshake_completed();
}
- // Still ciphertext data left? If so, get rid of it before we start a new operation
- // that wants to fill the output BIO.
- int produced = drain_outgoing_network_bytes_if_any(to_peer, to_peer_buf_size);
- if (produced > 0) {
- // Handshake isn't complete yet and we've got stuff to send. Need to continue handshake
- // once more data is available from the peer.
- return handshake_produced_bytes_and_needs_more_peer_data(static_cast<size_t>(produced));
- } else if (produced < 0) {
- return handshake_failed();
- }
- const auto consume_res = do_handshake_and_consume_peer_input_bytes(from_peer, from_peer_buf_size);
+ ConstBufferViewGuard const_view_guard(*_input_bio, from_peer, from_peer_buf_size);
+ MutableBufferViewGuard mut_view_guard(*_output_bio, to_peer, to_peer_buf_size);
+
+ const auto consume_res = do_handshake_and_consume_peer_input_bytes();
LOG_ASSERT(consume_res.bytes_produced == 0);
if (consume_res.failed()) {
return consume_res;
}
// SSL_do_handshake() might have produced more data to send. Note: handshake may
// be complete at this point.
- produced = drain_outgoing_network_bytes_if_any(to_peer, to_peer_buf_size);
- if (produced < 0) {
- return handshake_failed();
- }
+ int produced = BIO_pending(_output_bio);
return handshaked_bytes(consume_res.bytes_consumed, static_cast<size_t>(produced), consume_res.state);
}
-HandshakeResult OpenSslCryptoCodecImpl::do_handshake_and_consume_peer_input_bytes(
- const char *from_peer, size_t from_peer_buf_size) noexcept {
- // Feed the SSL session input in frame-sized chunks between each call to SSL_do_handshake().
- // This is primarily to ensure we don't shove unbounded amounts of data into the BIO
- // in the case that someone naughty is sending us tons of garbage over the socket.
- size_t consumed_total = 0;
- while (true) {
- // Assumption: SSL_do_handshake will place all required outgoing handshake
- // data in the output memory BIO without requiring WANT_WRITE. Freestanding
- // memory BIOs are _supposedly_ auto-resizing, so this should work transparently.
- // At the very least, if this is not the case we'll auto-fail the connection
- // and quickly find out..!
- // TODO test multi-frame sized handshake
- // TODO should we invoke ::ERR_clear_error() prior?
- int ssl_result = ::SSL_do_handshake(_ssl.get());
- ssl_result = ::SSL_get_error(_ssl.get(), ssl_result);
-
- if (ssl_result == SSL_ERROR_WANT_READ) {
- LOG(spam, "SSL_do_handshake() returned SSL_ERROR_WANT_READ");
- if (from_peer_buf_size - consumed_total > 0) {
- int consumed = ::BIO_write(_input_bio, from_peer + consumed_total,
- static_cast<int>(std::min(MaximumTlsFrameSize, from_peer_buf_size - consumed_total)));
- LOG(spam, "BIO_write copied in %d bytes of ciphertext to _input_bio", consumed);
- if (consumed < 0) {
- LOG(error, "Memory BIO_write() returned %d", consumed); // TODO BIO_need_retry?
- return handshake_failed();
- }
- consumed_total += consumed; // TODO protect against consumed == 0?
- continue;
- } else {
- return handshake_consumed_bytes_and_needs_more_peer_data(consumed_total);
- }
- } else if (ssl_result == SSL_ERROR_NONE) {
- // At this point SSL_do_handshake has stated it does not need any more peer data, i.e.
- // the handshake is complete.
- if (!SSL_is_init_finished(_ssl.get())) {
- LOG(error, "SSL handshake is not completed even though no more peer data is requested");
- return handshake_failed();
- }
- return handshake_consumed_bytes_and_is_complete(consumed_total);
- } else {
- LOG(error, "SSL_do_handshake() returned unexpected error: %s", ssl_error_to_str(ssl_result));
+HandshakeResult OpenSslCryptoCodecImpl::do_handshake_and_consume_peer_input_bytes() noexcept {
+ // Assumption: SSL_do_handshake will place all required outgoing handshake
+ // data in the output memory BIO without requiring WANT_WRITE.
+ // TODO test multi-frame sized handshake
+ const long pending_read_before = BIO_pending(_input_bio);
+
+ ::ERR_clear_error();
+ int ssl_result = ::SSL_do_handshake(_ssl.get());
+ ssl_result = ::SSL_get_error(_ssl.get(), ssl_result);
+
+ const long consumed = pending_read_before - BIO_pending(_input_bio);
+ LOG_ASSERT(consumed >= 0);
+
+ if (ssl_result == SSL_ERROR_WANT_READ) {
+ LOG(spam, "SSL_do_handshake() returned SSL_ERROR_WANT_READ");
+
+ return handshake_consumed_bytes_and_needs_more_peer_data(static_cast<size_t>(consumed));
+ } else if (ssl_result == SSL_ERROR_NONE) {
+ // At this point SSL_do_handshake has stated it does not need any more peer data, i.e.
+ // the handshake is complete.
+ if (!SSL_is_init_finished(_ssl.get())) {
+ LOG(error, "SSL handshake is not completed even though no more peer data is requested");
return handshake_failed();
}
- };
+ LOG(debug, "SSL_do_handshake() is complete, using protocol %s", SSL_get_version(_ssl.get()));
+ return handshake_consumed_bytes_and_is_complete(static_cast<size_t>(consumed));
+ } else {
+ LOG(error, "SSL_do_handshake() returned unexpected error: %s", ssl_error_to_str(ssl_result));
+ char buf[256];
+ ERR_error_string_n(ERR_get_error(), buf, sizeof(buf));
+ LOG(error, "%s", buf);
+ return handshake_failed();
+ }
}
EncodeResult OpenSslCryptoCodecImpl::encode(const char* plaintext, size_t plaintext_size,
@@ -288,13 +252,16 @@ EncodeResult OpenSslCryptoCodecImpl::encode(const char* plaintext, size_t plaint
LOG(error, "OpenSslCryptoCodecImpl::encode() called before handshake completed");
return encode_failed();
}
+
+ MutableBufferViewGuard mut_view_guard(*_output_bio, ciphertext, ciphertext_size);
+ // _input_bio not read from here.
+
size_t bytes_consumed = 0;
if (plaintext_size != 0) {
int to_consume = static_cast<int>(std::min(plaintext_size, MaximumFramePlaintextSize));
// SSL_write encodes plaintext to ciphertext and writes to _output_bio
int consumed = ::SSL_write(_ssl.get(), plaintext, to_consume);
- LOG(spam, "After SSL_write() -> %d, _input_bio pending=%d, _output_bio pending=%d",
- consumed, BIO_pending(_input_bio), BIO_pending(_output_bio));
+ LOG(spam, "After SSL_write() -> %d _output_bio pending=%d", consumed, BIO_pending(_output_bio));
if (consumed < 0) {
int ssl_error = ::SSL_get_error(_ssl.get(), consumed);
LOG(error, "SSL_write() failed to write frame, got error %s", ssl_error_to_str(ssl_error));
@@ -306,15 +273,7 @@ EncodeResult OpenSslCryptoCodecImpl::encode(const char* plaintext, size_t plaint
}
bytes_consumed = static_cast<size_t>(consumed);
}
-
- int produced = drain_outgoing_network_bytes_if_any(ciphertext, ciphertext_size);
- if (produced < 0) {
- return encode_failed();
- }
- if (BIO_pending(_output_bio) != 0) {
- LOG(error, "Residual data left in output BIO on encode(); provided buffer is too small");
- return encode_failed();
- }
+ int produced = BIO_pending(_output_bio);
return encoded_bytes(bytes_consumed, static_cast<size_t>(produced));
}
DecodeResult OpenSslCryptoCodecImpl::decode(const char* ciphertext, size_t ciphertext_size,
@@ -325,15 +284,18 @@ DecodeResult OpenSslCryptoCodecImpl::decode(const char* ciphertext, size_t ciphe
LOG(error, "OpenSslCryptoCodecImpl::decode() called before handshake completed");
return decode_failed();
}
+ ConstBufferViewGuard const_view_guard(*_input_bio, ciphertext, ciphertext_size);
+ // _output_bio not written to here
+
+ const int input_pending_before = BIO_pending(_input_bio);
auto produce_res = drain_and_produce_plaintext_from_ssl(plaintext, static_cast<int>(plaintext_size));
- if ((produce_res.bytes_produced > 0) || produce_res.failed()) {
- return produce_res; // TODO gRPC [1] handles this differently... allows fallthrough
- }
- int consumed = consume_peer_input_bytes(ciphertext, ciphertext_size);
- if (consumed < 0) {
- return decode_failed();
- }
- produce_res = drain_and_produce_plaintext_from_ssl(plaintext, static_cast<int>(plaintext_size));
+ const int input_pending_after = BIO_pending(_input_bio);
+
+ LOG_ASSERT(input_pending_before >= input_pending_after);
+ const int consumed = input_pending_before - input_pending_after;
+ LOG(spam, "decode: consumed %d bytes (ciphertext buffer %d -> %d bytes), produced %zu bytes. Need read: %s",
+ consumed, input_pending_before, input_pending_after, produce_res.bytes_produced,
+ (produce_res.state == DecodeResult::State::NeedsMorePeerData) ? "yes" : "no");
return decoded_bytes(static_cast<size_t>(consumed), produce_res.bytes_produced, produce_res.state);
}
@@ -345,37 +307,24 @@ DecodeResult OpenSslCryptoCodecImpl::drain_and_produce_plaintext_from_ssl(
// depending on how much TLS frame data is available and its size relative
// to the receiving plaintext buffer.
int produced = ::SSL_read(_ssl.get(), plaintext, static_cast<int>(plaintext_size));
- LOG(spam, "After SSL_read() -> %d, _input_bio pending=%d, _output_bio pending=%d",
- produced, BIO_pending(_input_bio), BIO_pending(_output_bio));
if (produced > 0) {
// At least 1 frame decoded successfully.
return decoded_frames_with_plaintext_bytes(static_cast<size_t>(produced));
} else {
int ssl_error = ::SSL_get_error(_ssl.get(), produced);
switch (ssl_error) {
- case SSL_ERROR_WANT_READ:
- // SSL_read() was not able to decode a full frame with the ciphertext that
- // we've fed it thus far; caller must feed it some and then try again.
- LOG(spam, "SSL_read() returned SSL_ERROR_WANT_READ, must get more ciphertext");
- return decode_needs_more_peer_data();
- default:
- LOG(error, "SSL_read() returned unexpected error: %s", ssl_error_to_str(ssl_error));
- return decode_failed();
+ case SSL_ERROR_WANT_READ:
+ // SSL_read() was not able to decode a full frame with the ciphertext that
+ // we've fed it thus far; caller must feed it some and then try again.
+ LOG(spam, "SSL_read() returned SSL_ERROR_WANT_READ, must get more ciphertext");
+ return decode_needs_more_peer_data();
+ default:
+ LOG(error, "SSL_read() returned unexpected error: %s", ssl_error_to_str(ssl_error));
+ return decode_failed();
}
}
}
-int OpenSslCryptoCodecImpl::consume_peer_input_bytes(
- const char* ciphertext, size_t ciphertext_size) noexcept {
- // TODO BIO_need_retry on failure? Can this even happen for memory BIOs?
- int consumed = ::BIO_write(_input_bio, ciphertext, static_cast<int>(std::min(MaximumTlsFrameSize, ciphertext_size)));
- LOG(spam, "BIO_write copied in %d bytes of ciphertext to _input_bio", consumed);
- if (consumed < 0) {
- LOG(error, "Memory BIO_write() returned %d", consumed);
- }
- return consumed;
-}
-
}
// External references:
diff --git a/vespalib/src/vespa/vespalib/net/tls/impl/openssl_crypto_codec_impl.h b/vespalib/src/vespa/vespalib/net/tls/impl/openssl_crypto_codec_impl.h
index 44ca8859596..4c253fdf24c 100644
--- a/vespalib/src/vespa/vespalib/net/tls/impl/openssl_crypto_codec_impl.h
+++ b/vespalib/src/vespa/vespalib/net/tls/impl/openssl_crypto_codec_impl.h
@@ -55,21 +55,7 @@ public:
DecodeResult decode(const char* ciphertext, size_t ciphertext_size,
char* plaintext, size_t plaintext_size) noexcept override;
private:
- /*
- * Returns
- * n > 0 if n bytes written to `to_peer`. Always <= to_peer_buf_size
- * n == 0 if no bytes pending in output BIO
- * n < 0 on error
- */
- int drain_outgoing_network_bytes_if_any(char *to_peer, size_t to_peer_buf_size) noexcept;
- /*
- * Returns
- * n > 0 if n bytes written to `ciphertext`. Always <= ciphertext_size
- * n == 0 if no bytes pending in input BIO
- * n < 0 on error
- */
- int consume_peer_input_bytes(const char* ciphertext, size_t ciphertext_size) noexcept;
- HandshakeResult do_handshake_and_consume_peer_input_bytes(const char *from_peer, size_t from_peer_buf_size) noexcept;
+ HandshakeResult do_handshake_and_consume_peer_input_bytes() noexcept;
DecodeResult drain_and_produce_plaintext_from_ssl(char* plaintext, size_t plaintext_size) noexcept;
};